// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

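/*
 * Inode checksumming (metadata_csum): the crc32c below covers the whole
 * on-disk inode, with the checksum fields themselves folded in as zeroed
 * dummies so the stored value never influences its own computation.
 * ei->i_csum_seed is precomputed per inode from the filesystem checksum
 * seed, the inode number and the inode generation.
 */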
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}
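
/*
 * Note: when the inode is too small to carry i_checksum_hi, only the low
 * 16 bits of the crc32c fit on disk, which is why the verification above
 * masks the calculated value down to 16 bits.
 */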

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}
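
/*
 * (i_data provides EXT4_N_BLOCKS = 15 __le32 slots, i.e. 60 bytes, so a
 * fast symlink target must fit in under 60 bytes - hence the
 * i_size < EXT4_N_BLOCKS * 4 check above.)
 */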

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, the
	 * unwritten extent converting worker could merge extents and also
	 * have dirtied the inode. The flush worker ignores it because of the
	 * I_FREEING flag, but we still need to remove the inode from the
	 * writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Check whether someone else has accidentally dirtied the inode
	 * being evicted elsewhere; that could lead to inode use-after-free
	 * issues later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode, 0);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}
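
/*
 * (For encrypted regular files the zeroing must go through the encryption
 * layer - zeroes have to be written as encrypted blocks - which is why
 * fscrypt_zeroout_range() is used above instead of a plain
 * sb_issue_zeroout().)
 */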

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so the unwritten extent could be
	 * converted in the meantime.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because extents are collapsed in the status
	 * tree, so m_len might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks(); otherwise
 * it calls ext4_ind_map_blocks() to handle indirect-mapping-based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and unwritten, the
 * resulting @map is marked as unwritten. If create == 1, it will mark
 * @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, @map is returned as unmapped but we still fill map->m_len
 * to indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In the no-wait cached-query mode, there is nothing more we can
	 * do if the extent is not found in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * semantics and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with the create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check the EXTENTS flag again here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
			   map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
				    map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}
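
/*
 * A minimal usage sketch (illustrative only, not taken from this file):
 * a plain lookup of a single block at logical offset lblk, with no
 * allocation:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 means map.m_pblk and map.m_flags now describe a mapped or
 * unwritten extent; ret == 0 means a hole, with map.m_len set to the
 * hole's length; ret < 0 is an error.
 */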

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	int ret = 0;

	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	ret = _ext4_get_block(inode, iblock, bh_result,
			      EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

	/*
	 * If the buffer is marked unwritten, mark it as new to make sure it is
	 * zeroed out correctly in case of partial writes. Otherwise, there is
	 * a chance of stale data getting exposed.
	 */
	if (ret == 0 && buffer_unwritten(bh_result))
		set_buffer_new(bh_result);

	return ret;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}
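
/*
 * (Callers may pass wait == false to only kick off the reads and return
 * immediately; directory lookup uses this pattern for block readahead.)
 */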

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * Helper for handling dirtying of journalled data. We also mark the folio
 * as dirty so that the writeback code knows that this folio (and its inode)
 * contains dirty data. ext4_writepages() then commits the appropriate
 * transaction to make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!ret && dirty)
		ret = ext4_dirty_journalled_data(handle, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head) {
		create_empty_buffers(&folio->page, blocksize, 0);
		head = folio_buffers(folio);
	}
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		folio_zero_new_buffers(folio, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end(). So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/*
	 * The same as page allocation, we prealloc buffer heads before
	 * starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(&folio->page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_rwsem.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*pagep = &folio->page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list; metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/*
	 * It's important to update i_size while still holding the folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under the folio lock. First, it
	 * unnecessarily makes the holding time of the folio lock longer.
	 * Second, it forces lock ordering of the folio lock and transaction
	 * start for journaling filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct inode *inode,
					     struct folio *folio,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
					write_end_fn(handle, inode, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
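
/*
 * (The in-memory reservation counters above are kept in clusters, while
 * quota is charged in filesystem blocks - hence the EXT4_C2B()
 * conversions around dquot_reserve_block() and friends.)
 */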

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			     "ino %lu, to_free %d with only %d reserved "
			     "data blocks", inode->i_ino, to_free,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	/* These are input fields for ext4_do_writepages() */
	struct inode *inode;
	struct writeback_control *wbc;
	unsigned int can_map:1;	/* Can writepages call map blocks? */

	/* These are internal state of ext4_do_writepages() */
	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
	unsigned int journalled_more_data:1;
};
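
/*
 * (The [first_page, next_page) window tracks the pages collected so far;
 * mpage_release_unused_pages() below unlocks, and optionally invalidates,
 * collected pages that will not be written out, e.g. after a mapping
 * failure.)
 */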

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	unsigned nr, i;
	pgoff_t index, end;
	struct folio_batch fbatch;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	mpd->scanned_until_end = 0;
	index = mpd->first_page;
	end = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);

		/*
		 * avoid racing with extent status tree scans made by
		 * ext4_insert_delayed_block()
		 */
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_es_remove_extent(inode, start, last - start + 1);
		up_write(&EXT4_I(inode)->i_data_sem);
	}

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr = filemap_get_folios(mapping, &index, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			if (folio->index < mpd->first_page)
				continue;
			if (folio_next_index(folio) - 1 > end)
				continue;
			BUG_ON(!folio_test_locked(folio));
			BUG_ON(folio_test_writeback(folio));
			if (invalidate) {
				if (folio_mapped(folio))
					folio_clear_dirty_for_io(folio);
				block_invalidate_folio(folio, 0,
						       folio_size(folio));
				folio_clear_uptodate(folio);
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		  percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		  percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)	/* ENOSPC */
			return ret;
	} else {	/* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					return ret;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)	/* ENOSPC */
						return ret;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ext4_es_insert_delayed_block(inode, lblk, allocated);
	return 0;
}

/*
 * This function borrows code from the very beginning of ext4_map_blocks(),
 * but assumes that the caller is coming from the delayed write path. It
 * looks up the requested blocks and sets the buffer delay bit under the
 * protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * A delayed extent could have been allocated by fallocate,
		 * so we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;

		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */

		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}
1795
1796 /*
1797 * This is a special get_block_t callback which is used by
1798 * ext4_da_write_begin(). It will either return mapped block or
1799 * reserve space for a single block.
1800 *
1801 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1802 * We also have b_blocknr = -1 and b_bdev initialized properly
1803 *
1804 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1805 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1806 * initialized properly.
1807 */
1808 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1809 struct buffer_head *bh, int create)
1810 {
1811 struct ext4_map_blocks map;
1812 int ret = 0;
1813
1814 BUG_ON(create == 0);
1815 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1816
1817 map.m_lblk = iblock;
1818 map.m_len = 1;
1819
1820 /*
1821 * First, we need to know whether the block is already allocated;
1822 * preallocated blocks are unmapped but should be treated
1823 * the same as allocated blocks.
1824 */
1825 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1826 if (ret <= 0)
1827 return ret;
1828
1829 map_bh(bh, inode->i_sb, map.m_pblk);
1830 ext4_update_bh_state(bh, map.m_flags);
1831
1832 if (buffer_unwritten(bh)) {
1833 /* A delayed write to unwritten bh should be marked
1834 * new and mapped. Mapped ensures that we don't do
1835 * get_block multiple times when we write to the same
1836 * offset and new ensures that we do proper zero out
1837 * for partial write.
1838 */
1839 set_buffer_new(bh);
1840 set_buffer_mapped(bh);
1841 }
1842 return 0;
1843 }
1844
1845 static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
1846 {
1847 mpd->first_page += folio_nr_pages(folio);
1848 folio_unlock(folio);
1849 }
1850
1851 static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
1852 {
1853 size_t len;
1854 loff_t size;
1855 int err;
1856
1857 BUG_ON(folio->index != mpd->first_page);
1858 folio_clear_dirty_for_io(folio);
1859 /*
1860 * We have to be very careful here! Nothing protects writeback path
1861 * against i_size changes and the page can be writeably mapped into
1862 * page tables. So an application can be growing i_size and writing
1863 * data through mmap while writeback runs. folio_clear_dirty_for_io()
1864 * write-protects our page in page tables and the page cannot get
1865 * written to again until we release folio lock. So only after
1866 * folio_clear_dirty_for_io() we are safe to sample i_size for
1867 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
1868 * on the barrier provided by folio_test_clear_dirty() in
1869 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
1870 * after page tables are updated.
1871 */
1872 size = i_size_read(mpd->inode);
1873 len = folio_size(folio);
1874 if (folio_pos(folio) + len > size &&
1875 !ext4_verity_in_progress(mpd->inode))
1876 len = size & ~PAGE_MASK;
1877 err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
1878 if (!err)
1879 mpd->wbc->nr_to_write--;
1880
1881 return err;
1882 }
1883
1884 #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
1885
1886 /*
1887 * mballoc gives us at most this number of blocks...
1888 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1889 * The rest of mballoc seems to handle chunks up to full group size.
1890 */
1891 #define MAX_WRITEPAGES_EXTENT_LEN 2048
1892
1893 /*
1894 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1895 *
1896 * @mpd - extent of blocks
1897 * @lblk - logical number of the block in the file
1898 * @bh - buffer head we want to add to the extent
1899 *
1900 * The function is used to collect contiguous blocks in the same state. If the
1901 * buffer doesn't require mapping for writeback and we haven't started the
1902 * extent of buffers to map yet, the function returns 'true' immediately - the
1903 * caller can write the buffer right away. Otherwise the function returns true
1904 * if the block has been added to the extent, false if the block couldn't be
1905 * added.
1906 */
1907 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1908 struct buffer_head *bh)
1909 {
1910 struct ext4_map_blocks *map = &mpd->map;
1911
1912 /* Buffer that doesn't need mapping for writeback? */
1913 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1914 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1915 /* So far no extent to map => we write the buffer right away */
1916 if (map->m_len == 0)
1917 return true;
1918 return false;
1919 }
1920
1921 /* First block in the extent? */
1922 if (map->m_len == 0) {
1923 /* We cannot map unless handle is started... */
1924 if (!mpd->do_map)
1925 return false;
1926 map->m_lblk = lblk;
1927 map->m_len = 1;
1928 map->m_flags = bh->b_state & BH_FLAGS;
1929 return true;
1930 }
1931
1932 /* Don't go larger than mballoc is willing to allocate */
1933 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1934 return false;
1935
1936 /* Can we merge the block to our big extent? */
1937 if (lblk == map->m_lblk + map->m_len &&
1938 (bh->b_state & BH_FLAGS) == map->m_flags) {
1939 map->m_len++;
1940 return true;
1941 }
1942 return false;
1943 }
1944
1945 /*
1946 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1947 *
1948 * @mpd - extent of blocks for mapping
1949 * @head - the first buffer in the page
1950 * @bh - buffer we should start processing from
1951 * @lblk - logical number of the block in the file corresponding to @bh
1952 *
1953 * Walk through page buffers from @bh up to @head (exclusive) and either submit
1954 * the page for IO if all buffers in this page were mapped and there's no
1955 * accumulated extent of buffers to map or add buffers in the page to the
1956 * extent of buffers to map. The function returns 1 if the caller can continue
1957 * by processing the next page, 0 if it should stop adding buffers to the
1958 * extent to map because we cannot extend it anymore. It can also return a
1959 * value < 0 in case of an error during IO submission.
1960 */
1961 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1962 struct buffer_head *head,
1963 struct buffer_head *bh,
1964 ext4_lblk_t lblk)
1965 {
1966 struct inode *inode = mpd->inode;
1967 int err;
1968 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
1969 >> inode->i_blkbits;
1970
1971 if (ext4_verity_in_progress(inode))
1972 blocks = EXT_MAX_BLOCKS;
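	/*
	 * Example (illustrative): with i_size == 5000 and 4K blocks, the
	 * round-up above gives blocks == 2, so the file's data ends inside
	 * logical block 1 and any lblk >= 2 is past EOF.
	 */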
1973
1974 do {
1975 BUG_ON(buffer_locked(bh));
1976
1977 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
1978 /* Found extent to map? */
1979 if (mpd->map.m_len)
1980 return 0;
1981 /* Buffer needs mapping and handle is not started? */
1982 if (!mpd->do_map)
1983 return 0;
1984 /* Everything mapped so far and we hit EOF */
1985 break;
1986 }
1987 } while (lblk++, (bh = bh->b_this_page) != head);
1988 /* So far everything mapped? Submit the page for IO. */
1989 if (mpd->map.m_len == 0) {
1990 err = mpage_submit_folio(mpd, head->b_folio);
1991 if (err < 0)
1992 return err;
1993 mpage_folio_done(mpd, head->b_folio);
1994 }
1995 if (lblk >= blocks) {
1996 mpd->scanned_until_end = 1;
1997 return 0;
1998 }
1999 return 1;
2000 }
2001
2002 /*
2003 * mpage_process_folio - update folio buffers corresponding to changed extent
2004 * and possibly submit the fully mapped folio for IO
2005 * @mpd: description of extent to map, on return next extent to map
2006 * @folio: Contains these buffers.
2007 * @m_lblk: logical block mapping.
2008 * @m_pblk: corresponding physical mapping.
2009 * @map_bh: determines on return whether this page requires any further
2010 * mapping or not.
2011 *
2012 * Scan given folio buffers corresponding to changed extent and update buffer
2013 * state according to new extent state.
2014 * We map delalloc buffers to their physical location, clear unwritten bits.
2015 * If the given folio is not fully mapped, we update @mpd to the next extent in
2016 * the given folio that needs mapping & return @map_bh as true.
2017 */
2018 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2019 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2020 bool *map_bh)
2021 {
2022 struct buffer_head *head, *bh;
2023 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2024 ext4_lblk_t lblk = *m_lblk;
2025 ext4_fsblk_t pblock = *m_pblk;
2026 int err = 0;
2027 int blkbits = mpd->inode->i_blkbits;
2028 ssize_t io_end_size = 0;
2029 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2030
2031 bh = head = folio_buffers(folio);
2032 do {
2033 if (lblk < mpd->map.m_lblk)
2034 continue;
2035 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2036 /*
2037 * Buffer after end of mapped extent.
2038 * Find next buffer in the folio to map.
2039 */
2040 mpd->map.m_len = 0;
2041 mpd->map.m_flags = 0;
2042 io_end_vec->size += io_end_size;
2043
2044 err = mpage_process_page_bufs(mpd, head, bh, lblk);
2045 if (err > 0)
2046 err = 0;
2047 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2048 io_end_vec = ext4_alloc_io_end_vec(io_end);
2049 if (IS_ERR(io_end_vec)) {
2050 err = PTR_ERR(io_end_vec);
2051 goto out;
2052 }
2053 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2054 }
2055 *map_bh = true;
2056 goto out;
2057 }
2058 if (buffer_delay(bh)) {
2059 clear_buffer_delay(bh);
2060 bh->b_blocknr = pblock++;
2061 }
2062 clear_buffer_unwritten(bh);
2063 io_end_size += (1 << blkbits);
2064 } while (lblk++, (bh = bh->b_this_page) != head);
2065
2066 io_end_vec->size += io_end_size;
2067 *map_bh = false;
2068 out:
2069 *m_lblk = lblk;
2070 *m_pblk = pblock;
2071 return err;
2072 }
2073
2074 /*
2075 * mpage_map_buffers - update buffers corresponding to changed extent and
2076 * submit fully mapped pages for IO
2077 *
2078 * @mpd - description of extent to map, on return next extent to map
2079 *
2080 * Scan buffers corresponding to changed extent (we expect corresponding pages
2081 * to be already locked) and update buffer state according to new extent state.
2082 * We map delalloc buffers to their physical location, clear unwritten bits,
2083 * and mark buffers as uninit when we perform writes to unwritten extents
2084 * and do extent conversion after IO is finished. If the last page is not fully
2085 * mapped, we update @map to the next extent in the last page that needs
2086 * mapping. Otherwise we submit the page for IO.
2087 */
2088 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2089 {
2090 struct folio_batch fbatch;
2091 unsigned nr, i;
2092 struct inode *inode = mpd->inode;
2093 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2094 pgoff_t start, end;
2095 ext4_lblk_t lblk;
2096 ext4_fsblk_t pblock;
2097 int err;
2098 bool map_bh = false;
2099
2100 start = mpd->map.m_lblk >> bpp_bits;
2101 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2102 lblk = start << bpp_bits;
2103 pblock = mpd->map.m_pblk;
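	/*
	 * Example (illustrative): with 4K pages and 1K blocks, bpp_bits == 2,
	 * so logical blocks 0-3 live in page 0, blocks 4-7 in page 1, and
	 * lblk above is rounded down to the first block of the starting page.
	 */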
2104
2105 folio_batch_init(&fbatch);
2106 while (start <= end) {
2107 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2108 if (nr == 0)
2109 break;
2110 for (i = 0; i < nr; i++) {
2111 struct folio *folio = fbatch.folios[i];
2112
2113 err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2114 &map_bh);
2115 /*
2116 * If map_bh is true, the page may require further bh
2117 * mapping, or the page may have been submitted for IO.
2118 * So we return to the caller to do further extent mapping.
2119 */
2120 if (err < 0 || map_bh)
2121 goto out;
2122 /* Page fully mapped - let IO run! */
2123 err = mpage_submit_folio(mpd, folio);
2124 if (err < 0)
2125 goto out;
2126 mpage_folio_done(mpd, folio);
2127 }
2128 folio_batch_release(&fbatch);
2129 }
2130 /* Extent fully mapped and matches with page boundary. We are done. */
2131 mpd->map.m_len = 0;
2132 mpd->map.m_flags = 0;
2133 return 0;
2134 out:
2135 folio_batch_release(&fbatch);
2136 return err;
2137 }
2138
2139 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2140 {
2141 struct inode *inode = mpd->inode;
2142 struct ext4_map_blocks *map = &mpd->map;
2143 int get_blocks_flags;
2144 int err, dioread_nolock;
2145
2146 trace_ext4_da_write_pages_extent(inode, map);
2147 /*
2148 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2149 * to convert an unwritten extent to be initialized (in the case
2150 * where we have written into one or more preallocated blocks). It is
2151 * possible that we're going to need more metadata blocks than
2152 * previously reserved. However, we must not fail because we're in
2153 * writeback and there is nothing we can do about it; a failure might
2154 * result in data loss. So use reserved blocks to allocate metadata if
2155 * possible.
2156 *
2157 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2158 * the blocks in question are delalloc blocks. This indicates
2159 * that the blocks and quotas have already been checked when
2160 * the data was copied into the page cache.
2161 */
2162 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2163 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2164 EXT4_GET_BLOCKS_IO_SUBMIT;
2165 dioread_nolock = ext4_should_dioread_nolock(inode);
2166 if (dioread_nolock)
2167 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2168 if (map->m_flags & BIT(BH_Delay))
2169 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2170
2171 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2172 if (err < 0)
2173 return err;
2174 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2175 if (!mpd->io_submit.io_end->handle &&
2176 ext4_handle_valid(handle)) {
2177 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2178 handle->h_rsv_handle = NULL;
2179 }
2180 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2181 }
2182
2183 BUG_ON(map->m_len == 0);
2184 return 0;
2185 }
2186
2187 /*
2188 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2189 * mpd->len and submit pages underlying it for IO
2190 *
2191 * @handle - handle for journal operations
2192 * @mpd - extent to map
2193 * @give_up_on_write - we set this to true iff there is a fatal error and there
2194 * is no hope of writing the data. The caller should discard
2195 * dirty pages to avoid infinite loops.
2196 *
2197 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2198 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2199 * them to initialized or split the described range from larger unwritten
2200 * extent. Note that we need not map all the described range since allocation
2201 * can return fewer blocks or the range is covered by more unwritten extents. We
2202 * cannot map more because we are limited by reserved transaction credits. On
2203 * the other hand we always make sure that the last touched page is fully
2204 * mapped so that it can be written out (and thus forward progress is
2205 * guaranteed). After mapping we submit all mapped pages for IO.
2206 */
2207 static int mpage_map_and_submit_extent(handle_t *handle,
2208 struct mpage_da_data *mpd,
2209 bool *give_up_on_write)
2210 {
2211 struct inode *inode = mpd->inode;
2212 struct ext4_map_blocks *map = &mpd->map;
2213 int err;
2214 loff_t disksize;
2215 int progress = 0;
2216 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2217 struct ext4_io_end_vec *io_end_vec;
2218
2219 io_end_vec = ext4_alloc_io_end_vec(io_end);
2220 if (IS_ERR(io_end_vec))
2221 return PTR_ERR(io_end_vec);
2222 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2223 do {
2224 err = mpage_map_one_extent(handle, mpd);
2225 if (err < 0) {
2226 struct super_block *sb = inode->i_sb;
2227
2228 if (ext4_forced_shutdown(sb))
2229 goto invalidate_dirty_pages;
2230 /*
2231 * Let the upper layers retry transient errors.
2232 * In the case of ENOSPC, if ext4_count_free_clusters()
2233 * is non-zero, a commit should free up blocks.
2234 */
2235 if ((err == -ENOMEM) ||
2236 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2237 if (progress)
2238 goto update_disksize;
2239 return err;
2240 }
2241 ext4_msg(sb, KERN_CRIT,
2242 "Delayed block allocation failed for "
2243 "inode %lu at logical offset %llu with"
2244 " max blocks %u with error %d",
2245 inode->i_ino,
2246 (unsigned long long)map->m_lblk,
2247 (unsigned)map->m_len, -err);
2248 ext4_msg(sb, KERN_CRIT,
2249 "This should not happen!! Data will "
2250 "be lost\n");
2251 if (err == -ENOSPC)
2252 ext4_print_free_blocks(inode);
2253 invalidate_dirty_pages:
2254 *give_up_on_write = true;
2255 return err;
2256 }
2257 progress = 1;
2258 /*
2259 * Update buffer state, submit mapped pages, and get us new
2260 * extent to map
2261 */
2262 err = mpage_map_and_submit_buffers(mpd);
2263 if (err < 0)
2264 goto update_disksize;
2265 } while (map->m_len);
2266
2267 update_disksize:
2268 /*
2269 * Update on-disk size after IO is submitted. Races with
2270 * truncate are avoided by checking i_size under i_data_sem.
2271 */
2272 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2273 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2274 int err2;
2275 loff_t i_size;
2276
2277 down_write(&EXT4_I(inode)->i_data_sem);
2278 i_size = i_size_read(inode);
2279 if (disksize > i_size)
2280 disksize = i_size;
2281 if (disksize > EXT4_I(inode)->i_disksize)
2282 EXT4_I(inode)->i_disksize = disksize;
2283 up_write(&EXT4_I(inode)->i_data_sem);
2284 err2 = ext4_mark_inode_dirty(handle, inode);
2285 if (err2) {
2286 ext4_error_err(inode->i_sb, -err2,
2287 "Failed to mark inode %lu dirty",
2288 inode->i_ino);
2289 }
2290 if (!err)
2291 err = err2;
2292 }
2293 return err;
2294 }
2295
2296 /*
2297 * Calculate the total number of credits to reserve for one writepages
2298 * iteration. This is called from ext4_writepages(). We map an extent of
2299 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2300 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2301 * bpp - 1 blocks in bpp different extents.
2302 */
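/*
 * Worked example (illustrative): on a journalled filesystem with 4K
 * pages and 1K blocks, ext4_journal_blocks_per_page() returns 4, so
 * this reserves credits for mapping up to 2048 + 4 - 1 = 2051 blocks
 * spread over at most 4 distinct extents.
 */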
2303 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2304 {
2305 int bpp = ext4_journal_blocks_per_page(inode);
2306
2307 return ext4_meta_trans_blocks(inode,
2308 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2309 }
2310
2311 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2312 size_t len)
2313 {
2314 struct buffer_head *page_bufs = folio_buffers(folio);
2315 struct inode *inode = folio->mapping->host;
2316 int ret, err;
2317
2318 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2319 NULL, do_journal_get_write_access);
2320 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2321 NULL, write_end_fn);
2322 if (ret == 0)
2323 ret = err;
2324 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2325 if (ret == 0)
2326 ret = err;
2327 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2328
2329 return ret;
2330 }
2331
2332 static int mpage_journal_page_buffers(handle_t *handle,
2333 struct mpage_da_data *mpd,
2334 struct folio *folio)
2335 {
2336 struct inode *inode = mpd->inode;
2337 loff_t size = i_size_read(inode);
2338 size_t len = folio_size(folio);
2339
2340 folio_clear_checked(folio);
2341 mpd->wbc->nr_to_write--;
2342
2343 if (folio_pos(folio) + len > size &&
2344 !ext4_verity_in_progress(inode))
2345 len = size & (len - 1);
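	/*
	 * Note (added for clarity): folio_size() is a power of two, so
	 * size & (len - 1) is the offset of EOF within this folio, i.e.
	 * the number of bytes actually valid in the final, partial folio.
	 */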
2346
2347 return ext4_journal_folio_buffers(handle, folio, len);
2348 }
2349
2350 /*
2351 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2352 * needing mapping, submit mapped pages
2353 *
2354 * @mpd - where to look for pages
2355 *
2356 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2357 * IO immediately. If we cannot map blocks, we submit just already mapped
2358 * buffers in the page for IO and keep page dirty. When we can map blocks and
2359 * we find a page which isn't mapped, we start accumulating an extent of
2360 * buffers underlying these pages that need mapping (formed by delayed or
2361 * unwritten buffers). We also lock the pages containing these buffers. The
2362 * extent found is returned in @mpd structure (starting at mpd->lblk with
2363 * length mpd->len blocks).
2364 *
2365 * Note that this function can attach bios to one io_end structure which are
2366 * neither logically nor physically contiguous. Although it may seem like an
2367 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2368 * case as we need to track IO to all buffers underlying a page in one io_end.
2369 */
2370 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2371 {
2372 struct address_space *mapping = mpd->inode->i_mapping;
2373 struct folio_batch fbatch;
2374 unsigned int nr_folios;
2375 pgoff_t index = mpd->first_page;
2376 pgoff_t end = mpd->last_page;
2377 xa_mark_t tag;
2378 int i, err = 0;
2379 int blkbits = mpd->inode->i_blkbits;
2380 ext4_lblk_t lblk;
2381 struct buffer_head *head;
2382 handle_t *handle = NULL;
2383 int bpp = ext4_journal_blocks_per_page(mpd->inode);
2384
2385 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2386 tag = PAGECACHE_TAG_TOWRITE;
2387 else
2388 tag = PAGECACHE_TAG_DIRTY;
2389
2390 mpd->map.m_len = 0;
2391 mpd->next_page = index;
2392 if (ext4_should_journal_data(mpd->inode)) {
2393 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2394 bpp);
2395 if (IS_ERR(handle))
2396 return PTR_ERR(handle);
2397 }
2398 folio_batch_init(&fbatch);
2399 while (index <= end) {
2400 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2401 tag, &fbatch);
2402 if (nr_folios == 0)
2403 break;
2404
2405 for (i = 0; i < nr_folios; i++) {
2406 struct folio *folio = fbatch.folios[i];
2407
2408 /*
2409 * Accumulated enough dirty pages? This doesn't apply
2410 * to WB_SYNC_ALL mode. For integrity sync we have to
2411 * keep going because someone may be concurrently
2412 * dirtying pages, and we might have synced a lot of
2413 * newly appeared dirty pages, but have not synced all
2414 * of the old dirty pages.
2415 */
2416 if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2417 mpd->wbc->nr_to_write <=
2418 mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2419 goto out;
2420
2421 /* If we can't merge this page, we are done. */
2422 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2423 goto out;
2424
2425 if (handle) {
2426 err = ext4_journal_ensure_credits(handle, bpp,
2427 0);
2428 if (err < 0)
2429 goto out;
2430 }
2431
2432 folio_lock(folio);
2433 /*
2434 * If the page is no longer dirty, or its mapping no
2435 * longer corresponds to inode we are writing (which
2436 * means it has been truncated or invalidated), or the
2437 * page is already under writeback and we are not doing
2438 * a data integrity writeback, skip the page
2439 */
2440 if (!folio_test_dirty(folio) ||
2441 (folio_test_writeback(folio) &&
2442 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2443 unlikely(folio->mapping != mapping)) {
2444 folio_unlock(folio);
2445 continue;
2446 }
2447
2448 folio_wait_writeback(folio);
2449 BUG_ON(folio_test_writeback(folio));
2450
2451 /*
2452 * Should never happen but for buggy code in
2453 * other subsystems that call
2454 * set_page_dirty() without properly warning
2455 * the file system first. See [1] for more
2456 * information.
2457 *
2458 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2459 */
2460 if (!folio_buffers(folio)) {
2461 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2462 folio_clear_dirty(folio);
2463 folio_unlock(folio);
2464 continue;
2465 }
2466
2467 if (mpd->map.m_len == 0)
2468 mpd->first_page = folio->index;
2469 mpd->next_page = folio_next_index(folio);
2470 /*
2471 * Writeout when we cannot modify metadata is simple.
2472 * Just submit the page. For data=journal mode we
2473 * first handle writeout of the page for checkpoint and
2474 * only after that handle delayed page dirtying. This
2475 * makes sure current data is checkpointed to the final
2476 * location before possibly journalling it again which
2477 * is desirable when the page is frequently dirtied
2478 * through a pin.
2479 */
2480 if (!mpd->can_map) {
2481 err = mpage_submit_folio(mpd, folio);
2482 if (err < 0)
2483 goto out;
2484 /* Pending dirtying of journalled data? */
2485 if (folio_test_checked(folio)) {
2486 err = mpage_journal_page_buffers(handle,
2487 mpd, folio);
2488 if (err < 0)
2489 goto out;
2490 mpd->journalled_more_data = 1;
2491 }
2492 mpage_folio_done(mpd, folio);
2493 } else {
2494 /* Add all dirty buffers to mpd */
2495 lblk = ((ext4_lblk_t)folio->index) <<
2496 (PAGE_SHIFT - blkbits);
2497 head = folio_buffers(folio);
2498 err = mpage_process_page_bufs(mpd, head, head,
2499 lblk);
2500 if (err <= 0)
2501 goto out;
2502 err = 0;
2503 }
2504 }
2505 folio_batch_release(&fbatch);
2506 cond_resched();
2507 }
2508 mpd->scanned_until_end = 1;
2509 if (handle)
2510 ext4_journal_stop(handle);
2511 return 0;
2512 out:
2513 folio_batch_release(&fbatch);
2514 if (handle)
2515 ext4_journal_stop(handle);
2516 return err;
2517 }
2518
2519 static int ext4_do_writepages(struct mpage_da_data *mpd)
2520 {
2521 struct writeback_control *wbc = mpd->wbc;
2522 pgoff_t writeback_index = 0;
2523 long nr_to_write = wbc->nr_to_write;
2524 int range_whole = 0;
2525 int cycled = 1;
2526 handle_t *handle = NULL;
2527 struct inode *inode = mpd->inode;
2528 struct address_space *mapping = inode->i_mapping;
2529 int needed_blocks, rsv_blocks = 0, ret = 0;
2530 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2531 struct blk_plug plug;
2532 bool give_up_on_write = false;
2533
2534 trace_ext4_writepages(inode, wbc);
2535
2536 /*
2537 * No pages to write? This is mainly a kludge to avoid starting
2538 * a transaction for special inodes like journal inode on last iput()
2539 * because that could violate lock ordering on umount
2540 */
2541 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2542 goto out_writepages;
2543
2544 /*
2545 * If the filesystem has aborted, it is read-only, so return
2546 * right away instead of dumping stack traces later on that
2547 * will obscure the real source of the problem. We test
2548 * fs shutdown state instead of sb->s_flag's SB_RDONLY because
2549 * the latter could be true if the filesystem is mounted
2550 * read-only, and in that case, ext4_writepages should
2551 * *never* be called, so if that ever happens, we would want
2552 * the stack trace.
2553 */
2554 if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
2555 ret = -EROFS;
2556 goto out_writepages;
2557 }
2558
2559 /*
2560 * If we have inline data and arrive here, it means that
2561 * we will soon create the block for the 1st page, so
2562 * we'd better clear the inline data here.
2563 */
2564 if (ext4_has_inline_data(inode)) {
2565 /* Just inode will be modified... */
2566 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2567 if (IS_ERR(handle)) {
2568 ret = PTR_ERR(handle);
2569 goto out_writepages;
2570 }
2571 BUG_ON(ext4_test_inode_state(inode,
2572 EXT4_STATE_MAY_INLINE_DATA));
2573 ext4_destroy_inline_data(handle, inode);
2574 ext4_journal_stop(handle);
2575 }
2576
2577 /*
2578 * data=journal mode does not do delalloc so we just need to writeout /
2579 * journal already mapped buffers. On the other hand we need to commit
2580 * transaction to make data stable. We expect all the data to be
2581 * already in the journal (the only exception are DMA pinned pages
2582 * dirtied behind our back) so we commit transaction here and run the
2583 * writeback loop to checkpoint them. The checkpointing is not actually
2584 * necessary to make data persistent *but* quite a few places (extent
2585 * shifting operations, fsverity, ...) depend on being able to drop
2586 * pagecache pages after calling filemap_write_and_wait() and for that
2587 * checkpointing needs to happen.
2588 */
2589 if (ext4_should_journal_data(inode)) {
2590 mpd->can_map = 0;
2591 if (wbc->sync_mode == WB_SYNC_ALL)
2592 ext4_fc_commit(sbi->s_journal,
2593 EXT4_I(inode)->i_datasync_tid);
2594 }
2595 mpd->journalled_more_data = 0;
2596
2597 if (ext4_should_dioread_nolock(inode)) {
2598 /*
2599 * We may need to convert up to one extent per block in
2600 * the page and we may dirty the inode.
2601 */
2602 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2603 PAGE_SIZE >> inode->i_blkbits);
2604 }
2605
2606 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2607 range_whole = 1;
2608
2609 if (wbc->range_cyclic) {
2610 writeback_index = mapping->writeback_index;
2611 if (writeback_index)
2612 cycled = 0;
2613 mpd->first_page = writeback_index;
2614 mpd->last_page = -1;
2615 } else {
2616 mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2617 mpd->last_page = wbc->range_end >> PAGE_SHIFT;
2618 }
2619
2620 ext4_io_submit_init(&mpd->io_submit, wbc);
2621 retry:
2622 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2623 tag_pages_for_writeback(mapping, mpd->first_page,
2624 mpd->last_page);
2625 blk_start_plug(&plug);
2626
2627 /*
2628 * First writeback pages that don't need mapping - we can avoid
2629 * starting a transaction unnecessarily and also avoid being blocked
2630 * in the block layer on device congestion while having transaction
2631 * started.
2632 */
2633 mpd->do_map = 0;
2634 mpd->scanned_until_end = 0;
2635 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2636 if (!mpd->io_submit.io_end) {
2637 ret = -ENOMEM;
2638 goto unplug;
2639 }
2640 ret = mpage_prepare_extent_to_map(mpd);
2641 /* Unlock pages we didn't use */
2642 mpage_release_unused_pages(mpd, false);
2643 /* Submit prepared bio */
2644 ext4_io_submit(&mpd->io_submit);
2645 ext4_put_io_end_defer(mpd->io_submit.io_end);
2646 mpd->io_submit.io_end = NULL;
2647 if (ret < 0)
2648 goto unplug;
2649
2650 while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2651 /* For each extent of pages we use new io_end */
2652 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2653 if (!mpd->io_submit.io_end) {
2654 ret = -ENOMEM;
2655 break;
2656 }
2657
2658 WARN_ON_ONCE(!mpd->can_map);
2659 /*
2660 * We have two constraints: We find one extent to map and we
2661 * must always write out whole page (makes a difference when
2662 * blocksize < pagesize) so that we don't block on IO when we
2663 * try to write out the rest of the page. Journalled mode is
2664 * not supported by delalloc.
2665 */
2666 BUG_ON(ext4_should_journal_data(inode));
2667 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2668
2669 /* start a new transaction */
2670 handle = ext4_journal_start_with_reserve(inode,
2671 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2672 if (IS_ERR(handle)) {
2673 ret = PTR_ERR(handle);
2674 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2675 "%ld pages, ino %lu; err %d", __func__,
2676 wbc->nr_to_write, inode->i_ino, ret);
2677 /* Release allocated io_end */
2678 ext4_put_io_end(mpd->io_submit.io_end);
2679 mpd->io_submit.io_end = NULL;
2680 break;
2681 }
2682 mpd->do_map = 1;
2683
2684 trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2685 ret = mpage_prepare_extent_to_map(mpd);
2686 if (!ret && mpd->map.m_len)
2687 ret = mpage_map_and_submit_extent(handle, mpd,
2688 &give_up_on_write);
2689 /*
2690 * Caution: If the handle is synchronous,
2691 * ext4_journal_stop() can wait for transaction commit
2692 * to finish which may depend on writeback of pages to
2693 * complete or on page lock to be released. In that
2694 * case, we have to wait until after we have
2695 * submitted all the IO, released page locks we hold,
2696 * and dropped io_end reference (for extent conversion
2697 * to be able to complete) before stopping the handle.
2698 */
2699 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2700 ext4_journal_stop(handle);
2701 handle = NULL;
2702 mpd->do_map = 0;
2703 }
2704 /* Unlock pages we didn't use */
2705 mpage_release_unused_pages(mpd, give_up_on_write);
2706 /* Submit prepared bio */
2707 ext4_io_submit(&mpd->io_submit);
2708
2709 /*
2710 * Drop our io_end reference we got from init. We have
2711 * to be careful and use deferred io_end finishing if
2712 * we are still holding the transaction as we can
2713 * release the last reference to io_end which may end
2714 * up doing unwritten extent conversion.
2715 */
2716 if (handle) {
2717 ext4_put_io_end_defer(mpd->io_submit.io_end);
2718 ext4_journal_stop(handle);
2719 } else
2720 ext4_put_io_end(mpd->io_submit.io_end);
2721 mpd->io_submit.io_end = NULL;
2722
2723 if (ret == -ENOSPC && sbi->s_journal) {
2724 /*
2725 * Commit the transaction which would
2726 * free blocks released in the transaction
2727 * and try again
2728 */
2729 jbd2_journal_force_commit_nested(sbi->s_journal);
2730 ret = 0;
2731 continue;
2732 }
2733 /* Fatal error - ENOMEM, EIO... */
2734 if (ret)
2735 break;
2736 }
2737 unplug:
2738 blk_finish_plug(&plug);
2739 if (!ret && !cycled && wbc->nr_to_write > 0) {
2740 cycled = 1;
2741 mpd->last_page = writeback_index - 1;
2742 mpd->first_page = 0;
2743 goto retry;
2744 }
2745
2746 /* Update index */
2747 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2748 /*
2749 * Set the writeback_index so that range_cyclic
2750 * mode will write it back later
2751 */
2752 mapping->writeback_index = mpd->first_page;
2753
2754 out_writepages:
2755 trace_ext4_writepages_result(inode, wbc, ret,
2756 nr_to_write - wbc->nr_to_write);
2757 return ret;
2758 }
2759
2760 static int ext4_writepages(struct address_space *mapping,
2761 struct writeback_control *wbc)
2762 {
2763 struct super_block *sb = mapping->host->i_sb;
2764 struct mpage_da_data mpd = {
2765 .inode = mapping->host,
2766 .wbc = wbc,
2767 .can_map = 1,
2768 };
2769 int ret;
2770 int alloc_ctx;
2771
2772 if (unlikely(ext4_forced_shutdown(sb)))
2773 return -EIO;
2774
2775 alloc_ctx = ext4_writepages_down_read(sb);
2776 ret = ext4_do_writepages(&mpd);
2777 /*
2778 * For data=journal writeback we could have come across pages marked
2779 * for delayed dirtying (PageChecked) which were just added to the
2780 * running transaction. Try once more to get them to stable storage.
2781 */
2782 if (!ret && mpd.journalled_more_data)
2783 ret = ext4_do_writepages(&mpd);
2784 ext4_writepages_up_read(sb, alloc_ctx);
2785
2786 return ret;
2787 }
2788
2789 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2790 {
2791 struct writeback_control wbc = {
2792 .sync_mode = WB_SYNC_ALL,
2793 .nr_to_write = LONG_MAX,
2794 .range_start = jinode->i_dirty_start,
2795 .range_end = jinode->i_dirty_end,
2796 };
2797 struct mpage_da_data mpd = {
2798 .inode = jinode->i_vfs_inode,
2799 .wbc = &wbc,
2800 .can_map = 0,
2801 };
2802 return ext4_do_writepages(&mpd);
2803 }
2804
2805 static int ext4_dax_writepages(struct address_space *mapping,
2806 struct writeback_control *wbc)
2807 {
2808 int ret;
2809 long nr_to_write = wbc->nr_to_write;
2810 struct inode *inode = mapping->host;
2811 int alloc_ctx;
2812
2813 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2814 return -EIO;
2815
2816 alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2817 trace_ext4_writepages(inode, wbc);
2818
2819 ret = dax_writeback_mapping_range(mapping,
2820 EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2821 trace_ext4_writepages_result(inode, wbc, ret,
2822 nr_to_write - wbc->nr_to_write);
2823 ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2824 return ret;
2825 }
2826
2827 static int ext4_nonda_switch(struct super_block *sb)
2828 {
2829 s64 free_clusters, dirty_clusters;
2830 struct ext4_sb_info *sbi = EXT4_SB(sb);
2831
2832 /*
2833 * Switch to non-delalloc mode if we are running low
2834 * on free blocks. The free block accounting via percpu
2835 * counters can get slightly wrong with percpu_counter_batch getting
2836 * accumulated on each CPU without updating global counters.
2837 * Delalloc needs accurate free block accounting, so switch
2838 * to non-delalloc when we are near the error range.
2839 */
2840 free_clusters =
2841 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2842 dirty_clusters =
2843 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2844 /*
2845 * Start pushing delalloc when 1/2 of free blocks are dirty.
2846 */
2847 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2848 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2849
2850 if (2 * free_clusters < 3 * dirty_clusters ||
2851 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2852 /*
2853 * free block count is less than 150% of dirty blocks,
2854 * or free blocks are less than the watermark
2855 */
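		/*
		 * Worked example (illustrative): with 1000 free clusters and
		 * 700 dirty clusters, 2 * 1000 < 3 * 700 (free is below 150%
		 * of dirty), so we return 1 and the caller falls back to
		 * allocating blocks at write time instead of using delalloc.
		 */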
2856 return 1;
2857 }
2858 return 0;
2859 }
2860
2861 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2862 loff_t pos, unsigned len,
2863 struct page **pagep, void **fsdata)
2864 {
2865 int ret, retries = 0;
2866 struct folio *folio;
2867 pgoff_t index;
2868 struct inode *inode = mapping->host;
2869
2870 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2871 return -EIO;
2872
2873 index = pos >> PAGE_SHIFT;
2874
2875 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2876 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2877 return ext4_write_begin(file, mapping, pos,
2878 len, pagep, fsdata);
2879 }
2880 *fsdata = (void *)0;
2881 trace_ext4_da_write_begin(inode, pos, len);
2882
2883 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2884 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2885 pagep, fsdata);
2886 if (ret < 0)
2887 return ret;
2888 if (ret == 1)
2889 return 0;
2890 }
2891
2892 retry:
2893 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2894 mapping_gfp_mask(mapping));
2895 if (IS_ERR(folio))
2896 return PTR_ERR(folio);
2897
2898 #ifdef CONFIG_FS_ENCRYPTION
2899 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2900 #else
2901 ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2902 #endif
2903 if (ret < 0) {
2904 folio_unlock(folio);
2905 folio_put(folio);
2906 /*
2907 * block_write_begin may have instantiated a few blocks
2908 * outside i_size. Trim these off again. We don't need
2909 * i_size_read because we hold the inode lock.
2910 */
2911 if (pos + len > inode->i_size)
2912 ext4_truncate_failed_write(inode);
2913
2914 if (ret == -ENOSPC &&
2915 ext4_should_retry_alloc(inode->i_sb, &retries))
2916 goto retry;
2917 return ret;
2918 }
2919
2920 *pagep = &folio->page;
2921 return ret;
2922 }
2923
2924 /*
2925 * Check if we should update i_disksize
2926 * when write to the end of file but not require block allocation
2927 */
2928 static int ext4_da_should_update_i_disksize(struct folio *folio,
2929 unsigned long offset)
2930 {
2931 struct buffer_head *bh;
2932 struct inode *inode = folio->mapping->host;
2933 unsigned int idx;
2934 int i;
2935
2936 bh = folio_buffers(folio);
2937 idx = offset >> inode->i_blkbits;
2938
2939 for (i = 0; i < idx; i++)
2940 bh = bh->b_this_page;
2941
2942 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2943 return 0;
2944 return 1;
2945 }
2946
2947 static int ext4_da_do_write_end(struct address_space *mapping,
2948 loff_t pos, unsigned len, unsigned copied,
2949 struct page *page)
2950 {
2951 struct inode *inode = mapping->host;
2952 loff_t old_size = inode->i_size;
2953 bool disksize_changed = false;
2954 loff_t new_i_size;
2955
2956 /*
2957 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
2958 * flag, which is all that's needed to trigger page writeback.
2959 */
2960 copied = block_write_end(NULL, mapping, pos, len, copied, page, NULL);
2961 new_i_size = pos + copied;
2962
2963 /*
2964 * It's important to update i_size while still holding page lock,
2965 * because page writeout could otherwise come in and zero beyond
2966 * i_size.
2967 *
2968 * Since we are holding inode lock, we are sure i_disksize <=
2969 * i_size. We also know that if i_disksize < i_size, there are
2970 * delalloc writes pending in the range up to i_size. If the end of
2971 * the current write is <= i_size, there's no need to touch
2972 * i_disksize since writeback will push i_disksize up to i_size
2973 * eventually. If the end of the current write is > i_size and
2974 * inside an allocated block which ext4_da_should_update_i_disksize()
2975 * checked, we need to update i_disksize here, as certain
2976 * ext4_writepages() paths neither allocate blocks nor update i_disksize.
2977 */
2978 if (new_i_size > inode->i_size) {
2979 unsigned long end;
2980
2981 i_size_write(inode, new_i_size);
2982 end = (new_i_size - 1) & (PAGE_SIZE - 1);
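		/*
		 * Note (added for clarity): end is the offset of the last
		 * written byte within its page; e.g. with 4K pages,
		 * new_i_size == 8192 gives end == 4095.
		 */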
2983 if (copied && ext4_da_should_update_i_disksize(page_folio(page), end)) {
2984 ext4_update_i_disksize(inode, new_i_size);
2985 disksize_changed = true;
2986 }
2987 }
2988
2989 unlock_page(page);
2990 put_page(page);
2991
2992 if (old_size < pos)
2993 pagecache_isize_extended(inode, old_size, pos);
2994
2995 if (disksize_changed) {
2996 handle_t *handle;
2997
2998 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
2999 if (IS_ERR(handle))
3000 return PTR_ERR(handle);
3001 ext4_mark_inode_dirty(handle, inode);
3002 ext4_journal_stop(handle);
3003 }
3004
3005 return copied;
3006 }
3007
3008 static int ext4_da_write_end(struct file *file,
3009 struct address_space *mapping,
3010 loff_t pos, unsigned len, unsigned copied,
3011 struct page *page, void *fsdata)
3012 {
3013 struct inode *inode = mapping->host;
3014 int write_mode = (int)(unsigned long)fsdata;
3015 struct folio *folio = page_folio(page);
3016
3017 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3018 return ext4_write_end(file, mapping, pos,
3019 len, copied, &folio->page, fsdata);
3020
3021 trace_ext4_da_write_end(inode, pos, len, copied);
3022
3023 if (write_mode != CONVERT_INLINE_DATA &&
3024 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3025 ext4_has_inline_data(inode))
3026 return ext4_write_inline_data_end(inode, pos, len, copied,
3027 folio);
3028
3029 if (unlikely(copied < len) && !PageUptodate(page))
3030 copied = 0;
3031
3032 return ext4_da_do_write_end(mapping, pos, len, copied, &folio->page);
3033 }
3034
3035 /*
3036 * Force all delayed allocation blocks to be allocated for a given inode.
3037 */
3038 int ext4_alloc_da_blocks(struct inode *inode)
3039 {
3040 trace_ext4_alloc_da_blocks(inode);
3041
3042 if (!EXT4_I(inode)->i_reserved_data_blocks)
3043 return 0;
3044
3045 /*
3046 * We do something simple for now. The filemap_flush() will
3047 * also start triggering a write of the data blocks, which is
3048 * not strictly speaking necessary (and for users of
3049 * laptop_mode, not even desirable). However, to do otherwise
3050 * would require replicating code paths in:
3051 *
3052 * ext4_writepages() ->
3053 * write_cache_pages() ---> (via passed in callback function)
3054 * __mpage_da_writepage() -->
3055 * mpage_add_bh_to_extent()
3056 * mpage_da_map_blocks()
3057 *
3058 * The problem is that write_cache_pages(), located in
3059 * mm/page-writeback.c, marks pages clean in preparation for
3060 * doing I/O, which is not desirable if we're not planning on
3061 * doing I/O at all.
3062 *
3063 * We could call write_cache_pages(), and then redirty all of
3064 * the pages by calling redirty_page_for_writepage() but that
3065 * would be ugly in the extreme. So instead we would need to
3066 * replicate parts of the code in the above functions,
3067 * simplifying them because we wouldn't actually intend to
3068 * write out the pages, but rather only collect contiguous
3069 * logical block extents, call the multi-block allocator, and
3070 * then update the buffer heads with the block allocations.
3071 *
3072 * For now, though, we'll cheat by calling filemap_flush(),
3073 * which will map the blocks, and start the I/O, but not
3074 * actually wait for the I/O to complete.
3075 */
3076 return filemap_flush(inode->i_mapping);
3077 }
3078
3079 /*
3080 * bmap() is special. It gets used by applications such as lilo and by
3081 * the swapper to find the on-disk block of a specific piece of data.
3082 *
3083 * Naturally, this is dangerous if the block concerned is still in the
3084 * journal. If somebody makes a swapfile on an ext4 data-journaling
3085 * filesystem and enables swap, then they may get a nasty shock when the
3086 * data getting swapped to that swapfile suddenly gets overwritten by
3087 * the original zeros written out previously to the journal and
3088 * awaiting writeback in the kernel's buffer cache.
3089 *
3090 * So, if we see any bmap calls here on a modified, data-journaled file,
3091 * take extra steps to flush any blocks which might be in the cache.
3092 */
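/*
 * Userspace view (illustrative sketch, added for clarity): the FIBMAP
 * ioctl reaches this function via the ->bmap() address_space operation,
 * roughly:
 *
 *	int blk = 0;			// logical block to resolve
 *	ioctl(fd, FIBMAP, &blk);	// blk now holds the physical block
 */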
3093 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3094 {
3095 struct inode *inode = mapping->host;
3096 sector_t ret = 0;
3097
3098 inode_lock_shared(inode);
3099 /*
3100 * We can get here for an inline file via the FIBMAP ioctl
3101 */
3102 if (ext4_has_inline_data(inode))
3103 goto out;
3104
3105 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3106 (test_opt(inode->i_sb, DELALLOC) ||
3107 ext4_should_journal_data(inode))) {
3108 /*
3109 * With delalloc or journalled data we want to sync the file so
3110 * that we can make sure we allocate blocks for the file and the
3111 * data is in place for the user to see it
3112 */
3113 filemap_write_and_wait(mapping);
3114 }
3115
3116 ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3117
3118 out:
3119 inode_unlock_shared(inode);
3120 return ret;
3121 }
3122
3123 static int ext4_read_folio(struct file *file, struct folio *folio)
3124 {
3125 int ret = -EAGAIN;
3126 struct inode *inode = folio->mapping->host;
3127
3128 trace_ext4_read_folio(inode, folio);
3129
3130 if (ext4_has_inline_data(inode))
3131 ret = ext4_readpage_inline(inode, folio);
3132
3133 if (ret == -EAGAIN)
3134 return ext4_mpage_readpages(inode, NULL, folio);
3135
3136 return ret;
3137 }
3138
3139 static void ext4_readahead(struct readahead_control *rac)
3140 {
3141 struct inode *inode = rac->mapping->host;
3142
3143 /* If the file has inline data, no need to do readahead. */
3144 if (ext4_has_inline_data(inode))
3145 return;
3146
3147 ext4_mpage_readpages(inode, rac, NULL);
3148 }
3149
3150 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3151 size_t length)
3152 {
3153 trace_ext4_invalidate_folio(folio, offset, length);
3154
3155 /* No journalling happens on data buffers when this function is used */
3156 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3157
3158 block_invalidate_folio(folio, offset, length);
3159 }
3160
3161 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3162 size_t offset, size_t length)
3163 {
3164 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3165
3166 trace_ext4_journalled_invalidate_folio(folio, offset, length);
3167
3168 /*
3169 * If it's a full truncate we just forget about the pending dirtying
3170 */
3171 if (offset == 0 && length == folio_size(folio))
3172 folio_clear_checked(folio);
3173
3174 return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3175 }
3176
3177 /* Wrapper for aops... */
3178 static void ext4_journalled_invalidate_folio(struct folio *folio,
3179 size_t offset,
3180 size_t length)
3181 {
3182 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3183 }
3184
3185 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3186 {
3187 struct inode *inode = folio->mapping->host;
3188 journal_t *journal = EXT4_JOURNAL(inode);
3189
3190 trace_ext4_release_folio(inode, folio);
3191
3192 /* Page has dirty journalled data -> cannot release */
3193 if (folio_test_checked(folio))
3194 return false;
3195 if (journal)
3196 return jbd2_journal_try_to_free_buffers(journal, folio);
3197 else
3198 return try_to_free_buffers(folio);
3199 }
3200
3201 static bool ext4_inode_datasync_dirty(struct inode *inode)
3202 {
3203 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3204
3205 if (journal) {
3206 if (jbd2_transaction_committed(journal,
3207 EXT4_I(inode)->i_datasync_tid))
3208 return false;
3209 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3210 return !list_empty(&EXT4_I(inode)->i_fc_list);
3211 return true;
3212 }
3213
3214 /* Any metadata buffers to write? */
3215 if (!list_empty(&inode->i_mapping->private_list))
3216 return true;
3217 return inode->i_state & I_DIRTY_DATASYNC;
3218 }
3219
3220 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3221 struct ext4_map_blocks *map, loff_t offset,
3222 loff_t length, unsigned int flags)
3223 {
3224 u8 blkbits = inode->i_blkbits;
3225
3226 /*
3227 * Writes that span EOF might trigger an I/O size update on completion,
3228 * so consider them to be dirty for the purpose of O_DSYNC, even if
3229 * no other metadata changes are being made or pending.
3230 */
3231 iomap->flags = 0;
3232 if (ext4_inode_datasync_dirty(inode) ||
3233 offset + length > i_size_read(inode))
3234 iomap->flags |= IOMAP_F_DIRTY;
3235
3236 if (map->m_flags & EXT4_MAP_NEW)
3237 iomap->flags |= IOMAP_F_NEW;
3238
3239 if (flags & IOMAP_DAX)
3240 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3241 else
3242 iomap->bdev = inode->i_sb->s_bdev;
3243 iomap->offset = (u64) map->m_lblk << blkbits;
3244 iomap->length = (u64) map->m_len << blkbits;
3245
3246 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3247 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3248 iomap->flags |= IOMAP_F_MERGED;
3249
3250 /*
3251 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3252 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3253 * set. In order for any allocated unwritten extents to be converted
3254 * into written extents correctly within the ->end_io() handler, we
3255 * need to ensure that the iomap->type is set appropriately. Hence we
3256 * need to check whether the EXT4_MAP_UNWRITTEN bit has been set
3257 * first.
3258 */
3259 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3260 iomap->type = IOMAP_UNWRITTEN;
3261 iomap->addr = (u64) map->m_pblk << blkbits;
3262 if (flags & IOMAP_DAX)
3263 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3264 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3265 iomap->type = IOMAP_MAPPED;
3266 iomap->addr = (u64) map->m_pblk << blkbits;
3267 if (flags & IOMAP_DAX)
3268 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3269 } else {
3270 iomap->type = IOMAP_HOLE;
3271 iomap->addr = IOMAP_NULL_ADDR;
3272 }
3273 }
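/*
 * Summary of the translation above (added for clarity):
 *
 *	EXT4_MAP_UNWRITTEN set (MAPPED may also be set) -> IOMAP_UNWRITTEN
 *	EXT4_MAP_MAPPED only                            -> IOMAP_MAPPED
 *	neither                                         -> IOMAP_HOLE
 */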
3274
3275 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3276 unsigned int flags)
3277 {
3278 handle_t *handle;
3279 u8 blkbits = inode->i_blkbits;
3280 int ret, dio_credits, m_flags = 0, retries = 0;
3281
3282 /*
3283 * Trim the mapping request to the maximum value that we can map at
3284 * once for direct I/O.
3285 */
3286 if (map->m_len > DIO_MAX_BLOCKS)
3287 map->m_len = DIO_MAX_BLOCKS;
3288 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3289
3290 retry:
3291 /*
3292 * Either we allocate blocks and then don't get an unwritten extent, in
3293 * which case we have reserved enough credits. Or the blocks are
3294 * already allocated and unwritten; in that case, the extent conversion
3295 * fits into the credits as well.
3296 */
3297 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3298 if (IS_ERR(handle))
3299 return PTR_ERR(handle);
3300
3301 /*
3302 * DAX and direct I/O are the only two operations that are currently
3303 * supported with IOMAP_WRITE.
3304 */
3305 WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3306 if (flags & IOMAP_DAX)
3307 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3308 /*
3309 * We use i_size instead of i_disksize here because delalloc writeback
3310 * can complete at any point during the I/O and subsequently push the
3311 * i_disksize out to i_size. This could be beyond where direct I/O is
3312 * happening and thus expose allocated blocks to direct I/O reads.
3313 */
3314 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3315 m_flags = EXT4_GET_BLOCKS_CREATE;
3316 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3317 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3318
3319 ret = ext4_map_blocks(handle, inode, map, m_flags);
3320
3321 /*
3322 * We cannot fill holes in indirect tree based inodes as that could
3323 * expose stale data in the case of a crash. Use the magic error code
3324 * to fall back to buffered I/O.
3325 */
3326 if (!m_flags && !ret)
3327 ret = -ENOTBLK;
3328
3329 ext4_journal_stop(handle);
3330 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3331 goto retry;
3332
3333 return ret;
3334 }
3335
3336
3337 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3338 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3339 {
3340 int ret;
3341 struct ext4_map_blocks map;
3342 u8 blkbits = inode->i_blkbits;
3343
3344 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3345 return -EINVAL;
3346
3347 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3348 return -ERANGE;
3349
3350 /*
3351 * Calculate the first and last logical blocks respectively.
3352 */
3353 map.m_lblk = offset >> blkbits;
3354 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3355 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
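/*
 * For example, with 4k blocks (blkbits == 12), offset == 5000 and
 * length == 3000: the byte range 5000..7999 lies entirely inside
 * logical block 1, so m_lblk == 1 and m_len == (7999 >> 12) - 1 + 1 == 1.
 */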
3356
3357 if (flags & IOMAP_WRITE) {
3358 /*
3359 * We check here if the blocks are already allocated; if so, we
3360 * don't need to start a journal txn and can directly return
3361 * the mapping information. This can boost performance,
3362 * especially for multi-threaded overwrite requests.
3363 */
3364 if (offset + length <= i_size_read(inode)) {
3365 ret = ext4_map_blocks(NULL, inode, &map, 0);
3366 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3367 goto out;
3368 }
3369 ret = ext4_iomap_alloc(inode, &map, flags);
3370 } else {
3371 ret = ext4_map_blocks(NULL, inode, &map, 0);
3372 }
3373
3374 if (ret < 0)
3375 return ret;
3376 out:
3377 /*
3378 * When inline encryption is enabled, sometimes I/O to an encrypted file
3379 * has to be broken up to guarantee DUN contiguity. Handle this by
3380 * limiting the length of the mapping returned.
3381 */
3382 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3383
3384 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3385
3386 return 0;
3387 }
3388
3389 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3390 loff_t length, unsigned flags, struct iomap *iomap,
3391 struct iomap *srcmap)
3392 {
3393 int ret;
3394
3395 /*
3396 * Even for writes we don't need to allocate blocks, so just pretend
3397 * we are reading to save the overhead of starting a transaction.
3398 */
3399 flags &= ~IOMAP_WRITE;
3400 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3401 WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
3402 return ret;
3403 }
3404
3405 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3406 ssize_t written, unsigned flags, struct iomap *iomap)
3407 {
3408 /*
3409 * Check to see whether an error occurred while writing out the data to
3410 * the allocated blocks. If so, return the magic error code so that we
3411 * fall back to buffered I/O and attempt to complete the remainder of
3412 * the I/O. Any blocks that may have been allocated in preparation for
3413 * the direct I/O will be reused during buffered I/O.
3414 */
3415 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3416 return -ENOTBLK;
3417
3418 return 0;
3419 }
3420
3421 const struct iomap_ops ext4_iomap_ops = {
3422 .iomap_begin = ext4_iomap_begin,
3423 .iomap_end = ext4_iomap_end,
3424 };
3425
3426 const struct iomap_ops ext4_iomap_overwrite_ops = {
3427 .iomap_begin = ext4_iomap_overwrite_begin,
3428 .iomap_end = ext4_iomap_end,
3429 };
3430
3431 static bool ext4_iomap_is_delalloc(struct inode *inode,
3432 struct ext4_map_blocks *map)
3433 {
3434 struct extent_status es;
3435 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3436
3437 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3438 map->m_lblk, end, &es);
3439
3440 if (!es.es_len || es.es_lblk > end)
3441 return false;
3442
3443 if (es.es_lblk > map->m_lblk) {
3444 map->m_len = es.es_lblk - map->m_lblk;
3445 return false;
3446 }
3447
3448 offset = map->m_lblk - es.es_lblk;
3449 map->m_len = es.es_len - offset;
3450
3451 return true;
3452 }
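/*
 * For example, for a query covering blocks 10..14: a delayed extent
 * starting at block 12 trims the result to m_len == 2 (blocks 10..11
 * are not delalloc, return false), while a delayed extent covering
 * blocks 8..11 yields m_len == 4 - 2 == 2 with a true return
 * (blocks 10..11 are delalloc).
 */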
3453
3454 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3455 loff_t length, unsigned int flags,
3456 struct iomap *iomap, struct iomap *srcmap)
3457 {
3458 int ret;
3459 bool delalloc = false;
3460 struct ext4_map_blocks map;
3461 u8 blkbits = inode->i_blkbits;
3462
3463 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3464 return -EINVAL;
3465
3466 if (ext4_has_inline_data(inode)) {
3467 ret = ext4_inline_data_iomap(inode, iomap);
3468 if (ret != -EAGAIN) {
3469 if (ret == 0 && offset >= iomap->length)
3470 ret = -ENOENT;
3471 return ret;
3472 }
3473 }
3474
3475 /*
3476 * Calculate the first and last logical block respectively.
3477 */
3478 map.m_lblk = offset >> blkbits;
3479 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3480 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3481
3482 /*
3483 * Fiemap callers may call for offsets beyond s_bitmap_maxbytes,
3484 * so handle that case here instead of querying
3485 * ext4_map_blocks(), which would warn about it and
3486 * return -EIO.
3487 */
3488 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3489 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3490
3491 if (offset >= sbi->s_bitmap_maxbytes) {
3492 map.m_flags = 0;
3493 goto set_iomap;
3494 }
3495 }
3496
3497 ret = ext4_map_blocks(NULL, inode, &map, 0);
3498 if (ret < 0)
3499 return ret;
3500 if (ret == 0)
3501 delalloc = ext4_iomap_is_delalloc(inode, &map);
3502
3503 set_iomap:
3504 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3505 if (delalloc && iomap->type == IOMAP_HOLE)
3506 iomap->type = IOMAP_DELALLOC;
3507
3508 return 0;
3509 }
3510
3511 const struct iomap_ops ext4_iomap_report_ops = {
3512 .iomap_begin = ext4_iomap_begin_report,
3513 };
3514
3515 /*
3516 * For data=journal mode, a folio should be marked dirty only when it was
3517 * writeably mapped. When that happens, it was already attached to the
3518 * transaction and marked as jbddirty (we take care of this in
3519 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3520 * so we should have nothing to do here, except for the case when someone
3521 * had the page pinned and dirtied the page through this pin (e.g. by doing
3522 * direct IO to it). In that case we'd need to attach buffers here to the
3523 * transaction but we cannot due to lock ordering. We cannot just dirty the
3524 * folio and leave attached buffers clean, because the buffers' dirty state is
3525 * "definitive". We cannot just set the buffers dirty or jbddirty because all
3526 * the journalling code will explode. So what we do is to mark the folio
3527 * "pending dirty" and next time ext4_writepages() is called, attach buffers
3528 * to the transaction appropriately.
3529 */
3530 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3531 struct folio *folio)
3532 {
3533 WARN_ON_ONCE(!folio_buffers(folio));
3534 if (folio_maybe_dma_pinned(folio))
3535 folio_set_checked(folio);
3536 return filemap_dirty_folio(mapping, folio);
3537 }
3538
3539 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3540 {
3541 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3542 WARN_ON_ONCE(!folio_buffers(folio));
3543 return block_dirty_folio(mapping, folio);
3544 }
3545
3546 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3547 struct file *file, sector_t *span)
3548 {
3549 return iomap_swapfile_activate(sis, file, span,
3550 &ext4_iomap_report_ops);
3551 }
3552
3553 static const struct address_space_operations ext4_aops = {
3554 .read_folio = ext4_read_folio,
3555 .readahead = ext4_readahead,
3556 .writepages = ext4_writepages,
3557 .write_begin = ext4_write_begin,
3558 .write_end = ext4_write_end,
3559 .dirty_folio = ext4_dirty_folio,
3560 .bmap = ext4_bmap,
3561 .invalidate_folio = ext4_invalidate_folio,
3562 .release_folio = ext4_release_folio,
3563 .direct_IO = noop_direct_IO,
3564 .migrate_folio = buffer_migrate_folio,
3565 .is_partially_uptodate = block_is_partially_uptodate,
3566 .error_remove_page = generic_error_remove_page,
3567 .swap_activate = ext4_iomap_swap_activate,
3568 };
3569
3570 static const struct address_space_operations ext4_journalled_aops = {
3571 .read_folio = ext4_read_folio,
3572 .readahead = ext4_readahead,
3573 .writepages = ext4_writepages,
3574 .write_begin = ext4_write_begin,
3575 .write_end = ext4_journalled_write_end,
3576 .dirty_folio = ext4_journalled_dirty_folio,
3577 .bmap = ext4_bmap,
3578 .invalidate_folio = ext4_journalled_invalidate_folio,
3579 .release_folio = ext4_release_folio,
3580 .direct_IO = noop_direct_IO,
3581 .migrate_folio = buffer_migrate_folio_norefs,
3582 .is_partially_uptodate = block_is_partially_uptodate,
3583 .error_remove_page = generic_error_remove_page,
3584 .swap_activate = ext4_iomap_swap_activate,
3585 };
3586
3587 static const struct address_space_operations ext4_da_aops = {
3588 .read_folio = ext4_read_folio,
3589 .readahead = ext4_readahead,
3590 .writepages = ext4_writepages,
3591 .write_begin = ext4_da_write_begin,
3592 .write_end = ext4_da_write_end,
3593 .dirty_folio = ext4_dirty_folio,
3594 .bmap = ext4_bmap,
3595 .invalidate_folio = ext4_invalidate_folio,
3596 .release_folio = ext4_release_folio,
3597 .direct_IO = noop_direct_IO,
3598 .migrate_folio = buffer_migrate_folio,
3599 .is_partially_uptodate = block_is_partially_uptodate,
3600 .error_remove_page = generic_error_remove_page,
3601 .swap_activate = ext4_iomap_swap_activate,
3602 };
3603
3604 static const struct address_space_operations ext4_dax_aops = {
3605 .writepages = ext4_dax_writepages,
3606 .direct_IO = noop_direct_IO,
3607 .dirty_folio = noop_dirty_folio,
3608 .bmap = ext4_bmap,
3609 .swap_activate = ext4_iomap_swap_activate,
3610 };
3611
3612 void ext4_set_aops(struct inode *inode)
3613 {
3614 switch (ext4_inode_journal_mode(inode)) {
3615 case EXT4_INODE_ORDERED_DATA_MODE:
3616 case EXT4_INODE_WRITEBACK_DATA_MODE:
3617 break;
3618 case EXT4_INODE_JOURNAL_DATA_MODE:
3619 inode->i_mapping->a_ops = &ext4_journalled_aops;
3620 return;
3621 default:
3622 BUG();
3623 }
3624 if (IS_DAX(inode))
3625 inode->i_mapping->a_ops = &ext4_dax_aops;
3626 else if (test_opt(inode->i_sb, DELALLOC))
3627 inode->i_mapping->a_ops = &ext4_da_aops;
3628 else
3629 inode->i_mapping->a_ops = &ext4_aops;
3630 }
3631
3632 static int __ext4_block_zero_page_range(handle_t *handle,
3633 struct address_space *mapping, loff_t from, loff_t length)
3634 {
3635 ext4_fsblk_t index = from >> PAGE_SHIFT;
3636 unsigned offset = from & (PAGE_SIZE-1);
3637 unsigned blocksize, pos;
3638 ext4_lblk_t iblock;
3639 struct inode *inode = mapping->host;
3640 struct buffer_head *bh;
3641 struct folio *folio;
3642 int err = 0;
3643
3644 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3645 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3646 mapping_gfp_constraint(mapping, ~__GFP_FS));
3647 if (IS_ERR(folio))
3648 return PTR_ERR(folio);
3649
3650 blocksize = inode->i_sb->s_blocksize;
3651
3652 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3653
3654 bh = folio_buffers(folio);
3655 if (!bh) {
3656 create_empty_buffers(&folio->page, blocksize, 0);
3657 bh = folio_buffers(folio);
3658 }
3659
3660 /* Find the buffer that contains "offset" */
3661 pos = blocksize;
3662 while (offset >= pos) {
3663 bh = bh->b_this_page;
3664 iblock++;
3665 pos += blocksize;
3666 }
3667 if (buffer_freed(bh)) {
3668 BUFFER_TRACE(bh, "freed: skip");
3669 goto unlock;
3670 }
3671 if (!buffer_mapped(bh)) {
3672 BUFFER_TRACE(bh, "unmapped");
3673 ext4_get_block(inode, iblock, bh, 0);
3674 /* unmapped? It's a hole - nothing to do */
3675 if (!buffer_mapped(bh)) {
3676 BUFFER_TRACE(bh, "still unmapped");
3677 goto unlock;
3678 }
3679 }
3680
3681 /* Ok, it's mapped. Make sure it's up-to-date */
3682 if (folio_test_uptodate(folio))
3683 set_buffer_uptodate(bh);
3684
3685 if (!buffer_uptodate(bh)) {
3686 err = ext4_read_bh_lock(bh, 0, true);
3687 if (err)
3688 goto unlock;
3689 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3690 /* We expect the key to be set. */
3691 BUG_ON(!fscrypt_has_encryption_key(inode));
3692 err = fscrypt_decrypt_pagecache_blocks(folio,
3693 blocksize,
3694 bh_offset(bh));
3695 if (err) {
3696 clear_buffer_uptodate(bh);
3697 goto unlock;
3698 }
3699 }
3700 }
3701 if (ext4_should_journal_data(inode)) {
3702 BUFFER_TRACE(bh, "get write access");
3703 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3704 EXT4_JTR_NONE);
3705 if (err)
3706 goto unlock;
3707 }
3708 folio_zero_range(folio, offset, length);
3709 BUFFER_TRACE(bh, "zeroed end of block");
3710
3711 if (ext4_should_journal_data(inode)) {
3712 err = ext4_dirty_journalled_data(handle, bh);
3713 } else {
3714 err = 0;
3715 mark_buffer_dirty(bh);
3716 if (ext4_should_order_data(inode))
3717 err = ext4_jbd2_inode_add_write(handle, inode, from,
3718 length);
3719 }
3720
3721 unlock:
3722 folio_unlock(folio);
3723 folio_put(folio);
3724 return err;
3725 }
3726
3727 /*
3728 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3729 * starting from file offset 'from'. The range to be zeroed must
3730 * be contained within one block. If the specified range exceeds
3731 * the end of the block, it will be shortened to the end of the block
3732 * that corresponds to 'from'.
3733 */
3734 static int ext4_block_zero_page_range(handle_t *handle,
3735 struct address_space *mapping, loff_t from, loff_t length)
3736 {
3737 struct inode *inode = mapping->host;
3738 unsigned offset = from & (PAGE_SIZE-1);
3739 unsigned blocksize = inode->i_sb->s_blocksize;
3740 unsigned max = blocksize - (offset & (blocksize - 1));
3741
3742 /*
3743 * correct length if it does not fall between
3744 * 'from' and the end of the block
3745 */
3746 if (length > max || length < 0)
3747 length = max;
3748
3749 if (IS_DAX(inode)) {
3750 return dax_zero_range(inode, from, length, NULL,
3751 &ext4_iomap_ops);
3752 }
3753 return __ext4_block_zero_page_range(handle, mapping, from, length);
3754 }
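/*
 * For example, with a 4k block size and from == 5000 (offset == 904
 * within the page): max == 4096 - 904 == 3192, so zeroing extends at
 * most to byte 8191, the end of the block that contains 'from'.
 */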
3755
3756 /*
3757 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3758 * up to the end of the block which corresponds to `from'.
3759 * This is required during truncate. We need to physically zero the tail end
3760 * of that block so it doesn't yield old data if the file is later grown.
3761 */
3762 static int ext4_block_truncate_page(handle_t *handle,
3763 struct address_space *mapping, loff_t from)
3764 {
3765 unsigned offset = from & (PAGE_SIZE-1);
3766 unsigned length;
3767 unsigned blocksize;
3768 struct inode *inode = mapping->host;
3769
3770 /* If we are processing an encrypted inode during orphan list handling */
3771 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3772 return 0;
3773
3774 blocksize = inode->i_sb->s_blocksize;
3775 length = blocksize - (offset & (blocksize - 1));
3776
3777 return ext4_block_zero_page_range(handle, mapping, from, length);
3778 }
3779
3780 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3781 loff_t lstart, loff_t length)
3782 {
3783 struct super_block *sb = inode->i_sb;
3784 struct address_space *mapping = inode->i_mapping;
3785 unsigned partial_start, partial_end;
3786 ext4_fsblk_t start, end;
3787 loff_t byte_end = (lstart + length - 1);
3788 int err = 0;
3789
3790 partial_start = lstart & (sb->s_blocksize - 1);
3791 partial_end = byte_end & (sb->s_blocksize - 1);
3792
3793 start = lstart >> sb->s_blocksize_bits;
3794 end = byte_end >> sb->s_blocksize_bits;
3795
3796 /* Handle partial zero within a single block */
3797 if (start == end &&
3798 (partial_start || (partial_end != sb->s_blocksize - 1))) {
3799 err = ext4_block_zero_page_range(handle, mapping,
3800 lstart, length);
3801 return err;
3802 }
3803 /* Handle partial zero out on the start of the range */
3804 if (partial_start) {
3805 err = ext4_block_zero_page_range(handle, mapping,
3806 lstart, sb->s_blocksize);
3807 if (err)
3808 return err;
3809 }
3810 /* Handle partial zero out on the end of the range */
3811 if (partial_end != sb->s_blocksize - 1)
3812 err = ext4_block_zero_page_range(handle, mapping,
3813 byte_end - partial_end,
3814 partial_end + 1);
3815 return err;
3816 }
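/*
 * For example, with 4k blocks, lstart == 1000 and length == 10000
 * (byte_end == 10999): partial_start == 1000 and partial_end == 2807,
 * so bytes 1000..4095 and 8192..10999 are zeroed here, while the fully
 * covered block 1 (bytes 4096..8191) is left to the block removal code.
 */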
3817
3818 int ext4_can_truncate(struct inode *inode)
3819 {
3820 if (S_ISREG(inode->i_mode))
3821 return 1;
3822 if (S_ISDIR(inode->i_mode))
3823 return 1;
3824 if (S_ISLNK(inode->i_mode))
3825 return !ext4_inode_is_fast_symlink(inode);
3826 return 0;
3827 }
3828
3829 /*
3830 * We have to make sure i_disksize gets properly updated before we truncate
3831 * page cache due to hole punching or zero range. Otherwise i_disksize update
3832 * can get lost as it may have been postponed to submission of writeback but
3833 * that will never happen after we truncate page cache.
3834 */
3835 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3836 loff_t len)
3837 {
3838 handle_t *handle;
3839 int ret;
3840
3841 loff_t size = i_size_read(inode);
3842
3843 WARN_ON(!inode_is_locked(inode));
3844 if (offset > size || offset + len < size)
3845 return 0;
3846
3847 if (EXT4_I(inode)->i_disksize >= size)
3848 return 0;
3849
3850 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3851 if (IS_ERR(handle))
3852 return PTR_ERR(handle);
3853 ext4_update_i_disksize(inode, size);
3854 ret = ext4_mark_inode_dirty(handle, inode);
3855 ext4_journal_stop(handle);
3856
3857 return ret;
3858 }
3859
3860 static void ext4_wait_dax_page(struct inode *inode)
3861 {
3862 filemap_invalidate_unlock(inode->i_mapping);
3863 schedule();
3864 filemap_invalidate_lock(inode->i_mapping);
3865 }
3866
3867 int ext4_break_layouts(struct inode *inode)
3868 {
3869 struct page *page;
3870 int error;
3871
3872 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3873 return -EINVAL;
3874
3875 do {
3876 page = dax_layout_busy_page(inode->i_mapping);
3877 if (!page)
3878 return 0;
3879
3880 error = ___wait_var_event(&page->_refcount,
3881 atomic_read(&page->_refcount) == 1,
3882 TASK_INTERRUPTIBLE, 0, 0,
3883 ext4_wait_dax_page(inode));
3884 } while (error == 0);
3885
3886 return error;
3887 }
3888
3889 /*
3890 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3891 * associated with the given offset and length
3892 *
3893 * @inode: File inode
3894 * @offset: The offset where the hole will begin
3895 * @len: The length of the hole
3896 *
3897 * Returns: 0 on success or negative on failure
3898 */
3899
3900 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3901 {
3902 struct inode *inode = file_inode(file);
3903 struct super_block *sb = inode->i_sb;
3904 ext4_lblk_t first_block, stop_block;
3905 struct address_space *mapping = inode->i_mapping;
3906 loff_t first_block_offset, last_block_offset, max_length;
3907 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3908 handle_t *handle;
3909 unsigned int credits;
3910 int ret = 0, ret2 = 0;
3911
3912 trace_ext4_punch_hole(inode, offset, length, 0);
3913
3914 /*
3915 * Write out all dirty pages to avoid race conditions,
3916 * then release them.
3917 */
3918 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3919 ret = filemap_write_and_wait_range(mapping, offset,
3920 offset + length - 1);
3921 if (ret)
3922 return ret;
3923 }
3924
3925 inode_lock(inode);
3926
3927 /* No need to punch hole beyond i_size */
3928 if (offset >= inode->i_size)
3929 goto out_mutex;
3930
3931 /*
3932 * If the hole extends beyond i_size, set the hole
3933 * to end after the page that contains i_size
3934 */
3935 if (offset + length > inode->i_size) {
3936 length = inode->i_size +
3937 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3938 offset;
3939 }
3940
3941 /*
3942 * For punch hole, offset + length must fall within one block before
3943 * the last allowed range. Adjust the length if it exceeds that limit.
3944 */
3945 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3946 if (offset + length > max_length)
3947 length = max_length - offset;
3948
3949 if (offset & (sb->s_blocksize - 1) ||
3950 (offset + length) & (sb->s_blocksize - 1)) {
3951 /*
3952 * Attach jinode to inode for jbd2 if we do any zeroing of a
3953 * partial block.
3954 */
3955 ret = ext4_inode_attach_jinode(inode);
3956 if (ret < 0)
3957 goto out_mutex;
3958
3959 }
3960
3961 /* Wait for all existing dio workers; newcomers will block on i_rwsem */
3962 inode_dio_wait(inode);
3963
3964 ret = file_modified(file);
3965 if (ret)
3966 goto out_mutex;
3967
3968 /*
3969 * Prevent page faults from reinstantiating pages we have released from
3970 * page cache.
3971 */
3972 filemap_invalidate_lock(mapping);
3973
3974 ret = ext4_break_layouts(inode);
3975 if (ret)
3976 goto out_dio;
3977
3978 first_block_offset = round_up(offset, sb->s_blocksize);
3979 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3980
3981 /* Now release the pages and zero the block-aligned part of pages */
3982 if (last_block_offset > first_block_offset) {
3983 ret = ext4_update_disksize_before_punch(inode, offset, length);
3984 if (ret)
3985 goto out_dio;
3986 truncate_pagecache_range(inode, first_block_offset,
3987 last_block_offset);
3988 }
3989
3990 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3991 credits = ext4_writepage_trans_blocks(inode);
3992 else
3993 credits = ext4_blocks_for_truncate(inode);
3994 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3995 if (IS_ERR(handle)) {
3996 ret = PTR_ERR(handle);
3997 ext4_std_error(sb, ret);
3998 goto out_dio;
3999 }
4000
4001 ret = ext4_zero_partial_blocks(handle, inode, offset,
4002 length);
4003 if (ret)
4004 goto out_stop;
4005
4006 first_block = (offset + sb->s_blocksize - 1) >>
4007 EXT4_BLOCK_SIZE_BITS(sb);
4008 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
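/*
 * For example, with 4k blocks, offset == 1000 and length == 10000:
 * first_block == (1000 + 4095) >> 12 == 1 and
 * stop_block == 11000 >> 12 == 2, so only the fully covered block 1
 * is removed below.
 */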
4009
4010 /* If there are blocks to remove, do it */
4011 if (stop_block > first_block) {
4012
4013 down_write(&EXT4_I(inode)->i_data_sem);
4014 ext4_discard_preallocations(inode, 0);
4015
4016 ext4_es_remove_extent(inode, first_block,
4017 stop_block - first_block);
4018
4019 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4020 ret = ext4_ext_remove_space(inode, first_block,
4021 stop_block - 1);
4022 else
4023 ret = ext4_ind_remove_space(handle, inode, first_block,
4024 stop_block);
4025
4026 up_write(&EXT4_I(inode)->i_data_sem);
4027 }
4028 ext4_fc_track_range(handle, inode, first_block, stop_block);
4029 if (IS_SYNC(inode))
4030 ext4_handle_sync(handle);
4031
4032 inode->i_mtime = inode_set_ctime_current(inode);
4033 ret2 = ext4_mark_inode_dirty(handle, inode);
4034 if (unlikely(ret2))
4035 ret = ret2;
4036 if (ret >= 0)
4037 ext4_update_inode_fsync_trans(handle, inode, 1);
4038 out_stop:
4039 ext4_journal_stop(handle);
4040 out_dio:
4041 filemap_invalidate_unlock(mapping);
4042 out_mutex:
4043 inode_unlock(inode);
4044 return ret;
4045 }
4046
4047 int ext4_inode_attach_jinode(struct inode *inode)
4048 {
4049 struct ext4_inode_info *ei = EXT4_I(inode);
4050 struct jbd2_inode *jinode;
4051
4052 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4053 return 0;
4054
4055 jinode = jbd2_alloc_inode(GFP_KERNEL);
4056 spin_lock(&inode->i_lock);
4057 if (!ei->jinode) {
4058 if (!jinode) {
4059 spin_unlock(&inode->i_lock);
4060 return -ENOMEM;
4061 }
4062 ei->jinode = jinode;
4063 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4064 jinode = NULL;
4065 }
4066 spin_unlock(&inode->i_lock);
4067 if (unlikely(jinode != NULL))
4068 jbd2_free_inode(jinode);
4069 return 0;
4070 }
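/*
 * Note the allocate-before-lock pattern above: the jbd2_inode is
 * allocated outside i_lock (GFP_KERNEL may sleep, i_lock is a
 * spinlock) and freed again if another task attached one first.
 */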
4071
4072 /*
4073 * ext4_truncate()
4074 *
4075 * We block out ext4_get_block() block instantiations across the entire
4076 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4077 * simultaneously on behalf of the same inode.
4078 *
4079 * As we work through the truncate and commit bits of it to the journal there
4080 * is one core, guiding principle: the file's tree must always be consistent on
4081 * disk. We must be able to restart the truncate after a crash.
4082 *
4083 * The file's tree may be transiently inconsistent in memory (although it
4084 * probably isn't), but whenever we close off and commit a journal transaction,
4085 * the contents of (the filesystem + the journal) must be consistent and
4086 * restartable. It's pretty simple, really: bottom up, right to left (although
4087 * left-to-right works OK too).
4088 *
4089 * Note that at recovery time, journal replay occurs *before* the restart of
4090 * truncate against the orphan inode list.
4091 *
4092 * The committed inode has the new, desired i_size (which is the same as
4093 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4094 * that this inode's truncate did not complete and it will again call
4095 * ext4_truncate() to have another go. So there will be instantiated blocks
4096 * to the right of the truncation point in a crashed ext4 filesystem. But
4097 * that's fine - as long as they are linked from the inode, the post-crash
4098 * ext4_truncate() run will find them and release them.
4099 */
4100 int ext4_truncate(struct inode *inode)
4101 {
4102 struct ext4_inode_info *ei = EXT4_I(inode);
4103 unsigned int credits;
4104 int err = 0, err2;
4105 handle_t *handle;
4106 struct address_space *mapping = inode->i_mapping;
4107
4108 /*
4109 * There is a possibility that we're either freeing the inode
4110 * or it's a completely new inode. In those cases we might not
4111 * have i_rwsem locked because it's not necessary.
4112 */
4113 if (!(inode->i_state & (I_NEW|I_FREEING)))
4114 WARN_ON(!inode_is_locked(inode));
4115 trace_ext4_truncate_enter(inode);
4116
4117 if (!ext4_can_truncate(inode))
4118 goto out_trace;
4119
4120 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4121 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4122
4123 if (ext4_has_inline_data(inode)) {
4124 int has_inline = 1;
4125
4126 err = ext4_inline_data_truncate(inode, &has_inline);
4127 if (err || has_inline)
4128 goto out_trace;
4129 }
4130
4131 /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4132 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4133 err = ext4_inode_attach_jinode(inode);
4134 if (err)
4135 goto out_trace;
4136 }
4137
4138 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4139 credits = ext4_writepage_trans_blocks(inode);
4140 else
4141 credits = ext4_blocks_for_truncate(inode);
4142
4143 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4144 if (IS_ERR(handle)) {
4145 err = PTR_ERR(handle);
4146 goto out_trace;
4147 }
4148
4149 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4150 ext4_block_truncate_page(handle, mapping, inode->i_size);
4151
4152 /*
4153 * We add the inode to the orphan list, so that if this
4154 * truncate spans multiple transactions, and we crash, we will
4155 * resume the truncate when the filesystem recovers. It also
4156 * marks the inode dirty, to catch the new size.
4157 *
4158 * Implication: the file must always be in a sane, consistent
4159 * truncatable state while each transaction commits.
4160 */
4161 err = ext4_orphan_add(handle, inode);
4162 if (err)
4163 goto out_stop;
4164
4165 down_write(&EXT4_I(inode)->i_data_sem);
4166
4167 ext4_discard_preallocations(inode, 0);
4168
4169 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4170 err = ext4_ext_truncate(handle, inode);
4171 else
4172 ext4_ind_truncate(handle, inode);
4173
4174 up_write(&ei->i_data_sem);
4175 if (err)
4176 goto out_stop;
4177
4178 if (IS_SYNC(inode))
4179 ext4_handle_sync(handle);
4180
4181 out_stop:
4182 /*
4183 * If this was a simple ftruncate() and the file will remain alive,
4184 * then we need to clear up the orphan record which we created above.
4185 * However, if this was a real unlink then we were called by
4186 * ext4_evict_inode(), and we allow that function to clean up the
4187 * orphan info for us.
4188 */
4189 if (inode->i_nlink)
4190 ext4_orphan_del(handle, inode);
4191
4192 inode->i_mtime = inode_set_ctime_current(inode);
4193 err2 = ext4_mark_inode_dirty(handle, inode);
4194 if (unlikely(err2 && !err))
4195 err = err2;
4196 ext4_journal_stop(handle);
4197
4198 out_trace:
4199 trace_ext4_truncate_exit(inode);
4200 return err;
4201 }
4202
4203 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4204 {
4205 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4206 return inode_peek_iversion_raw(inode);
4207 else
4208 return inode_peek_iversion(inode);
4209 }
4210
4211 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4212 struct ext4_inode_info *ei)
4213 {
4214 struct inode *inode = &(ei->vfs_inode);
4215 u64 i_blocks = READ_ONCE(inode->i_blocks);
4216 struct super_block *sb = inode->i_sb;
4217
4218 if (i_blocks <= ~0U) {
4219 /*
4220 * i_blocks can be represented in a 32 bit variable
4221 * as a multiple of 512 bytes
4222 */
4223 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4224 raw_inode->i_blocks_high = 0;
4225 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4226 return 0;
4227 }
4228
4229 /*
4230 * This should never happen since sb->s_maxbytes should not have
4231 * allowed this; sb->s_maxbytes was set according to the huge_file
4232 * feature in ext4_fill_super().
4233 */
4234 if (!ext4_has_feature_huge_file(sb))
4235 return -EFSCORRUPTED;
4236
4237 if (i_blocks <= 0xffffffffffffULL) {
4238 /*
4239 * i_blocks can be represented in a 48 bit variable
4240 * as a multiple of 512 bytes
4241 */
4242 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4243 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4244 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4245 } else {
4246 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4247 /* i_blocks is stored in units of the file system block size */
4248 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4249 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4250 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4251 }
4252 return 0;
4253 }
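/*
 * For example, i_blocks == 1ULL << 32 (a 2 TiB file in 512-byte units)
 * does not fit in 32 bits but fits in 48: i_blocks_lo == 0,
 * i_blocks_high == 1 and EXT4_INODE_HUGE_FILE stays clear. Only counts
 * above 2^48 - 1 are stored in filesystem-block units (shifted right by
 * i_blkbits - 9, i.e. by 3 for 4k blocks) with the HUGE_FILE flag set.
 */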
4254
4255 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4256 {
4257 struct ext4_inode_info *ei = EXT4_I(inode);
4258 uid_t i_uid;
4259 gid_t i_gid;
4260 projid_t i_projid;
4261 int block;
4262 int err;
4263
4264 err = ext4_inode_blocks_set(raw_inode, ei);
4265
4266 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4267 i_uid = i_uid_read(inode);
4268 i_gid = i_gid_read(inode);
4269 i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4270 if (!(test_opt(inode->i_sb, NO_UID32))) {
4271 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4272 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4273 /*
4274 * Fix up interoperability with old kernels. Otherwise,
4275 * old inodes get re-used with the upper 16 bits of the
4276 * uid/gid intact.
4277 */
4278 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4279 raw_inode->i_uid_high = 0;
4280 raw_inode->i_gid_high = 0;
4281 } else {
4282 raw_inode->i_uid_high =
4283 cpu_to_le16(high_16_bits(i_uid));
4284 raw_inode->i_gid_high =
4285 cpu_to_le16(high_16_bits(i_gid));
4286 }
4287 } else {
4288 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4289 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4290 raw_inode->i_uid_high = 0;
4291 raw_inode->i_gid_high = 0;
4292 }
4293 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4294
4295 EXT4_INODE_SET_CTIME(inode, raw_inode);
4296 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4297 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4298 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4299
4300 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4301 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4302 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4303 raw_inode->i_file_acl_high =
4304 cpu_to_le16(ei->i_file_acl >> 32);
4305 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4306 ext4_isize_set(raw_inode, ei->i_disksize);
4307
4308 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4309 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4310 if (old_valid_dev(inode->i_rdev)) {
4311 raw_inode->i_block[0] =
4312 cpu_to_le32(old_encode_dev(inode->i_rdev));
4313 raw_inode->i_block[1] = 0;
4314 } else {
4315 raw_inode->i_block[0] = 0;
4316 raw_inode->i_block[1] =
4317 cpu_to_le32(new_encode_dev(inode->i_rdev));
4318 raw_inode->i_block[2] = 0;
4319 }
4320 } else if (!ext4_has_inline_data(inode)) {
4321 for (block = 0; block < EXT4_N_BLOCKS; block++)
4322 raw_inode->i_block[block] = ei->i_data[block];
4323 }
4324
4325 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4326 u64 ivers = ext4_inode_peek_iversion(inode);
4327
4328 raw_inode->i_disk_version = cpu_to_le32(ivers);
4329 if (ei->i_extra_isize) {
4330 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4331 raw_inode->i_version_hi =
4332 cpu_to_le32(ivers >> 32);
4333 raw_inode->i_extra_isize =
4334 cpu_to_le16(ei->i_extra_isize);
4335 }
4336 }
4337
4338 if (i_projid != EXT4_DEF_PROJID &&
4339 !ext4_has_feature_project(inode->i_sb))
4340 err = err ?: -EFSCORRUPTED;
4341
4342 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4343 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4344 raw_inode->i_projid = cpu_to_le32(i_projid);
4345
4346 ext4_inode_csum_set(inode, raw_inode, ei);
4347 return err;
4348 }
4349
4350 /*
4351 * ext4_get_inode_loc returns with an extra refcount against the inode's
4352 * underlying buffer_head on success. If we pass 'inode' and it does not
4353 * have in-inode xattr, we have all inode data in memory that is needed
4354 * to recreate the on-disk version of this inode.
4355 */
4356 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4357 struct inode *inode, struct ext4_iloc *iloc,
4358 ext4_fsblk_t *ret_block)
4359 {
4360 struct ext4_group_desc *gdp;
4361 struct buffer_head *bh;
4362 ext4_fsblk_t block;
4363 struct blk_plug plug;
4364 int inodes_per_block, inode_offset;
4365
4366 iloc->bh = NULL;
4367 if (ino < EXT4_ROOT_INO ||
4368 ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4369 return -EFSCORRUPTED;
4370
4371 iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4372 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4373 if (!gdp)
4374 return -EIO;
4375
4376 /*
4377 * Figure out the offset within the block group inode table
4378 */
4379 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4380 inode_offset = ((ino - 1) %
4381 EXT4_INODES_PER_GROUP(sb));
4382 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4383
4384 block = ext4_inode_table(sb, gdp);
4385 if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4386 (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4387 ext4_error(sb, "Invalid inode table block %llu in "
4388 "block_group %u", block, iloc->block_group);
4389 return -EFSCORRUPTED;
4390 }
4391 block += (inode_offset / inodes_per_block);
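/*
 * For example, with 256-byte inodes in 4k blocks (16 inodes per block)
 * and 8192 inodes per group, ino == 20 falls in group 0 at
 * inode_offset == 19: byte offset (19 % 16) * 256 == 768 within the
 * second block (19 / 16 == 1) of that group's inode table.
 */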
4392
4393 bh = sb_getblk(sb, block);
4394 if (unlikely(!bh))
4395 return -ENOMEM;
4396 if (ext4_buffer_uptodate(bh))
4397 goto has_buffer;
4398
4399 lock_buffer(bh);
4400 if (ext4_buffer_uptodate(bh)) {
4401 /* Someone brought it uptodate while we waited */
4402 unlock_buffer(bh);
4403 goto has_buffer;
4404 }
4405
4406 /*
4407 * If we have all the inode's information in memory and this
4408 * is the only valid inode in the block, we need not read the
4409 * block.
4410 */
4411 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4412 struct buffer_head *bitmap_bh;
4413 int i, start;
4414
4415 start = inode_offset & ~(inodes_per_block - 1);
4416
4417 /* Is the inode bitmap in cache? */
4418 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4419 if (unlikely(!bitmap_bh))
4420 goto make_io;
4421
4422 /*
4423 * If the inode bitmap isn't in cache then the
4424 * optimisation may end up performing two reads instead
4425 * of one, so skip it.
4426 */
4427 if (!buffer_uptodate(bitmap_bh)) {
4428 brelse(bitmap_bh);
4429 goto make_io;
4430 }
4431 for (i = start; i < start + inodes_per_block; i++) {
4432 if (i == inode_offset)
4433 continue;
4434 if (ext4_test_bit(i, bitmap_bh->b_data))
4435 break;
4436 }
4437 brelse(bitmap_bh);
4438 if (i == start + inodes_per_block) {
4439 struct ext4_inode *raw_inode =
4440 (struct ext4_inode *) (bh->b_data + iloc->offset);
4441
4442 /* all other inodes are free, so skip I/O */
4443 memset(bh->b_data, 0, bh->b_size);
4444 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4445 ext4_fill_raw_inode(inode, raw_inode);
4446 set_buffer_uptodate(bh);
4447 unlock_buffer(bh);
4448 goto has_buffer;
4449 }
4450 }
4451
4452 make_io:
4453 /*
4454 * If we need to do any I/O, try to pre-readahead extra
4455 * blocks from the inode table.
4456 */
4457 blk_start_plug(&plug);
4458 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4459 ext4_fsblk_t b, end, table;
4460 unsigned num;
4461 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4462
4463 table = ext4_inode_table(sb, gdp);
4464 /* s_inode_readahead_blks is always a power of 2 */
4465 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4466 if (table > b)
4467 b = table;
4468 end = b + ra_blks;
4469 num = EXT4_INODES_PER_GROUP(sb);
4470 if (ext4_has_group_desc_csum(sb))
4471 num -= ext4_itable_unused_count(sb, gdp);
4472 table += num / inodes_per_block;
4473 if (end > table)
4474 end = table;
4475 while (b <= end)
4476 ext4_sb_breadahead_unmovable(sb, b++);
4477 }
4478
4479 /*
4480 * There are other valid inodes in the buffer, this inode
4481 * has in-inode xattrs, or we don't have this inode in memory.
4482 * Read the block from disk.
4483 */
4484 trace_ext4_load_inode(sb, ino);
4485 ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4486 blk_finish_plug(&plug);
4487 wait_on_buffer(bh);
4488 ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4489 if (!buffer_uptodate(bh)) {
4490 if (ret_block)
4491 *ret_block = block;
4492 brelse(bh);
4493 return -EIO;
4494 }
4495 has_buffer:
4496 iloc->bh = bh;
4497 return 0;
4498 }
4499
4500 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4501 struct ext4_iloc *iloc)
4502 {
4503 ext4_fsblk_t err_blk = 0;
4504 int ret;
4505
4506 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4507 &err_blk);
4508
4509 if (ret == -EIO)
4510 ext4_error_inode_block(inode, err_blk, EIO,
4511 "unable to read itable block");
4512
4513 return ret;
4514 }
4515
4516 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4517 {
4518 ext4_fsblk_t err_blk = 0;
4519 int ret;
4520
4521 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4522 &err_blk);
4523
4524 if (ret == -EIO)
4525 ext4_error_inode_block(inode, err_blk, EIO,
4526 "unable to read itable block");
4527
4528 return ret;
4529 }
4530
4531
4532 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4533 struct ext4_iloc *iloc)
4534 {
4535 return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
4536 }
4537
4538 static bool ext4_should_enable_dax(struct inode *inode)
4539 {
4540 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4541
4542 if (test_opt2(inode->i_sb, DAX_NEVER))
4543 return false;
4544 if (!S_ISREG(inode->i_mode))
4545 return false;
4546 if (ext4_should_journal_data(inode))
4547 return false;
4548 if (ext4_has_inline_data(inode))
4549 return false;
4550 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4551 return false;
4552 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4553 return false;
4554 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4555 return false;
4556 if (test_opt(inode->i_sb, DAX_ALWAYS))
4557 return true;
4558
4559 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4560 }
4561
4562 void ext4_set_inode_flags(struct inode *inode, bool init)
4563 {
4564 unsigned int flags = EXT4_I(inode)->i_flags;
4565 unsigned int new_fl = 0;
4566
4567 WARN_ON_ONCE(IS_DAX(inode) && init);
4568
4569 if (flags & EXT4_SYNC_FL)
4570 new_fl |= S_SYNC;
4571 if (flags & EXT4_APPEND_FL)
4572 new_fl |= S_APPEND;
4573 if (flags & EXT4_IMMUTABLE_FL)
4574 new_fl |= S_IMMUTABLE;
4575 if (flags & EXT4_NOATIME_FL)
4576 new_fl |= S_NOATIME;
4577 if (flags & EXT4_DIRSYNC_FL)
4578 new_fl |= S_DIRSYNC;
4579
4580 /* Because of the way inode_set_flags() works we must preserve S_DAX
4581 * here if already set. */
4582 new_fl |= (inode->i_flags & S_DAX);
4583 if (init && ext4_should_enable_dax(inode))
4584 new_fl |= S_DAX;
4585
4586 if (flags & EXT4_ENCRYPT_FL)
4587 new_fl |= S_ENCRYPTED;
4588 if (flags & EXT4_CASEFOLD_FL)
4589 new_fl |= S_CASEFOLD;
4590 if (flags & EXT4_VERITY_FL)
4591 new_fl |= S_VERITY;
4592 inode_set_flags(inode, new_fl,
4593 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4594 S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4595 }
4596
4597 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4598 struct ext4_inode_info *ei)
4599 {
4600 blkcnt_t i_blocks ;
4601 struct inode *inode = &(ei->vfs_inode);
4602 struct super_block *sb = inode->i_sb;
4603
4604 if (ext4_has_feature_huge_file(sb)) {
4605 /* we are using combined 48 bit field */
4606 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4607 le32_to_cpu(raw_inode->i_blocks_lo);
4608 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4609 /* i_blocks represent file system block size */
4610 return i_blocks << (inode->i_blkbits - 9);
4611 } else {
4612 return i_blocks;
4613 }
4614 } else {
4615 return le32_to_cpu(raw_inode->i_blocks_lo);
4616 }
4617 }
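/*
 * This mirrors ext4_inode_blocks_set(): e.g. i_blocks_high == 1 with
 * i_blocks_lo == 0 decodes to 1ULL << 32 512-byte units, shifted left
 * by another (i_blkbits - 9) when EXT4_INODE_HUGE_FILE is set.
 */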
4618
4619 static inline int ext4_iget_extra_inode(struct inode *inode,
4620 struct ext4_inode *raw_inode,
4621 struct ext4_inode_info *ei)
4622 {
4623 __le32 *magic = (void *)raw_inode +
4624 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4625
4626 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4627 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4628 int err;
4629
4630 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4631 err = ext4_find_inline_data_nolock(inode);
4632 if (!err && ext4_has_inline_data(inode))
4633 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4634 return err;
4635 } else
4636 EXT4_I(inode)->i_inline_off = 0;
4637 return 0;
4638 }
4639
4640 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4641 {
4642 if (!ext4_has_feature_project(inode->i_sb))
4643 return -EOPNOTSUPP;
4644 *projid = EXT4_I(inode)->i_projid;
4645 return 0;
4646 }
4647
4648 /*
4649 * ext4 has self-managed i_version for ea inodes: it stores the lower
4650 * 32 bits of the refcount in i_version, so use raw values if the inode
4651 * has the EXT4_EA_INODE_FL flag set.
4652 */
4653 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4654 {
4655 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4656 inode_set_iversion_raw(inode, val);
4657 else
4658 inode_set_iversion_queried(inode, val);
4659 }
4660
4661 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4662
4663 {
4664 if (flags & EXT4_IGET_EA_INODE) {
4665 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4666 return "missing EA_INODE flag";
4667 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4668 EXT4_I(inode)->i_file_acl)
4669 return "ea_inode with extended attributes";
4670 } else {
4671 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4672 return "unexpected EA_INODE flag";
4673 }
4674 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4675 return "unexpected bad inode w/o EXT4_IGET_BAD";
4676 return NULL;
4677 }
4678
4679 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4680 ext4_iget_flags flags, const char *function,
4681 unsigned int line)
4682 {
4683 struct ext4_iloc iloc;
4684 struct ext4_inode *raw_inode;
4685 struct ext4_inode_info *ei;
4686 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4687 struct inode *inode;
4688 const char *err_str;
4689 journal_t *journal = EXT4_SB(sb)->s_journal;
4690 long ret;
4691 loff_t size;
4692 int block;
4693 uid_t i_uid;
4694 gid_t i_gid;
4695 projid_t i_projid;
4696
4697 if ((!(flags & EXT4_IGET_SPECIAL) &&
4698 ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
4699 ino == le32_to_cpu(es->s_usr_quota_inum) ||
4700 ino == le32_to_cpu(es->s_grp_quota_inum) ||
4701 ino == le32_to_cpu(es->s_prj_quota_inum) ||
4702 ino == le32_to_cpu(es->s_orphan_file_inum))) ||
4703 (ino < EXT4_ROOT_INO) ||
4704 (ino > le32_to_cpu(es->s_inodes_count))) {
4705 if (flags & EXT4_IGET_HANDLE)
4706 return ERR_PTR(-ESTALE);
4707 __ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
4708 "inode #%lu: comm %s: iget: illegal inode #",
4709 ino, current->comm);
4710 return ERR_PTR(-EFSCORRUPTED);
4711 }
4712
4713 inode = iget_locked(sb, ino);
4714 if (!inode)
4715 return ERR_PTR(-ENOMEM);
4716 if (!(inode->i_state & I_NEW)) {
4717 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4718 ext4_error_inode(inode, function, line, 0, err_str);
4719 iput(inode);
4720 return ERR_PTR(-EFSCORRUPTED);
4721 }
4722 return inode;
4723 }
4724
4725 ei = EXT4_I(inode);
4726 iloc.bh = NULL;
4727
4728 ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4729 if (ret < 0)
4730 goto bad_inode;
4731 raw_inode = ext4_raw_inode(&iloc);
4732
4733 if ((flags & EXT4_IGET_HANDLE) &&
4734 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4735 ret = -ESTALE;
4736 goto bad_inode;
4737 }
4738
4739 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4740 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4741 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4742 EXT4_INODE_SIZE(inode->i_sb) ||
4743 (ei->i_extra_isize & 3)) {
4744 ext4_error_inode(inode, function, line, 0,
4745 "iget: bad extra_isize %u "
4746 "(inode size %u)",
4747 ei->i_extra_isize,
4748 EXT4_INODE_SIZE(inode->i_sb));
4749 ret = -EFSCORRUPTED;
4750 goto bad_inode;
4751 }
4752 } else
4753 ei->i_extra_isize = 0;
4754
4755 /* Precompute checksum seed for inode metadata */
4756 if (ext4_has_metadata_csum(sb)) {
4757 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4758 __u32 csum;
4759 __le32 inum = cpu_to_le32(inode->i_ino);
4760 __le32 gen = raw_inode->i_generation;
4761 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4762 sizeof(inum));
4763 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4764 sizeof(gen));
4765 }
4766
4767 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4768 ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4769 (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4770 ext4_error_inode_err(inode, function, line, 0,
4771 EFSBADCRC, "iget: checksum invalid");
4772 ret = -EFSBADCRC;
4773 goto bad_inode;
4774 }
4775
4776 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4777 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4778 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4779 if (ext4_has_feature_project(sb) &&
4780 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4781 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4782 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4783 else
4784 i_projid = EXT4_DEF_PROJID;
4785
4786 if (!(test_opt(inode->i_sb, NO_UID32))) {
4787 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4788 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4789 }
4790 i_uid_write(inode, i_uid);
4791 i_gid_write(inode, i_gid);
4792 ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4793 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4794
4795 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4796 ei->i_inline_off = 0;
4797 ei->i_dir_start_lookup = 0;
4798 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4799 /* We now have enough fields to check if the inode was active or not.
4800 * This is needed because nfsd might try to access dead inodes;
4801 * the test is the same one that e2fsck uses.
4802 * NeilBrown 1999oct15
4803 */
4804 if (inode->i_nlink == 0) {
4805 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4806 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4807 ino != EXT4_BOOT_LOADER_INO) {
4808 /* this inode is deleted or unallocated */
4809 if (flags & EXT4_IGET_SPECIAL) {
4810 ext4_error_inode(inode, function, line, 0,
4811 "iget: special inode unallocated");
4812 ret = -EFSCORRUPTED;
4813 } else
4814 ret = -ESTALE;
4815 goto bad_inode;
4816 }
4817 /* The only unlinked inodes we let through here have
4818 * valid i_mode and are being read by the orphan
4819 * recovery code: that's fine, we're about to complete
4820 * the process of deleting those.
4821 * OR it is the EXT4_BOOT_LOADER_INO which is
4822 * not initialized on a new filesystem. */
4823 }
4824 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4825 ext4_set_inode_flags(inode, true);
4826 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4827 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4828 if (ext4_has_feature_64bit(sb))
4829 ei->i_file_acl |=
4830 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4831 inode->i_size = ext4_isize(sb, raw_inode);
4832 if ((size = i_size_read(inode)) < 0) {
4833 ext4_error_inode(inode, function, line, 0,
4834 "iget: bad i_size value: %lld", size);
4835 ret = -EFSCORRUPTED;
4836 goto bad_inode;
4837 }
4838 /*
4839 * If dir_index is not enabled but there's a dir with the INDEX flag
4840 * set, we'd normally treat htree data as empty space. But with
4841 * metadata checksumming that would corrupt the checksums, so forbid it.
4842 */
4843 if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4844 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4845 ext4_error_inode(inode, function, line, 0,
4846 "iget: Dir with htree data on filesystem without dir_index feature.");
4847 ret = -EFSCORRUPTED;
4848 goto bad_inode;
4849 }
4850 ei->i_disksize = inode->i_size;
4851 #ifdef CONFIG_QUOTA
4852 ei->i_reserved_quota = 0;
4853 #endif
4854 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4855 ei->i_block_group = iloc.block_group;
4856 ei->i_last_alloc_group = ~0;
4857 /*
4858 * NOTE! The in-memory inode i_data array is in little-endian order
4859 * even on big-endian machines: we do NOT byteswap the block numbers!
4860 */
4861 for (block = 0; block < EXT4_N_BLOCKS; block++)
4862 ei->i_data[block] = raw_inode->i_block[block];
4863 INIT_LIST_HEAD(&ei->i_orphan);
4864 ext4_fc_init_inode(&ei->vfs_inode);
4865
4866 /*
4867 * Set transaction id's of transactions that have to be committed
4868 * to finish f[data]sync. We set them to currently running transaction
4869 * as we cannot be sure that the inode or some of its metadata isn't
4870 * part of the transaction - the inode could have been reclaimed and
4871 * now it is reread from disk.
4872 */
4873 if (journal) {
4874 transaction_t *transaction;
4875 tid_t tid;
4876
4877 read_lock(&journal->j_state_lock);
4878 if (journal->j_running_transaction)
4879 transaction = journal->j_running_transaction;
4880 else
4881 transaction = journal->j_committing_transaction;
4882 if (transaction)
4883 tid = transaction->t_tid;
4884 else
4885 tid = journal->j_commit_sequence;
4886 read_unlock(&journal->j_state_lock);
4887 ei->i_sync_tid = tid;
4888 ei->i_datasync_tid = tid;
4889 }
4890
4891 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4892 if (ei->i_extra_isize == 0) {
4893 /* The extra space is currently unused. Use it. */
4894 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4895 ei->i_extra_isize = sizeof(struct ext4_inode) -
4896 EXT4_GOOD_OLD_INODE_SIZE;
4897 } else {
4898 ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4899 if (ret)
4900 goto bad_inode;
4901 }
4902 }
4903
4904 EXT4_INODE_GET_CTIME(inode, raw_inode);
4905 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4906 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4907 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4908
4909 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4910 u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4911
4912 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4913 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4914 ivers |=
4915 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4916 }
4917 ext4_inode_set_iversion_queried(inode, ivers);
4918 }
4919
4920 ret = 0;
4921 if (ei->i_file_acl &&
4922 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4923 ext4_error_inode(inode, function, line, 0,
4924 "iget: bad extended attribute block %llu",
4925 ei->i_file_acl);
4926 ret = -EFSCORRUPTED;
4927 goto bad_inode;
4928 } else if (!ext4_has_inline_data(inode)) {
4929 /* validate the block references in the inode */
4930 if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4931 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4932 (S_ISLNK(inode->i_mode) &&
4933 !ext4_inode_is_fast_symlink(inode)))) {
4934 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4935 ret = ext4_ext_check_inode(inode);
4936 else
4937 ret = ext4_ind_check_inode(inode);
4938 }
4939 }
4940 if (ret)
4941 goto bad_inode;
4942
4943 if (S_ISREG(inode->i_mode)) {
4944 inode->i_op = &ext4_file_inode_operations;
4945 inode->i_fop = &ext4_file_operations;
4946 ext4_set_aops(inode);
4947 } else if (S_ISDIR(inode->i_mode)) {
4948 inode->i_op = &ext4_dir_inode_operations;
4949 inode->i_fop = &ext4_dir_operations;
4950 } else if (S_ISLNK(inode->i_mode)) {
4951 /* VFS does not allow setting these so must be corruption */
4952 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4953 ext4_error_inode(inode, function, line, 0,
4954 "iget: immutable or append flags "
4955 "not allowed on symlinks");
4956 ret = -EFSCORRUPTED;
4957 goto bad_inode;
4958 }
4959 if (IS_ENCRYPTED(inode)) {
4960 inode->i_op = &ext4_encrypted_symlink_inode_operations;
4961 } else if (ext4_inode_is_fast_symlink(inode)) {
4962 inode->i_link = (char *)ei->i_data;
4963 inode->i_op = &ext4_fast_symlink_inode_operations;
4964 nd_terminate_link(ei->i_data, inode->i_size,
4965 sizeof(ei->i_data) - 1);
4966 } else {
4967 inode->i_op = &ext4_symlink_inode_operations;
4968 }
4969 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4970 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4971 inode->i_op = &ext4_special_inode_operations;
4972 if (raw_inode->i_block[0])
4973 init_special_inode(inode, inode->i_mode,
4974 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4975 else
4976 init_special_inode(inode, inode->i_mode,
4977 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else if (ino == EXT4_BOOT_LOADER_INO) {
		make_bad_inode(inode);
	} else {
		ret = -EFSCORRUPTED;
		ext4_error_inode(inode, function, line, 0,
				 "iget: bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
		ext4_error_inode(inode, function, line, 0,
				 "casefold flag without casefold feature");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	if ((err_str = check_igot_inode(inode, flags)) != NULL) {
		ext4_error_inode(inode, function, line, 0, err_str);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	brelse(iloc.bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static void __ext4_update_other_inode_time(struct super_block *sb,
					   unsigned long orig_ino,
					   unsigned long ino,
					   struct ext4_inode *raw_inode)
{
	struct inode *inode;

	inode = find_inode_by_ino_rcu(sb, ino);
	if (!inode)
		return;

	if (!inode_is_dirtytime_only(inode))
		return;

	spin_lock(&inode->i_lock);
	if (inode_is_dirtytime_only(inode)) {
		struct ext4_inode_info *ei = EXT4_I(inode);

		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);

		spin_lock(&ei->i_raw_lock);
		EXT4_INODE_SET_CTIME(inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
		ext4_inode_csum_set(inode, raw_inode, ei);
		spin_unlock(&ei->i_raw_lock);
		trace_ext4_other_inode_update_time(inode, orig_ino);
		return;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Opportunistically update the other time fields for other inodes in
 * the same inode table block.
 */
static void ext4_update_other_inodes_time(struct super_block *sb,
					  unsigned long orig_ino, char *buf)
{
	unsigned long ino;
	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int inode_size = EXT4_INODE_SIZE(sb);

	/*
	 * Calculate the first inode in the inode table block.  Inode
	 * numbers are one-based.  That is, the first inode in a block
	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
	 */
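	/*
	 * Worked example (editorial): with 16 inodes per block,
	 * orig_ino == 19 gives ((19 - 1) & ~15) + 1 == 17, the first
	 * inode of the block containing inode 19.  This relies on
	 * s_inodes_per_block being a power of two.
	 */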
	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
	rcu_read_lock();
	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
		if (ino == orig_ino)
			continue;
		__ext4_update_other_inode_time(sb, orig_ino, ino,
					       (struct ext4_inode *)buf);
	}
	rcu_read_unlock();
}

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	struct super_block *sb = inode->i_sb;
	int err;
	int need_datasync = 0, set_large_file = 0;

	spin_lock(&ei->i_raw_lock);

	/*
	 * For fields not tracked in the in-memory inode, initialise them
	 * to zero for new inodes.
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
		need_datasync = 1;
	if (ei->i_disksize > 0x7fffffffULL) {
		if (!ext4_has_feature_large_file(sb) ||
		    EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
			set_large_file = 1;
	}

	err = ext4_fill_raw_inode(inode, raw_inode);
	spin_unlock(&ei->i_raw_lock);
	if (err) {
		EXT4_ERROR_INODE(inode, "corrupted inode contents");
		goto out_brelse;
	}

	if (inode->i_sb->s_flags & SB_LAZYTIME)
		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
					      bh->b_data);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (err)
		goto out_error;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	if (set_large_file) {
		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
		err = ext4_journal_get_write_access(handle, sb,
						    EXT4_SB(sb)->s_sbh,
						    EXT4_JTR_NONE);
		if (err)
			goto out_error;
		lock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_set_feature_large_file(sb);
		ext4_superblock_csum_set(sb);
		unlock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_handle_sync(handle);
		err = ext4_handle_dirty_metadata(handle, NULL,
						 EXT4_SB(sb)->s_sbh);
	}
	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_error:
	ext4_std_error(inode->i_sb, err);
out_brelse:
	brelse(bh);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
 *   Here, there will be no transaction running.  We wait for any running
 *   transaction to commit.
 *
 * - Within flush work (sys_sync(), kupdate and such).
 *   We wait on commit, if told to.
 *
 * - Within iput_final() -> write_inode_now()
 *   We wait on commit, if told to.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
 * writeback.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because write_inode() could occur while `stuff()' is running,
 * and the new i_size will be lost.  Plus the inode will no longer be on the
 * superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return 0;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			ext4_debug("called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		/*
		 * No need to force transaction in WB_SYNC_NONE mode. Also
		 * ext4_sync_fs() will force the commit after everything is
		 * written.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
				     EXT4_I(inode)->i_sync_tid);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
 * buffers that are attached to a folio straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_SIZE - 1);
	/*
	 * If the folio is fully truncated, we don't need to wait for any commit
	 * (and we even should not as __ext4_journalled_invalidate_folio() may
	 * strip all buffers from the folio but keep the folio dirty which can then
	 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
	 * buffers). Also we don't need to wait for any commit if all buffers in
	 * the folio remain valid. This is most beneficial for the common case of
	 * blocksize == PAGESIZE.
	 */
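	/*
	 * Worked example (editorial): with 4k pages and 1k blocks,
	 * i_size == 5k gives offset == 1k <= PAGE_SIZE - blocksize (3k),
	 * so the tail folio has buffers wholly past i_size that may be
	 * invalidated and we must do the check below.  With blocksize ==
	 * PAGE_SIZE any non-zero offset fails the test and we return
	 * right away.
	 */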
	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
		return;
	while (1) {
		struct folio *folio = filemap_lock_folio(inode->i_mapping,
				      inode->i_size >> PAGE_SHIFT);
		if (IS_ERR(folio))
			return;
		ret = __ext4_journalled_invalidate_folio(folio, offset,
						folio_size(folio) - offset);
		folio_unlock(folio);
		folio_put(folio);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to ensure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_rwsem down.
 */
int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;
	bool inc_ivers = true;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
		     (ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	error = fscrypt_prepare_setattr(dentry, attr);
	if (error)
		return error;

	error = fsverity_prepare_setattr(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(idmap, inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}

	if (i_uid_needs_update(idmap, attr, inode) ||
	    i_gid_needs_update(idmap, attr, inode)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		error = dquot_transfer(idmap, inode, attr);
		up_read(&EXT4_I(inode)->xattr_sem);

		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		i_uid_update(idmap, attr, inode);
		i_gid_update(idmap, attr, inode);
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
		if (unlikely(error))
			return error;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		loff_t old_disksize;
		int shrink = (attr->ia_size < inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
		if (!S_ISREG(inode->i_mode))
			return -EINVAL;

		if (attr->ia_size == inode->i_size)
			inc_ivers = false;

		if (shrink) {
			if (ext4_should_order_data(inode)) {
				error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
				if (error)
					goto err_out;
			}
			/*
			 * Blocks are going to be removed from the inode. Wait
			 * for dio in flight.
			 */
			inode_dio_wait(inode);
		}

		filemap_invalidate_lock(inode->i_mapping);

		rc = ext4_break_layouts(inode);
		if (rc) {
			filemap_invalidate_unlock(inode->i_mapping);
			goto err_out;
		}

		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto out_mmap_sem;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below
			 */
			if (!shrink)
				inode->i_mtime = inode_set_ctime_current(inode);

			if (shrink)
				ext4_fc_track_range(handle, inode,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits,
					EXT_MAX_BLOCKS - 1);
			else
				ext4_fc_track_range(
					handle, inode,
					(oldsize > 0 ? oldsize - 1 : oldsize) >>
					inode->i_sb->s_blocksize_bits,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits);

			down_write(&EXT4_I(inode)->i_data_sem);
			old_disksize = EXT4_I(inode)->i_disksize;
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			else
				EXT4_I(inode)->i_disksize = old_disksize;
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error)
				goto out_mmap_sem;
			if (!shrink) {
				pagecache_isize_extended(inode, oldsize,
							 inode->i_size);
			} else if (ext4_should_journal_data(inode)) {
				ext4_wait_for_tail_page_commit(inode);
			}
		}

		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		/*
		 * Call ext4_truncate() even if i_size didn't change to
		 * truncate possible preallocated blocks.
		 */
		if (attr->ia_size <= oldsize) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
out_mmap_sem:
		filemap_invalidate_unlock(inode->i_mapping);
	}

	if (!error) {
		if (inc_ivers)
			inode_inc_iversion(inode);
		setattr_copy(idmap, inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(idmap, dentry, inode->i_mode);

err_out:
	if (error)
		ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

u32 ext4_dio_alignment(struct inode *inode)
{
	if (fsverity_active(inode))
		return 0;
	if (ext4_should_journal_data(inode))
		return 0;
	if (ext4_has_inline_data(inode))
		return 0;
	if (IS_ENCRYPTED(inode)) {
		if (!fscrypt_dio_supported(inode))
			return 0;
		return i_blocksize(inode);
	}
	return 1; /* use the iomap defaults */
}
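
/*
 * Editorial note on the convention above: 0 means direct I/O is not
 * supported at all, 1 means no extra restriction (fall back to the
 * iomap/bdev defaults), and any other value (the filesystem block size
 * for encrypted files) is the required alignment for both user buffers
 * and file offsets.
 */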

int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if ((request_mask & STATX_BTIME) &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	/*
	 * Return the DIO alignment restrictions if requested.  We only return
	 * this information when requested, since on encrypted files it might
	 * take a fair bit of work to get if the file wasn't opened recently.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		u32 dio_align = ext4_dio_alignment(inode);

		stat->result_mask |= STATX_DIOALIGN;
		if (dio_align == 1) {
			struct block_device *bdev = inode->i_sb->s_bdev;

			/* iomap defaults */
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		} else {
			stat->dio_mem_align = dio_align;
			stat->dio_offset_align = dio_align;
		}
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (flags & EXT4_VERITY_FL)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
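
/*
 * Illustrative userspace sketch (editorial, not part of this file) of
 * consuming the STATX_DIOALIGN fields filled in above; error handling
 * elided:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "file", 0, STATX_DIOALIGN, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_DIOALIGN)) {
 *		// O_DIRECT buffers must be aligned to
 *		// stx.stx_dio_mem_align bytes, and file offsets and
 *		// lengths to stx.stx_dio_offset_align bytes.
 *	}
 */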

int ext4_file_getattr(struct mnt_idmap *idmap,
		      const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	u64 delalloc_blocks;

	ext4_getattr(idmap, path, stat, request_mask, query_flags);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar,
	 * rsync and others don't incorrectly think the file is completely
	 * sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But so as not to confuse userspace, stat returns
	 * the blocks that include the delayed allocation blocks for this
	 * file.
	 */
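	/*
	 * Editorial note: i_reserved_data_blocks counts clusters;
	 * EXT4_C2B() converts clusters to filesystem blocks, and the
	 * shift by (s_blocksize_bits - 9) converts blocks to 512-byte
	 * sectors, e.g. a 4k block (s_blocksize_bits == 12) accounts
	 * for 8 sectors.
	 */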
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.  In the
 * worst case, the index blocks spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too.  Even if they are contiguous, with flexbg they
 * could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret;

	/*
	 * How many index blocks do we need to touch to map @lblocks
	 * logical blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * need to be accounted for.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
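
/*
 * Worked example (editorial, numbers assumed): mapping lblocks == 4 to
 * pextents == 1 on an extent-mapped inode with idxblocks == 3 gives
 * groups = 3 + 1 = 4, so up to 4 bitmap blocks plus up to
 * min(4, s_gdb_count) descriptor blocks are added on top of the 3
 * index-block credits and the fixed EXT4_META_TRANS_BLOCKS() overhead.
 */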

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when we allocate
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}
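
/*
 * Editorial example: with 4k pages and 1k blocks,
 * ext4_journal_blocks_per_page() returns 4, so we reserve credits for
 * up to four separately allocated extents (the worst case), plus four
 * more data-block credits in data=journal mode.
 */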

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
		put_bh(iloc->bh);
		return -EIO;
	}
	ext4_fc_track_inode(handle, inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    iloc->bh, EXT4_JTR_NONE);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int error;

	/* this was checked at iget time, but double check for good measure */
	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
	    (ei->i_extra_isize & 3)) {
		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
				 ei->i_extra_isize,
				 EXT4_INODE_SIZE(inode->i_sb));
		return -EFSCORRUPTED;
	}
	if ((new_extra_isize < ei->i_extra_isize) ||
	    (new_extra_isize < 4) ||
	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
		return -EINVAL;	/* Should never happen */

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/*
	 * We may need to allocate an external xattr block, so we need
	 * quotas initialized.  Here we can be called with various locks
	 * held so we cannot afford to initialize quotas ourselves.  So
	 * just bail.
	 */
	if (dquot_initialize_needed(inode))
		return -EAGAIN;

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/*
		 * Inode size expansion failed; don't try again
		 */
		*no_expand = 1;
	}

	return error;
}
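
/*
 * Editorial layout sketch (EXT4_GOOD_OLD_INODE_SIZE is 128):
 *
 *	0                 128               128 + i_extra_isize
 *	+-----------------+-----------------+------------------+
 *	| good old inode  | extra fields    | in-inode xattrs  |
 *	+-----------------+-----------------+------------------+
 *
 * Expanding i_extra_isize grows the middle region, which is why the
 * in-inode xattr area must either be absent (the memset() path above)
 * or be shifted by ext4_expand_extra_isize_ea().
 */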

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode.  When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle. If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
		return -ENOSPC;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);

	return error;
}

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
					      EXT4_JTR_NONE);
	if (error) {
		brelse(iloc->bh);
		goto out_unlock;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);

	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

out_unlock:
	ext4_write_unlock_xattr(inode, &no_expand);
	ext4_journal_stop(handle);
	return error;
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
			    const char *func, unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out;

	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
					       iloc, handle);

	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out:
	if (unlikely(err))
		ext4_error_inode_err(inode, func, line, 0, err,
				     "mark_inode_dirty error");
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return;
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	int alloc_ctx;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching inode's aops, we have
	 * to flush all dirty data the inode has. There can be outstanding
	 * delayed allocations, there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		filemap_invalidate_lock(inode->i_mapping);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			filemap_invalidate_unlock(inode->i_mapping);
			return err;
		}
	}

	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal, 0);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			ext4_writepages_up_write(inode->i_sb, alloc_ctx);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	ext4_writepages_up_write(inode->i_sb, alloc_ctx);

	if (val)
		filemap_invalidate_unlock(inode->i_mapping);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_fc_mark_ineligible(inode->i_sb,
		EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
			    struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(vmf->page);
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	filemap_invalidate_lock_shared(mapping);

	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/*
	 * On data journalling we skip straight to the transaction handle:
	 * there's no delalloc; truncation of the page will be checked later;
	 * the early return w/ all buffers mapped (calculates size/len) can't
	 * be used; and there's no dioread_nolock, so only ext4_get_block.
	 */
	if (ext4_should_journal_data(inode))
		goto retry_alloc;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			err = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	folio_lock(folio);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (folio->mapping != mapping || folio_pos(folio) > size) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = folio_size(folio);
	if (folio_pos(folio) + len > size)
		len = size - folio_pos(folio);
	/*
	 * Return if we have all the buffers mapped. This avoids the need to do
	 * journal_start/journal_stop which can block and take a long time
	 *
	 * This cannot be done for data journalling, as we have to add the
	 * inode to the transaction's list to writeprotect pages on commit.
	 */
	if (folio_buffers(folio)) {
		if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			folio_wait_stable(folio);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	folio_unlock(folio);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	/*
	 * Data journalling can't use block_page_mkwrite() because it
	 * will set_buffer_dirty() before do_journal_get_write_access()
	 * thus might hit warning messages for dirty metadata buffers.
	 */
	if (!ext4_should_journal_data(inode)) {
		err = block_page_mkwrite(vma, vmf, get_block);
	} else {
		folio_lock(folio);
		size = i_size_read(inode);
		/* Page got truncated from under us? */
		if (folio->mapping != mapping || folio_pos(folio) > size) {
			ret = VM_FAULT_NOPAGE;
			goto out_error;
		}

		len = folio_size(folio);
		if (folio_pos(folio) + len > size)
			len = size - folio_pos(folio);

		err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
		if (!err) {
			ret = VM_FAULT_SIGBUS;
			if (ext4_journal_folio_buffers(handle, folio, len))
				goto out_error;
		} else {
			folio_unlock(folio);
		}
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = vmf_fs_error(err);
out:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
out_error:
	folio_unlock(folio);
	ext4_journal_stop(handle);
	goto out;
}
