--- fs/f2fs/file.c (8632987380765dee716d460640aa58d58d52998e)
+++ fs/f2fs/file.c (a1e09b03e6f5c1d713c88259909137c0fd264ede)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * fs/f2fs/file.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  */
 #include <linux/fs.h>

--- 10 unchanged lines hidden (view full) ---

 #include <linux/pagevec.h>
 #include <linux/uio.h>
 #include <linux/uuid.h>
 #include <linux/file.h>
 #include <linux/nls.h>
 #include <linux/sched/signal.h>
 #include <linux/fileattr.h>
 #include <linux/fadvise.h>
+#include <linux/iomap.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
 #include "gc.h"
 #include "iostat.h"

--- 1647 unchanged lines hidden (view full) ---
         down_write(&sbi->pin_sem);
 
         f2fs_lock_op(sbi);
         f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
         f2fs_unlock_op(sbi);
 
         map.m_seg_type = CURSEG_COLD_DATA_PINNED;
         err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+        file_dont_truncate(inode);
 
         up_write(&sbi->pin_sem);
 
         expanded += map.m_len;
         sec_len -= map.m_len;
         map.m_lblk += map.m_len;
         if (!err && sec_len)
                 goto next_alloc;

--- 2515 unchanged lines hidden (view full) ---

         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
                 return -EIO;
         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
                 return -ENOSPC;
 
         return __f2fs_ioctl(filp, cmd, arg);
 }
 
-static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
-{
-        struct file *file = iocb->ki_filp;
-        struct inode *inode = file_inode(file);
-        int ret;
-
+/*
+ * Return %true if the given read or write request should use direct I/O, or
+ * %false if it should use buffered I/O.
+ */
+static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
+                                struct iov_iter *iter)
+{
+        unsigned int align;
+
+        if (!(iocb->ki_flags & IOCB_DIRECT))
+                return false;
+
+        if (f2fs_force_buffered_io(inode, iocb, iter))
+                return false;
+
+        /*
+         * Direct I/O not aligned to the disk's logical_block_size will be
+         * attempted, but will fail with -EINVAL.
+         *
+         * f2fs additionally requires that direct I/O be aligned to the
+         * filesystem block size, which is often a stricter requirement.
+         * However, f2fs traditionally falls back to buffered I/O on requests
+         * that are logical_block_size-aligned but not fs-block aligned.
+         *
+         * The below logic implements this behavior.
+         */
+        align = iocb->ki_pos | iov_iter_alignment(iter);
+        if (!IS_ALIGNED(align, i_blocksize(inode)) &&
+            IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
+                return false;
+
+        return true;
+}
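
A note on the alignment test above: OR-ing the file position with iov_iter_alignment() (which itself ORs together the addresses and lengths of all iovec segments) yields a value that is N-aligned only if every component is. The sketch below replays the same decision table in plain userspace C; it is a minimal sketch, assuming a 4096-byte filesystem block and a 512-byte logical block size, with IS_ALIGNED re-derived rather than pulled from kernel headers and the iov_iter reduced to a single pos/length pair.

    #include <stdio.h>
    #include <stdint.h>

    /* Same definition as the kernel's IS_ALIGNED(), reproduced here. */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            const uint64_t fs_blk = 4096;   /* assumed f2fs block size */
            const uint64_t lbs = 512;       /* assumed logical_block_size */
            /* fs-block aligned; 512-aligned only; unaligned */
            uint64_t cases[][2] = { {8192, 4096}, {512, 1024}, {100, 300} };

            for (int i = 0; i < 3; i++) {
                    /* aligned to N only if both pos and length are */
                    uint64_t align = cases[i][0] | cases[i][1];

                    if (!IS_ALIGNED(align, fs_blk) && IS_ALIGNED(align, lbs))
                            printf("case %d: fall back to buffered I/O\n", i);
                    else if (!IS_ALIGNED(align, fs_blk))
                            printf("case %d: DIO attempted, fails -EINVAL\n", i);
                    else
                            printf("case %d: direct I/O\n", i);
            }
            return 0;
    }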
+
+static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
+                                unsigned int flags)
+{
+        struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
+
+        dec_page_count(sbi, F2FS_DIO_READ);
+        if (error)
+                return error;
+        f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, size);
+        return 0;
+}
+
+static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
+        .end_io = f2fs_dio_read_end_io,
+};
+
+static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file_inode(file);
+        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+        struct f2fs_inode_info *fi = F2FS_I(inode);
+        const loff_t pos = iocb->ki_pos;
+        const size_t count = iov_iter_count(to);
+        struct iomap_dio *dio;
+        ssize_t ret;
+
+        if (count == 0)
+                return 0;        /* skip atime update */
+
+        trace_f2fs_direct_IO_enter(inode, pos, count, READ);
+
+        if (iocb->ki_flags & IOCB_NOWAIT) {
+                if (!down_read_trylock(&fi->i_gc_rwsem[READ])) {
+                        ret = -EAGAIN;
+                        goto out;
+                }
+        } else {
+                down_read(&fi->i_gc_rwsem[READ]);
+        }
+
+        /*
+         * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
+         * the higher-level function iomap_dio_rw() in order to ensure that the
+         * F2FS_DIO_READ counter will be decremented correctly in all cases.
+         */
+        inc_page_count(sbi, F2FS_DIO_READ);
+        dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
+                             &f2fs_iomap_dio_read_ops, 0, 0);
+        if (IS_ERR_OR_NULL(dio)) {
+                ret = PTR_ERR_OR_ZERO(dio);
+                if (ret != -EIOCBQUEUED)
+                        dec_page_count(sbi, F2FS_DIO_READ);
+        } else {
+                ret = iomap_dio_complete(dio);
+        }
+
+        up_read(&fi->i_gc_rwsem[READ]);
+
+        file_accessed(file);
+out:
+        trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
+        return ret;
+}
-        if (!f2fs_is_compress_backend_ready(inode))
-                return -EOPNOTSUPP;
-
-        ret = generic_file_read_iter(iocb, iter);
-
-        if (ret > 0)
-                f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
-
-        return ret;
-}
-
+
+static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+        struct inode *inode = file_inode(iocb->ki_filp);
+        ssize_t ret;
+
+        if (!f2fs_is_compress_backend_ready(inode))
+                return -EOPNOTSUPP;
+
+        if (f2fs_should_use_dio(inode, iocb, to))
+                return f2fs_dio_read_iter(iocb, to);
+
+        ret = filemap_read(iocb, to, 0);
+        if (ret > 0)
+                f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
+        return ret;
+}
+
-static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
-        struct file *file = iocb->ki_filp;
-        struct inode *inode = file_inode(file);
-        ssize_t ret;
-
-        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
-                ret = -EIO;
-                goto out;
+static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file_inode(file);
+        ssize_t count;
+        int err;
+
+        if (IS_IMMUTABLE(inode))
+                return -EPERM;
+
+        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+                return -EPERM;
+
+        count = generic_write_checks(iocb, from);
+        if (count <= 0)
+                return count;
+
+        err = file_modified(file);
+        if (err)
+                return err;
+        return count;
+}
-        }
-
-        if (!f2fs_is_compress_backend_ready(inode)) {
-                ret = -EOPNOTSUPP;
-                goto out;
-        }
-
+
+/*
+ * Preallocate blocks for a write request, if it is possible and helpful to do
+ * so.  Returns a positive number if blocks may have been preallocated, 0 if no
+ * blocks were preallocated, or a negative errno value if something went
+ * seriously wrong.  Also sets FI_PREALLOCATED_ALL on the inode if *all* the
+ * requested blocks (not just some of them) have been allocated.
+ */
+static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
+                                   bool dio)
+{
+        struct inode *inode = file_inode(iocb->ki_filp);
+        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+        const loff_t pos = iocb->ki_pos;
+        const size_t count = iov_iter_count(iter);
+        struct f2fs_map_blocks map = {};
+        int flag;
+        int ret;
+
+        /* If it will be an out-of-place direct write, don't bother. */
+        if (dio && f2fs_lfs_mode(sbi))
+                return 0;
+        /*
+         * Don't preallocate holes aligned to DIO_SKIP_HOLES which turns into
+         * buffered IO, if DIO meets any holes.
+         */
+        if (dio && i_size_read(inode) &&
+            (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
+                return 0;
+
+        /* No-wait I/O can't allocate blocks. */
+        if (iocb->ki_flags & IOCB_NOWAIT)
+                return 0;
+
+        /* If it will be a short write, don't bother. */
+        if (fault_in_iov_iter_readable(iter, count))
+                return 0;
+
+        if (f2fs_has_inline_data(inode)) {
+                /* If the data will fit inline, don't bother. */
+                if (pos + count <= MAX_INLINE_DATA(inode))
+                        return 0;
+                ret = f2fs_convert_inline_inode(inode);
+                if (ret)
+                        return ret;
+        }
+
+        /* Do not preallocate blocks that will be written partially in 4KB. */
+        map.m_lblk = F2FS_BLK_ALIGN(pos);
+        map.m_len = F2FS_BYTES_TO_BLK(pos + count);
+        if (map.m_len > map.m_lblk)
+                map.m_len -= map.m_lblk;
+        else
+                map.m_len = 0;
+        map.m_may_create = true;
+        if (dio) {
+                map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
+                flag = F2FS_GET_BLOCK_PRE_DIO;
+        } else {
+                map.m_seg_type = NO_CHECK_TYPE;
+                flag = F2FS_GET_BLOCK_PRE_AIO;
+        }
+
+        ret = f2fs_map_blocks(inode, &map, 1, flag);
+        /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
+        if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
+                return ret;
+        if (ret == 0)
+                set_inode_flag(inode, FI_PREALLOCATED_ALL);
+        return map.m_len;
+}
+
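To make the m_lblk/m_len computation above concrete: F2FS_BLK_ALIGN() rounds a byte offset up to a block index while F2FS_BYTES_TO_BLK() rounds down, so the mapped range covers only blocks that the write overlaps completely, leaving partially written head and tail blocks to the write path itself. A standalone sketch, assuming a 4 KiB block size (the two macros are re-derived here, not taken from f2fs headers):

    #include <stdio.h>
    #include <stdint.h>

    #define BLKSIZE_BITS 12                 /* assumed 4 KiB f2fs block */
    #define BYTES_TO_BLK(b) ((b) >> BLKSIZE_BITS)               /* round down */
    #define BLK_ALIGN(b) BYTES_TO_BLK((b) + (1 << BLKSIZE_BITS) - 1) /* round up */

    int main(void)
    {
            uint64_t pos = 5000, count = 20000;     /* write of bytes 5000..24999 */
            uint64_t m_lblk = BLK_ALIGN(pos);       /* first fully written block: 2 */
            uint64_t end = BYTES_TO_BLK(pos + count); /* first block past range: 6 */
            uint64_t m_len = end > m_lblk ? end - m_lblk : 0;

            /* Blocks 2..5 are preallocated; block 1 (head) and block 6
             * (tail) are only partially covered, so they are skipped. */
            printf("preallocate %llu blocks starting at block %llu\n",
                   (unsigned long long)m_len, (unsigned long long)m_lblk);
            return 0;
    }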
-        if (iocb->ki_flags & IOCB_NOWAIT) {
-                if (!inode_trylock(inode)) {
-                        ret = -EAGAIN;
-                        goto out;
-                }
-        } else {
-                inode_lock(inode);
-        }
-
-        if (unlikely(IS_IMMUTABLE(inode))) {
-                ret = -EPERM;
-                goto unlock;
-        }
-
-        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
-                ret = -EPERM;
-                goto unlock;
-        }
-
-        ret = generic_write_checks(iocb, from);
-        if (ret > 0) {
-                bool preallocated = false;
-                size_t target_size = 0;
-                int err;
-
-                if (fault_in_iov_iter_readable(from, iov_iter_count(from)))
-                        set_inode_flag(inode, FI_NO_PREALLOC);
-
-                if ((iocb->ki_flags & IOCB_NOWAIT)) {
-                        if (!f2fs_overwrite_io(inode, iocb->ki_pos,
-                                               iov_iter_count(from)) ||
-                            f2fs_has_inline_data(inode) ||
-                            f2fs_force_buffered_io(inode, iocb, from)) {
-                                clear_inode_flag(inode, FI_NO_PREALLOC);
-                                inode_unlock(inode);
-                                ret = -EAGAIN;
-                                goto out;
-                        }
-                        goto write;
-                }
+static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
+                                        struct iov_iter *from)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file_inode(file);
+        ssize_t ret;
+
+        if (iocb->ki_flags & IOCB_NOWAIT)
+                return -EOPNOTSUPP;
+
+        current->backing_dev_info = inode_to_bdi(inode);
+        ret = generic_perform_write(file, from, iocb->ki_pos);
+        current->backing_dev_info = NULL;
+
+        if (ret > 0) {
+                iocb->ki_pos += ret;
+                f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_IO, ret);
+        }
+        return ret;
+}
+
+static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
+                                 unsigned int flags)
+{
+        struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
+
+        dec_page_count(sbi, F2FS_DIO_WRITE);
+        if (error)
+                return error;
+        f2fs_update_iostat(sbi, APP_DIRECT_IO, size);
+        return 0;
+}
+
+static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
+        .end_io = f2fs_dio_write_end_io,
+};
+
+static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
+                                   bool *may_need_sync)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file_inode(file);
+        struct f2fs_inode_info *fi = F2FS_I(inode);
+        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+        const bool do_opu = f2fs_lfs_mode(sbi);
+        const int whint_mode = F2FS_OPTION(sbi).whint_mode;
+        const loff_t pos = iocb->ki_pos;
+        const ssize_t count = iov_iter_count(from);
+        const enum rw_hint hint = iocb->ki_hint;
+        unsigned int dio_flags;
+        struct iomap_dio *dio;
+        ssize_t ret;
+
+        trace_f2fs_direct_IO_enter(inode, pos, count, WRITE);
+
+        if (iocb->ki_flags & IOCB_NOWAIT) {
+                /* f2fs_convert_inline_inode() and block allocation can block */
+                if (f2fs_has_inline_data(inode) ||
+                    !f2fs_overwrite_io(inode, pos, count)) {
+                        ret = -EAGAIN;
+                        goto out;
+                }
+
+                if (!down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
+                        ret = -EAGAIN;
+                        goto out;
+                }
+                if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
+                        up_read(&fi->i_gc_rwsem[WRITE]);
+                        ret = -EAGAIN;
+                        goto out;
+                }
+        } else {
+                ret = f2fs_convert_inline_inode(inode);
+                if (ret)
+                        goto out;
+
+                down_read(&fi->i_gc_rwsem[WRITE]);
+                if (do_opu)
+                        down_read(&fi->i_gc_rwsem[READ]);
+        }
+        if (whint_mode == WHINT_MODE_OFF)
+                iocb->ki_hint = WRITE_LIFE_NOT_SET;
+
+        /*
+         * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
+         * the higher-level function iomap_dio_rw() in order to ensure that the
+         * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
+         */
+        inc_page_count(sbi, F2FS_DIO_WRITE);
+        dio_flags = 0;
+        if (pos + count > inode->i_size)
+                dio_flags |= IOMAP_DIO_FORCE_WAIT;
+        dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
+                             &f2fs_iomap_dio_write_ops, dio_flags, 0);
+        if (IS_ERR_OR_NULL(dio)) {
+                ret = PTR_ERR_OR_ZERO(dio);
+                if (ret == -ENOTBLK)
+                        ret = 0;
+                if (ret != -EIOCBQUEUED)
+                        dec_page_count(sbi, F2FS_DIO_WRITE);
+        } else {
+                ret = iomap_dio_complete(dio);
+        }
+
+        if (whint_mode == WHINT_MODE_OFF)
+                iocb->ki_hint = hint;
+        if (do_opu)
+                up_read(&fi->i_gc_rwsem[READ]);
+        up_read(&fi->i_gc_rwsem[WRITE]);
+
+        if (ret < 0)
+                goto out;
+        if (pos + ret > inode->i_size)
+                f2fs_i_size_write(inode, pos + ret);
+        if (!do_opu)
+                set_inode_flag(inode, FI_UPDATE_WRITE);
+
+        if (iov_iter_count(from)) {
+                ssize_t ret2;
+                loff_t bufio_start_pos = iocb->ki_pos;
+
+                /*
+                 * The direct write was partial, so we need to fall back to a
+                 * buffered write for the remainder.
+                 */
+
+                ret2 = f2fs_buffered_write_iter(iocb, from);
+                if (iov_iter_count(from))
+                        f2fs_write_failed(inode, iocb->ki_pos);
+                if (ret2 < 0)
+                        goto out;
+
+                /*
+                 * Ensure that the pagecache pages are written to disk and
+                 * invalidated to preserve the expected O_DIRECT semantics.
+                 */
+                if (ret2 > 0) {
+                        loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
+
+                        ret += ret2;
+
+                        ret2 = filemap_write_and_wait_range(file->f_mapping,
+                                                            bufio_start_pos,
+                                                            bufio_end_pos);
+                        if (ret2 < 0)
+                                goto out;
+                        invalidate_mapping_pages(file->f_mapping,
+                                                 bufio_start_pos >> PAGE_SHIFT,
+                                                 bufio_end_pos >> PAGE_SHIFT);
+                }
+        } else {
+                /* iomap_dio_rw() already handled the generic_write_sync(). */
+                *may_need_sync = false;
+        }
+out:
+        trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
+        return ret;
+}
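
The fallback tail above follows the usual O_DIRECT contract: bytes that had to go through the page cache are written back and then dropped, so a later direct read cannot observe stale cached pages. Below is a minimal sketch of just the inclusive byte-to-page-index arithmetic fed to filemap_write_and_wait_range() and invalidate_mapping_pages(), assuming 4 KiB pages and made-up byte counts:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    int main(void)
    {
            /* Suppose the direct write stopped at byte 16384 and the
             * buffered fallback then wrote 6000 more bytes from there. */
            uint64_t bufio_start_pos = 16384;
            uint64_t ret2 = 6000;
            uint64_t bufio_end_pos = bufio_start_pos + ret2 - 1; /* inclusive */

            /* Same conversion as in f2fs_dio_write_iter(): inclusive
             * page indices 4..5 are written back and invalidated. */
            printf("write back and invalidate pages %llu..%llu\n",
                   (unsigned long long)(bufio_start_pos >> PAGE_SHIFT),
                   (unsigned long long)(bufio_end_pos >> PAGE_SHIFT));
            return 0;
    }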
-
-        if (is_inode_flag_set(inode, FI_NO_PREALLOC))
-                goto write;
-
-        if (iocb->ki_flags & IOCB_DIRECT) {
-                /*
-                 * Convert inline data for Direct I/O before entering
-                 * f2fs_direct_IO().
-                 */
-                err = f2fs_convert_inline_inode(inode);
-                if (err)
-                        goto out_err;
-                /*
-                 * If force_buffere_io() is true, we have to allocate
-                 * blocks all the time, since f2fs_direct_IO will fall
-                 * back to buffered IO.
-                 */
-                if (!f2fs_force_buffered_io(inode, iocb, from) &&
-                    f2fs_lfs_mode(F2FS_I_SB(inode)))
-                        goto write;
-        }
-        preallocated = true;
-        target_size = iocb->ki_pos + iov_iter_count(from);
-
-        err = f2fs_preallocate_blocks(iocb, from);
-        if (err) {
-out_err:
-                clear_inode_flag(inode, FI_NO_PREALLOC);
-                inode_unlock(inode);
-                ret = err;
-                goto out;
-        }
-write:
-        ret = __generic_file_write_iter(iocb, from);
-        clear_inode_flag(inode, FI_NO_PREALLOC);
-
-        /* if we couldn't write data, we should deallocate blocks. */
-        if (preallocated && i_size_read(inode) < target_size) {
-                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                filemap_invalidate_lock(inode->i_mapping);
-                f2fs_truncate(inode);
-                filemap_invalidate_unlock(inode->i_mapping);
-                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-        }
-
-        if (ret > 0)
-                f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
-        }
-unlock:
-        inode_unlock(inode);
-out:
-        trace_f2fs_file_write_iter(inode, iocb->ki_pos,
-                                   iov_iter_count(from), ret);
-        if (ret > 0)
+
+static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+        struct inode *inode = file_inode(iocb->ki_filp);
+        const loff_t orig_pos = iocb->ki_pos;
+        const size_t orig_count = iov_iter_count(from);
+        loff_t target_size;
+        bool dio;
+        bool may_need_sync = true;
+        int preallocated;
+        ssize_t ret;
+
+        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+                ret = -EIO;
+                goto out;
+        }
+
+        if (!f2fs_is_compress_backend_ready(inode)) {
+                ret = -EOPNOTSUPP;
+                goto out;
+        }
+
+        if (iocb->ki_flags & IOCB_NOWAIT) {
+                if (!inode_trylock(inode)) {
+                        ret = -EAGAIN;
+                        goto out;
+                }
+        } else {
+                inode_lock(inode);
+        }
+
+        ret = f2fs_write_checks(iocb, from);
+        if (ret <= 0)
+                goto out_unlock;
+
+        /* Determine whether we will do a direct write or a buffered write. */
+        dio = f2fs_should_use_dio(inode, iocb, from);
+
+        /* Possibly preallocate the blocks for the write. */
+        target_size = iocb->ki_pos + iov_iter_count(from);
+        preallocated = f2fs_preallocate_blocks(iocb, from, dio);
+        if (preallocated < 0)
+                ret = preallocated;
+        else
+                /* Do the actual write. */
+                ret = dio ?
+                        f2fs_dio_write_iter(iocb, from, &may_need_sync):
+                        f2fs_buffered_write_iter(iocb, from);
+
+        /* Don't leave any preallocated blocks around past i_size. */
+        if (preallocated && i_size_read(inode) < target_size) {
+                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+                filemap_invalidate_lock(inode->i_mapping);
+                if (!f2fs_truncate(inode))
+                        file_dont_truncate(inode);
+                filemap_invalidate_unlock(inode->i_mapping);
+                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+        } else {
+                file_dont_truncate(inode);
+        }
+
+        clear_inode_flag(inode, FI_PREALLOCATED_ALL);
+out_unlock:
+        inode_unlock(inode);
+out:
+        trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
+        if (ret > 0 && may_need_sync)
                 ret = generic_write_sync(iocb, ret);
         return ret;
 }
 
 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
                 int advice)
 {
         struct inode *inode;

--- 154 unchanged lines hidden ---
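
For context, the user-visible behavior after this change can be exercised from userspace: an O_DIRECT read whose offset, buffer address, and length are all filesystem-block aligned takes the new iomap direct I/O path, while one that is only logical-block aligned quietly falls back to buffered I/O rather than failing. A minimal sketch; the 4096-byte alignment is an assumption about the volume's block size, and the file path comes from the command line:

    #define _GNU_SOURCE     /* for O_DIRECT on glibc */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            char *buf;
            ssize_t n;
            int fd;

            if (argc != 2)
                    return 1;
            fd = open(argv[1], O_RDONLY | O_DIRECT);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* 4096-aligned buffer, offset 0, 4096-byte length: all three
             * components of the f2fs_should_use_dio() check pass, so this
             * read is served by the direct I/O path. */
            if (posix_memalign((void **)&buf, 4096, 4096))
                    return 1;
            n = pread(fd, buf, 4096, 0);
            printf("read %zd bytes\n", n);
            free(buf);
            close(fd);
            return 0;
    }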