Lines Matching +full:a +full:- +full:z

1 // SPDX-License-Identifier: GPL-2.0
32 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_read_iomap_begin() local
33 struct super_block *sb = inode->i_sb; in zonefs_read_iomap_begin()
38 * act as if there is a hole up to the file maximum size. in zonefs_read_iomap_begin()
40 mutex_lock(&zi->i_truncate_mutex); in zonefs_read_iomap_begin()
41 iomap->bdev = inode->i_sb->s_bdev; in zonefs_read_iomap_begin()
42 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); in zonefs_read_iomap_begin()
44 if (iomap->offset >= isize) { in zonefs_read_iomap_begin()
45 iomap->type = IOMAP_HOLE; in zonefs_read_iomap_begin()
46 iomap->addr = IOMAP_NULL_ADDR; in zonefs_read_iomap_begin()
47 iomap->length = length; in zonefs_read_iomap_begin()
49 iomap->type = IOMAP_MAPPED; in zonefs_read_iomap_begin()
50 iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset; in zonefs_read_iomap_begin()
51 iomap->length = isize - iomap->offset; in zonefs_read_iomap_begin()
53 mutex_unlock(&zi->i_truncate_mutex); in zonefs_read_iomap_begin()
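The read mapping above is pure arithmetic: the requested file offset is rounded down to the filesystem block size, then added to the zone start sector converted to a byte address. A standalone sketch of that computation, with the zone start, block size and offset values made up for the example and ALIGN_DOWN/SECTOR_SHIFT redefined locally rather than taken from kernel headers:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9				/* 512-byte sectors, as in the block layer */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* (a) must be a power of two */

int main(void)
{
	uint64_t z_sector = 524288;	/* hypothetical zone start, in 512 B sectors */
	uint64_t blocksize = 4096;	/* sb->s_blocksize */
	uint64_t offset = 10000;	/* file offset passed to the iomap callback */

	uint64_t aligned = ALIGN_DOWN(offset, blocksize);
	uint64_t addr = (z_sector << SECTOR_SHIFT) + aligned;

	/* addr is the device byte address backing this file offset */
	printf("offset %llu -> block aligned %llu -> device byte %llu\n",
	       (unsigned long long)offset, (unsigned long long)aligned,
	       (unsigned long long)addr);
	return 0;
}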
69 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_iomap_begin() local
70 struct super_block *sb = inode->i_sb; in zonefs_write_iomap_begin()
74 if (WARN_ON_ONCE(offset + length > z->z_capacity)) in zonefs_write_iomap_begin()
75 return -EIO; in zonefs_write_iomap_begin()
79 * checked when writes are issued, so warn if we see a page writeback in zonefs_write_iomap_begin()
82 if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT))) in zonefs_write_iomap_begin()
83 return -EIO; in zonefs_write_iomap_begin()
90 mutex_lock(&zi->i_truncate_mutex); in zonefs_write_iomap_begin()
91 iomap->bdev = inode->i_sb->s_bdev; in zonefs_write_iomap_begin()
92 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); in zonefs_write_iomap_begin()
93 iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset; in zonefs_write_iomap_begin()
95 if (iomap->offset >= isize) { in zonefs_write_iomap_begin()
96 iomap->type = IOMAP_UNWRITTEN; in zonefs_write_iomap_begin()
97 iomap->length = z->z_capacity - iomap->offset; in zonefs_write_iomap_begin()
99 iomap->type = IOMAP_MAPPED; in zonefs_write_iomap_begin()
100 iomap->length = isize - iomap->offset; in zonefs_write_iomap_begin()
102 mutex_unlock(&zi->i_truncate_mutex); in zonefs_write_iomap_begin()
130 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_map_blocks() local
132 if (WARN_ON_ONCE(zonefs_zone_is_seq(z))) in zonefs_write_map_blocks()
133 return -EIO; in zonefs_write_map_blocks()
135 return -EIO; in zonefs_write_map_blocks()
138 if (offset >= wpc->iomap.offset && in zonefs_write_map_blocks()
139 offset < wpc->iomap.offset + wpc->iomap.length) in zonefs_write_map_blocks()
143 z->z_capacity - offset, in zonefs_write_map_blocks()
144 IOMAP_WRITE, &wpc->iomap, NULL); in zonefs_write_map_blocks()
165 zonefs_err(inode->i_sb, in zonefs_swap_activate()
166 "swap file: not a conventional zone file\n"); in zonefs_swap_activate()
167 return -EINVAL; in zonefs_swap_activate()
190 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_truncate() local
197 * only down to a 0 size, which is equivalent to a zone reset, and to in zonefs_file_truncate()
198 * the maximum file size, which is equivalent to a zone finish. in zonefs_file_truncate()
200 if (!zonefs_zone_is_seq(z)) in zonefs_file_truncate()
201 return -EPERM; in zonefs_file_truncate()
205 else if (isize == z->z_capacity) in zonefs_file_truncate()
208 return -EPERM; in zonefs_file_truncate()
213 filemap_invalidate_lock(inode->i_mapping); in zonefs_file_truncate()
216 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_truncate()
230 if (z->z_flags & ZONEFS_ZONE_OPEN) { in zonefs_file_truncate()
232 * Truncating a zone to EMPTY or FULL is the equivalent of in zonefs_file_truncate()
233 * closing the zone. For a truncation to 0, we need to in zonefs_file_truncate()
234 * re-open the zone to ensure new writes can be processed. in zonefs_file_truncate()
235 * For a truncation to the maximum file size, the zone is in zonefs_file_truncate()
242 z->z_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_file_truncate()
247 z->z_wpoffset = isize; in zonefs_file_truncate()
251 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_truncate()
252 filemap_invalidate_unlock(inode->i_mapping); in zonefs_file_truncate()
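As the comments above spell out, truncation is how user space resets or finishes a sequential zone file: a new size of 0 maps to a zone reset, the zone capacity maps to a zone finish, and any other size (or truncating a conventional zone file) is refused with EPERM. A sketch of both operations from user space, assuming a zonefs mount at /mnt/zonefs and a 256 MiB zone capacity, neither of which comes from the listing above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/zonefs/seq/0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Size 0: equivalent to a zone reset, all zone data is dropped. */
	if (ftruncate(fd, 0) < 0)
		perror("reset");

	/* Size == zone capacity (assumed 256 MiB here): equivalent to a zone
	 * finish, after which the file is full and accepts no further writes.
	 */
	if (ftruncate(fd, 256LL << 20) < 0)
		perror("finish");

	/* Any other size would fail with EPERM. */
	close(fd);
	return 0;
}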
264 return -EPERM; in zonefs_file_fsync()
273 ret = blkdev_issue_flush(inode->i_sb->s_bdev); in zonefs_file_fsync()
283 struct inode *inode = file_inode(vmf->vma->vm_file); in zonefs_filemap_page_mkwrite()
296 sb_start_pagefault(inode->i_sb); in zonefs_filemap_page_mkwrite()
297 file_update_time(vmf->vma->vm_file); in zonefs_filemap_page_mkwrite()
300 filemap_invalidate_lock_shared(inode->i_mapping); in zonefs_filemap_page_mkwrite()
302 filemap_invalidate_unlock_shared(inode->i_mapping); in zonefs_filemap_page_mkwrite()
304 sb_end_pagefault(inode->i_sb); in zonefs_filemap_page_mkwrite()
323 (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) in zonefs_file_mmap()
324 return -EINVAL; in zonefs_file_mmap()
327 vma->vm_ops = &zonefs_file_vm_ops; in zonefs_file_mmap()
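The mmap check above rejects shared writable mappings of sequential zone files, since pages dirtied through the page cache could not be written back sequentially. What user space sees, with the file path assumed for the example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/zonefs/seq/0", O_RDWR);
	if (fd < 0)
		return 1;

	/* MAP_SHARED together with a writable file description trips the
	 * VM_SHARED/VM_MAYWRITE test in zonefs_file_mmap() and fails with
	 * EINVAL; private mappings and mappings of files opened read-only
	 * still work.
	 */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap");	/* expected: Invalid argument */
	else
		munmap(p, 4096);

	close(fd);
	return 0;
}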
347 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_write_dio_end_io()
363 * but that is not a problem since a write completed in zonefs_file_write_dio_end_io()
368 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_write_dio_end_io()
369 if (i_size_read(inode) < iocb->ki_pos + size) { in zonefs_file_write_dio_end_io()
370 zonefs_update_stats(inode, iocb->ki_pos + size); in zonefs_file_write_dio_end_io()
371 zonefs_i_size_write(inode, iocb->ki_pos + size); in zonefs_file_write_dio_end_io()
373 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_write_dio_end_io()
385 * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
391 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_check_limits() local
393 loff_t max_size = z->z_capacity; in zonefs_write_check_limits()
398 return -EFBIG; in zonefs_write_check_limits()
400 count = min(count, limit - pos); in zonefs_write_check_limits()
403 if (!(file->f_flags & O_LARGEFILE)) in zonefs_write_check_limits()
407 return -EFBIG; in zonefs_write_check_limits()
409 return min(count, max_size - pos); in zonefs_write_check_limits()
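zonefs_write_check_limits() clamps a write so that it never extends past the zone capacity: a write that starts below the limit but would cross it is shortened, and a write starting at or beyond the limit fails with EFBIG. A tiny standalone illustration of that clamping, with a 256 MiB capacity assumed and the rlimit/O_LARGEFILE handling of the real function omitted:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the capacity clamping: shorten the count or reject the write. */
static int64_t check_limits(int64_t pos, int64_t count, int64_t max_size)
{
	if (pos >= max_size)
		return -EFBIG;
	return count < max_size - pos ? count : max_size - pos;
}

int main(void)
{
	int64_t cap = 256LL << 20;	/* assumed zone capacity */

	/* 8 KiB write starting 4 KiB before capacity -> shortened to 4 KiB. */
	printf("%lld\n", (long long)check_limits(cap - 4096, 8192, cap));
	/* Write starting exactly at capacity -> -EFBIG. */
	printf("%lld\n", (long long)check_limits(cap, 4096, cap));
	return 0;
}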
414 struct file *file = iocb->ki_filp; in zonefs_write_checks()
417 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_write_checks() local
421 return -ETXTBSY; in zonefs_write_checks()
426 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) in zonefs_write_checks()
427 return -EINVAL; in zonefs_write_checks()
429 if (iocb->ki_flags & IOCB_APPEND) { in zonefs_write_checks()
430 if (zonefs_zone_is_cnv(z)) in zonefs_write_checks()
431 return -EINVAL; in zonefs_write_checks()
432 mutex_lock(&zi->i_truncate_mutex); in zonefs_write_checks()
433 iocb->ki_pos = z->z_wpoffset; in zonefs_write_checks()
434 mutex_unlock(&zi->i_truncate_mutex); in zonefs_write_checks()
437 count = zonefs_write_check_limits(file, iocb->ki_pos, in zonefs_write_checks()
451 * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
452 * elevator feature is being used (e.g. mq-deadline). The block layer always
458 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_dio_write()
460 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_dio_write() local
461 struct super_block *sb = inode->i_sb; in zonefs_file_dio_write()
469 if (zonefs_zone_is_seq(z) && !is_sync_kiocb(iocb) && in zonefs_file_dio_write()
470 (iocb->ki_flags & IOCB_NOWAIT)) in zonefs_file_dio_write()
471 return -EOPNOTSUPP; in zonefs_file_dio_write()
473 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_dio_write()
475 return -EAGAIN; in zonefs_file_dio_write()
486 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { in zonefs_file_dio_write()
487 ret = -EINVAL; in zonefs_file_dio_write()
492 if (zonefs_zone_is_seq(z)) { in zonefs_file_dio_write()
493 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
494 if (iocb->ki_pos != z->z_wpoffset) { in zonefs_file_dio_write()
495 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
496 ret = -EINVAL; in zonefs_file_dio_write()
505 z->z_wpoffset += count; in zonefs_file_dio_write()
507 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
517 if (ret == -ENOTBLK) in zonefs_file_dio_write()
518 ret = -EBUSY; in zonefs_file_dio_write()
521 * For a failed IO or partial completion, trigger error recovery in zonefs_file_dio_write()
522 * to update the zone write pointer offset to a correct value. in zonefs_file_dio_write()
528 if (zonefs_zone_is_seq(z)) { in zonefs_file_dio_write()
530 ret = -EIO; in zonefs_file_dio_write()
531 if (ret < 0 && ret != -EIOCBQUEUED) in zonefs_file_dio_write()
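Putting the checks above together: a write to a sequential zone file must use direct I/O, be aligned to the filesystem block size, and land exactly at the zone write pointer; opening with O_APPEND lets zonefs_write_checks() pick the write pointer as the position. A minimal user-space writer sketch, with the mount path, the 4 KiB memory alignment and the 1 MiB I/O size all assumptions for the example:

#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* O_APPEND makes the kernel start the write at the current zone
	 * write pointer (iocb->ki_pos = z->z_wpoffset above).
	 */
	int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT | O_APPEND);
	if (fd < 0)
		return 1;

	size_t iosz = 1024 * 1024;	/* must be a multiple of sb->s_blocksize */
	void *buf;
	if (posix_memalign(&buf, 4096, iosz))	/* O_DIRECT wants aligned memory */
		return 1;
	memset(buf, 0xab, iosz);

	/* The write lands at the write pointer; a misaligned position or
	 * length is rejected with EINVAL, and a buffered write to a
	 * sequential file is refused outright.
	 */
	ssize_t ret = write(fd, buf, iosz);

	free(buf);
	close(fd);
	return ret == (ssize_t)iosz ? 0 : 1;
}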
544 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_buffered_write()
552 return -EIO; in zonefs_file_buffered_write()
554 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_buffered_write()
556 return -EAGAIN; in zonefs_file_buffered_write()
566 if (ret == -EIO) in zonefs_file_buffered_write()
579 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_write_iter()
580 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_write_iter() local
583 return -EPERM; in zonefs_file_write_iter()
585 if (sb_rdonly(inode->i_sb)) in zonefs_file_write_iter()
586 return -EROFS; in zonefs_file_write_iter()
589 if (iocb->ki_pos >= z->z_capacity) in zonefs_file_write_iter()
590 return -EFBIG; in zonefs_file_write_iter()
592 if (iocb->ki_flags & IOCB_DIRECT) { in zonefs_file_write_iter()
595 if (ret != -ENOTBLK) in zonefs_file_write_iter()
606 zonefs_io_error(file_inode(iocb->ki_filp), false); in zonefs_file_read_dio_end_io()
619 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_read_iter()
621 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_read_iter() local
622 struct super_block *sb = inode->i_sb; in zonefs_file_read_iter()
627 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777))) in zonefs_file_read_iter()
628 return -EPERM; in zonefs_file_read_iter()
630 if (iocb->ki_pos >= z->z_capacity) in zonefs_file_read_iter()
633 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_read_iter()
635 return -EAGAIN; in zonefs_file_read_iter()
641 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
643 if (iocb->ki_pos >= isize) { in zonefs_file_read_iter()
644 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
648 iov_iter_truncate(to, isize - iocb->ki_pos); in zonefs_file_read_iter()
649 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
651 if (iocb->ki_flags & IOCB_DIRECT) { in zonefs_file_read_iter()
654 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { in zonefs_file_read_iter()
655 ret = -EINVAL; in zonefs_file_read_iter()
658 file_accessed(iocb->ki_filp); in zonefs_file_read_iter()
663 if (ret == -EIO) in zonefs_file_read_iter()
679 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_file_splice_read() local
684 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777))) in zonefs_file_splice_read()
685 return -EPERM; in zonefs_file_splice_read()
687 if (*ppos >= z->z_capacity) in zonefs_file_splice_read()
693 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_splice_read()
698 len = min_t(loff_t, len, isize - *ppos); in zonefs_file_splice_read()
699 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_splice_read()
703 if (ret == -EIO) in zonefs_file_splice_read()
720 if (!(file->f_mode & FMODE_WRITE)) in zonefs_seq_file_need_wro()
729 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_seq_file_write_open() local
732 mutex_lock(&zi->i_truncate_mutex); in zonefs_seq_file_write_open()
734 if (!zi->i_wr_refcnt) { in zonefs_seq_file_write_open()
735 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb); in zonefs_seq_file_write_open()
736 unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
738 if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { in zonefs_seq_file_write_open()
740 if (sbi->s_max_wro_seq_files in zonefs_seq_file_write_open()
741 && wro > sbi->s_max_wro_seq_files) { in zonefs_seq_file_write_open()
742 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
743 ret = -EBUSY; in zonefs_seq_file_write_open()
747 if (i_size_read(inode) < z->z_capacity) { in zonefs_seq_file_write_open()
751 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
754 z->z_flags |= ZONEFS_ZONE_OPEN; in zonefs_seq_file_write_open()
760 zi->i_wr_refcnt++; in zonefs_seq_file_write_open()
763 mutex_unlock(&zi->i_truncate_mutex); in zonefs_seq_file_write_open()
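With the "explicit_open" mount option, the first write open of a sequential file issues a zone open command and counts against the device's open zone limit, which is what the s_wro_seq_files accounting above implements. A sketch of the user-visible effect, assuming a device limit of two open zones and hypothetical file names:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a = open("/mnt/zonefs/seq/0", O_WRONLY);
	int b = open("/mnt/zonefs/seq/1", O_WRONLY);
	int c = open("/mnt/zonefs/seq/2", O_WRONLY);

	/* With two zones already open for writing, the third open exceeds
	 * s_max_wro_seq_files and fails with EBUSY (the atomic_inc_return()
	 * / atomic_dec() accounting above).
	 */
	if (c < 0 && errno == EBUSY)
		fprintf(stderr, "open zone limit reached\n");

	if (a >= 0)
		close(a);
	if (b >= 0)
		close(b);
	if (c >= 0)
		close(c);
	return 0;
}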
772 file->f_mode |= FMODE_CAN_ODIRECT; in zonefs_file_open()
786 struct zonefs_zone *z = zonefs_inode_zone(inode); in zonefs_seq_file_write_close() local
787 struct super_block *sb = inode->i_sb; in zonefs_seq_file_write_close()
791 mutex_lock(&zi->i_truncate_mutex); in zonefs_seq_file_write_close()
793 zi->i_wr_refcnt--; in zonefs_seq_file_write_close()
794 if (zi->i_wr_refcnt) in zonefs_seq_file_write_close()
802 if (z->z_flags & ZONEFS_ZONE_OPEN) { in zonefs_seq_file_write_close()
807 * Leaving zones explicitly open may lead to a state in zonefs_seq_file_write_close()
810 * read-only. in zonefs_seq_file_write_close()
812 if (z->z_flags & ZONEFS_ZONE_OPEN && in zonefs_seq_file_write_close()
813 !(sb->s_flags & SB_RDONLY)) { in zonefs_seq_file_write_close()
816 z->z_sector, ret); in zonefs_seq_file_write_close()
818 "remounting filesystem read-only\n"); in zonefs_seq_file_write_close()
819 sb->s_flags |= SB_RDONLY; in zonefs_seq_file_write_close()
824 z->z_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_seq_file_write_close()
828 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_close()
831 mutex_unlock(&zi->i_truncate_mutex); in zonefs_seq_file_write_close()
837 * If we explicitly open a zone we must close it again as well, but the in zonefs_file_release()
839 * the zone has gone offline or read-only). Make sure we don't fail the in zonefs_file_release()
840 * close(2) for user-space. in zonefs_file_release()