xref: /openbmc/linux/fs/zonefs/super.c (revision 78bb17f7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Simple file system for zoned block devices exposing zones as files.
4  *
5  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
6  */
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/magic.h>
10 #include <linux/iomap.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <linux/blkdev.h>
14 #include <linux/statfs.h>
15 #include <linux/writeback.h>
16 #include <linux/quotaops.h>
17 #include <linux/seq_file.h>
18 #include <linux/parser.h>
19 #include <linux/uio.h>
20 #include <linux/mman.h>
21 #include <linux/sched/mm.h>
22 #include <linux/crc32.h>
23 
24 #include "zonefs.h"
25 
26 static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
27 			      unsigned int flags, struct iomap *iomap,
28 			      struct iomap *srcmap)
29 {
30 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
31 	struct super_block *sb = inode->i_sb;
32 	loff_t isize;
33 
34 	/* All I/Os should always be within the file maximum size */
35 	if (WARN_ON_ONCE(offset + length > zi->i_max_size))
36 		return -EIO;
37 
38 	/*
39 	 * Sequential zones can only accept direct writes. This is already
40 	 * checked when writes are issued, so warn if we see a page writeback
41 	 * operation.
42 	 */
43 	if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
44 			 (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
45 		return -EIO;
46 
47 	/*
48 	 * For conventional zones, all blocks are always mapped. For sequential
49 	 * zones, all blocks are always mapped below the inode size (zone
50 	 * write pointer) and unwritten beyond.
51 	 */
52 	mutex_lock(&zi->i_truncate_mutex);
53 	isize = i_size_read(inode);
54 	if (offset >= isize)
55 		iomap->type = IOMAP_UNWRITTEN;
56 	else
57 		iomap->type = IOMAP_MAPPED;
58 	if (flags & IOMAP_WRITE)
59 		length = zi->i_max_size - offset;
60 	else
61 		length = min(length, isize - offset);
62 	mutex_unlock(&zi->i_truncate_mutex);
63 
64 	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
65 	iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
66 	iomap->bdev = inode->i_sb->s_bdev;
67 	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
68 
69 	return 0;
70 }
71 
72 static const struct iomap_ops zonefs_iomap_ops = {
73 	.iomap_begin	= zonefs_iomap_begin,
74 };
75 
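/*
 * Read path: rely on the generic iomap helpers using the zonefs block
 * mapping defined by zonefs_iomap_begin().
 */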
76 static int zonefs_readpage(struct file *unused, struct page *page)
77 {
78 	return iomap_readpage(page, &zonefs_iomap_ops);
79 }
80 
81 static void zonefs_readahead(struct readahead_control *rac)
82 {
83 	iomap_readahead(rac, &zonefs_iomap_ops);
84 }
85 
86 /*
87  * Map blocks for page writeback. This is used only on conventional zone files,
88  * which implies that the page range can only be within the fixed inode size.
89  */
90 static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
91 			     struct inode *inode, loff_t offset)
92 {
93 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
94 
95 	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
96 		return -EIO;
97 	if (WARN_ON_ONCE(offset >= i_size_read(inode)))
98 		return -EIO;
99 
100 	/* If the mapping is already OK, nothing needs to be done */
101 	if (offset >= wpc->iomap.offset &&
102 	    offset < wpc->iomap.offset + wpc->iomap.length)
103 		return 0;
104 
105 	return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
106 				  IOMAP_WRITE, &wpc->iomap, NULL);
107 }
108 
109 static const struct iomap_writeback_ops zonefs_writeback_ops = {
110 	.map_blocks		= zonefs_map_blocks,
111 };
112 
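/*
 * Page writeback uses the generic iomap writeback helpers. This path is only
 * taken for conventional zone files since sequential zone files accept only
 * direct writes.
 */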
113 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
114 {
115 	struct iomap_writepage_ctx wpc = { };
116 
117 	return iomap_writepage(page, wbc, &wpc, &zonefs_writeback_ops);
118 }
119 
120 static int zonefs_writepages(struct address_space *mapping,
121 			     struct writeback_control *wbc)
122 {
123 	struct iomap_writepage_ctx wpc = { };
124 
125 	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
126 }
127 
128 static const struct address_space_operations zonefs_file_aops = {
129 	.readpage		= zonefs_readpage,
130 	.readahead		= zonefs_readahead,
131 	.writepage		= zonefs_writepage,
132 	.writepages		= zonefs_writepages,
133 	.set_page_dirty		= iomap_set_page_dirty,
134 	.releasepage		= iomap_releasepage,
135 	.invalidatepage		= iomap_invalidatepage,
136 	.migratepage		= iomap_migrate_page,
137 	.is_partially_uptodate	= iomap_is_partially_uptodate,
138 	.error_remove_page	= generic_error_remove_page,
139 	.direct_IO		= noop_direct_IO,
140 };
141 
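/*
 * Update the volume used block count to reflect a change of an inode size.
 */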
142 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
143 {
144 	struct super_block *sb = inode->i_sb;
145 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
146 	loff_t old_isize = i_size_read(inode);
147 	loff_t nr_blocks;
148 
149 	if (new_isize == old_isize)
150 		return;
151 
152 	spin_lock(&sbi->s_lock);
153 
154 	/*
155 	 * This may be called for an update after an IO error, so the new
156 	 * size may be either smaller or larger than the old size.
157 	 */
158 	if (new_isize < old_isize) {
159 		nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
160 		if (sbi->s_used_blocks > nr_blocks)
161 			sbi->s_used_blocks -= nr_blocks;
162 		else
163 			sbi->s_used_blocks = 0;
164 	} else {
165 		sbi->s_used_blocks +=
166 			(new_isize - old_isize) >> sb->s_blocksize_bits;
167 		if (sbi->s_used_blocks > sbi->s_blocks)
168 			sbi->s_used_blocks = sbi->s_blocks;
169 	}
170 
171 	spin_unlock(&sbi->s_lock);
172 }
173 
174 /*
175  * Check a zone condition and adjust its file inode access permissions for
176  * offline and readonly zones. Return the inode size corresponding to the
177  * amount of readable data in the zone.
178  */
179 static loff_t zonefs_check_zone_condition(struct inode *inode,
180 					  struct blk_zone *zone, bool warn,
181 					  bool mount)
182 {
183 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
184 
185 	switch (zone->cond) {
186 	case BLK_ZONE_COND_OFFLINE:
187 		/*
188 		 * Dead zone: make the inode immutable, disable all accesses
189 		 * and set the file size to 0 (zone wp set to zone start).
190 		 */
191 		if (warn)
192 			zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
193 				    inode->i_ino);
194 		inode->i_flags |= S_IMMUTABLE;
195 		inode->i_mode &= ~0777;
196 		zone->wp = zone->start;
197 		return 0;
198 	case BLK_ZONE_COND_READONLY:
199 		/*
200 		 * The write pointer of read-only zones is invalid. If such a
201 		 * zone is found during mount, the file size cannot be retrieved
202 		 * so we treat the zone as offline (mount == true case).
203 		 * Otherwise, keep the file size as it was when last updated
204 		 * so that the user can recover data. In both cases, writes are
205 		 * always disabled for the zone.
206 		 */
207 		if (warn)
208 			zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
209 				    inode->i_ino);
210 		inode->i_flags |= S_IMMUTABLE;
211 		if (mount) {
212 			zone->cond = BLK_ZONE_COND_OFFLINE;
213 			inode->i_mode &= ~0777;
214 			zone->wp = zone->start;
215 			return 0;
216 		}
217 		inode->i_mode &= ~0222;
218 		return i_size_read(inode);
219 	default:
220 		if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
221 			return zi->i_max_size;
222 		return (zone->wp - zone->start) << SECTOR_SHIFT;
223 	}
224 }
225 
226 struct zonefs_ioerr_data {
227 	struct inode	*inode;
228 	bool		write;
229 };
230 
231 static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
232 			      void *data)
233 {
234 	struct zonefs_ioerr_data *err = data;
235 	struct inode *inode = err->inode;
236 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
237 	struct super_block *sb = inode->i_sb;
238 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
239 	loff_t isize, data_size;
240 
241 	/*
242 	 * Check the zone condition: if the zone is not "bad" (offline or
243 	 * read-only), read errors are simply signaled to the IO issuer as long
244 	 * as there is no inconsistency between the inode size and the amount of
245 	 * data written in the zone (data_size).
246 	 */
247 	data_size = zonefs_check_zone_condition(inode, zone, true, false);
248 	isize = i_size_read(inode);
249 	if (zone->cond != BLK_ZONE_COND_OFFLINE &&
250 	    zone->cond != BLK_ZONE_COND_READONLY &&
251 	    !err->write && isize == data_size)
252 		return 0;
253 
254 	/*
255 	 * At this point, we detected either a bad zone or an inconsistency
256 	 * between the inode size and the amount of data written in the zone.
257 	 * For the latter case, the cause may be a write IO error or an external
258 	 * action on the device. Two error patterns exist:
259 	 * 1) The inode size is lower than the amount of data in the zone:
260 	 *    a write operation partially failed and data was written at the end
261 	 *    of the file. This can happen in the case of a large direct IO
262 	 *    needing several BIOs and/or write requests to be processed.
263 	 * 2) The inode size is larger than the amount of data in the zone:
264 	 *    this can happen with a deferred write error when a device side
265 	 *    write cache is used, after successful write IO completions were
266 	 *    reported. Other possibilities are (a) an external corruption,
267 	 *    e.g. an application reset the zone directly, or (b) the device
268 	 *    has a serious problem (e.g. firmware bug).
269 	 *
270 	 * In all cases, warn about inode size inconsistency and handle the
271 	 * IO error according to the zone condition and to the mount options.
272 	 */
273 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
274 		zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
275 			    inode->i_ino, isize, data_size);
276 
277 	/*
278 	 * First handle bad zones signaled by hardware. The mount options
279 	 * errors=zone-ro and errors=zone-offline result in changing the
280 	 * zone condition to read-only and offline respectively, as if the
281 	 * condition was signaled by the hardware.
282 	 */
283 	if (zone->cond == BLK_ZONE_COND_OFFLINE ||
284 	    sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
285 		zonefs_warn(sb, "inode %lu: read/write access disabled\n",
286 			    inode->i_ino);
287 		if (zone->cond != BLK_ZONE_COND_OFFLINE) {
288 			zone->cond = BLK_ZONE_COND_OFFLINE;
289 			data_size = zonefs_check_zone_condition(inode, zone,
290 								false, false);
291 		}
292 	} else if (zone->cond == BLK_ZONE_COND_READONLY ||
293 		   sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
294 		zonefs_warn(sb, "inode %lu: write access disabled\n",
295 			    inode->i_ino);
296 		if (zone->cond != BLK_ZONE_COND_READONLY) {
297 			zone->cond = BLK_ZONE_COND_READONLY;
298 			data_size = zonefs_check_zone_condition(inode, zone,
299 								false, false);
300 		}
301 	}
302 
303 	/*
304 	 * If errors=remount-ro was specified, any error results in remounting
305 	 * the volume as read-only.
306 	 */
307 	if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
308 		zonefs_warn(sb, "remounting filesystem read-only\n");
309 		sb->s_flags |= SB_RDONLY;
310 	}
311 
312 	/*
313 	 * Update block usage stats and the inode size to prevent access to
314 	 * invalid data.
315 	 */
316 	zonefs_update_stats(inode, data_size);
317 	i_size_write(inode, data_size);
318 	zi->i_wpoffset = data_size;
319 
320 	return 0;
321 }
322 
323 /*
324  * When a file IO error occurs, check the file zone to see if there is a change
325  * in the zone condition (e.g. offline or read-only). For a failed write to a
326  * sequential zone, the zone write pointer position must also be checked to
327  * correct, if needed, the file size and zonefs inode write pointer offset
328  * (which can be out of sync with the drive due to partial write failures).
329  */
330 static void zonefs_io_error(struct inode *inode, bool write)
331 {
332 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
333 	struct super_block *sb = inode->i_sb;
334 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
335 	unsigned int noio_flag;
336 	unsigned int nr_zones =
337 		zi->i_max_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
338 	struct zonefs_ioerr_data err = {
339 		.inode = inode,
340 		.write = write,
341 	};
342 	int ret;
343 
344 	mutex_lock(&zi->i_truncate_mutex);
345 
346 	/*
347 	 * Memory allocations in blkdev_report_zones() can trigger a memory
348 	 * reclaim which may in turn cause a recursion into zonefs as well as
349 	 * struct request allocations for the same device. The former case may
350 	 * end up in a deadlock on the inode truncate mutex, while the latter
351 	 * may prevent IO forward progress. Executing the report zones under
352 	 * the GFP_NOIO context avoids both problems.
353 	 */
354 	noio_flag = memalloc_noio_save();
355 	ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
356 				  zonefs_io_error_cb, &err);
357 	if (ret != nr_zones)
358 		zonefs_err(sb, "Get inode %lu zone information failed %d\n",
359 			   inode->i_ino, ret);
360 	memalloc_noio_restore(noio_flag);
361 
362 	mutex_unlock(&zi->i_truncate_mutex);
363 }
364 
365 static int zonefs_file_truncate(struct inode *inode, loff_t isize)
366 {
367 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
368 	loff_t old_isize;
369 	enum req_opf op;
370 	int ret = 0;
371 
372 	/*
373 	 * Only sequential zone files can be truncated and truncation is allowed
374 	 * only down to a 0 size, which is equivalent to a zone reset, and to
375 	 * the maximum file size, which is equivalent to a zone finish.
376 	 */
377 	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
378 		return -EPERM;
379 
380 	if (!isize)
381 		op = REQ_OP_ZONE_RESET;
382 	else if (isize == zi->i_max_size)
383 		op = REQ_OP_ZONE_FINISH;
384 	else
385 		return -EPERM;
386 
387 	inode_dio_wait(inode);
388 
389 	/* Serialize against page faults */
390 	down_write(&zi->i_mmap_sem);
391 
392 	/* Serialize against zonefs_iomap_begin() */
393 	mutex_lock(&zi->i_truncate_mutex);
394 
395 	old_isize = i_size_read(inode);
396 	if (isize == old_isize)
397 		goto unlock;
398 
399 	ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
400 			       zi->i_max_size >> SECTOR_SHIFT, GFP_NOFS);
401 	if (ret) {
402 		zonefs_err(inode->i_sb,
403 			   "Zone management operation at %llu failed %d",
404 			   zi->i_zsector, ret);
405 		goto unlock;
406 	}
407 
408 	zonefs_update_stats(inode, isize);
409 	truncate_setsize(inode, isize);
410 	zi->i_wpoffset = isize;
411 
412 unlock:
413 	mutex_unlock(&zi->i_truncate_mutex);
414 	up_write(&zi->i_mmap_sem);
415 
416 	return ret;
417 }
418 
419 static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
420 {
421 	struct inode *inode = d_inode(dentry);
422 	int ret;
423 
424 	if (unlikely(IS_IMMUTABLE(inode)))
425 		return -EPERM;
426 
427 	ret = setattr_prepare(dentry, iattr);
428 	if (ret)
429 		return ret;
430 
431 	/*
432 	 * Since files and directories cannot be created nor deleted, do not
433 	 * allow setting any write attributes on the sub-directories grouping
434 	 * files by zone type.
435 	 */
436 	if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
437 	    (iattr->ia_mode & 0222))
438 		return -EPERM;
439 
440 	if (((iattr->ia_valid & ATTR_UID) &&
441 	     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
442 	    ((iattr->ia_valid & ATTR_GID) &&
443 	     !gid_eq(iattr->ia_gid, inode->i_gid))) {
444 		ret = dquot_transfer(inode, iattr);
445 		if (ret)
446 			return ret;
447 	}
448 
449 	if (iattr->ia_valid & ATTR_SIZE) {
450 		ret = zonefs_file_truncate(inode, iattr->ia_size);
451 		if (ret)
452 			return ret;
453 	}
454 
455 	setattr_copy(inode, iattr);
456 
457 	return 0;
458 }
459 
460 static const struct inode_operations zonefs_file_inode_operations = {
461 	.setattr	= zonefs_inode_setattr,
462 };
463 
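/*
 * Sync a zone file: flush dirty pages of conventional zone files and issue a
 * device cache flush. An error triggers a check of the inode zone condition.
 */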
464 static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
465 			     int datasync)
466 {
467 	struct inode *inode = file_inode(file);
468 	int ret = 0;
469 
470 	if (unlikely(IS_IMMUTABLE(inode)))
471 		return -EPERM;
472 
473 	/*
474 	 * Since only direct writes are allowed in sequential files, page cache
475 	 * flush is needed only for conventional zone files.
476 	 */
477 	if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
478 		ret = file_write_and_wait_range(file, start, end);
479 	if (!ret)
480 		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
481 
482 	if (ret)
483 		zonefs_io_error(inode, true);
484 
485 	return ret;
486 }
487 
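/*
 * Page fault handler: serialize faults against truncation using i_mmap_sem.
 */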
488 static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
489 {
490 	struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
491 	vm_fault_t ret;
492 
493 	down_read(&zi->i_mmap_sem);
494 	ret = filemap_fault(vmf);
495 	up_read(&zi->i_mmap_sem);
496 
497 	return ret;
498 }
499 
500 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
501 {
502 	struct inode *inode = file_inode(vmf->vma->vm_file);
503 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
504 	vm_fault_t ret;
505 
506 	if (unlikely(IS_IMMUTABLE(inode)))
507 		return VM_FAULT_SIGBUS;
508 
509 	/*
510 	 * Sanity check: only conventional zone files can have shared
511 	 * writeable mappings.
512 	 */
513 	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
514 		return VM_FAULT_NOPAGE;
515 
516 	sb_start_pagefault(inode->i_sb);
517 	file_update_time(vmf->vma->vm_file);
518 
519 	/* Serialize against truncates */
520 	down_read(&zi->i_mmap_sem);
521 	ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
522 	up_read(&zi->i_mmap_sem);
523 
524 	sb_end_pagefault(inode->i_sb);
525 	return ret;
526 }
527 
528 static const struct vm_operations_struct zonefs_file_vm_ops = {
529 	.fault		= zonefs_filemap_fault,
530 	.map_pages	= filemap_map_pages,
531 	.page_mkwrite	= zonefs_filemap_page_mkwrite,
532 };
533 
534 static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
535 {
536 	/*
537 	 * Conventional zones accept random writes, so their files can support
538 	 * shared writable mappings. For sequential zone files, only read
539 	 * mappings are possible since there are no guarantees for write
540 	 * ordering between msync() and page cache writeback.
541 	 */
542 	if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
543 	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
544 		return -EINVAL;
545 
546 	file_accessed(file);
547 	vma->vm_ops = &zonefs_file_vm_ops;
548 
549 	return 0;
550 }
551 
552 static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
553 {
554 	loff_t isize = i_size_read(file_inode(file));
555 
556 	/*
557 	 * Seeks are limited to below the zone size for conventional zones
558 	 * and below the zone write pointer for sequential zones. In both
559 	 * cases, this limit is the inode size.
560 	 */
561 	return generic_file_llseek_size(file, offset, whence, isize, isize);
562 }
563 
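/*
 * Direct write completion: check the zone condition on error, and for
 * sequential zone files, advance the inode size to the end of the
 * successfully written data.
 */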
564 static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
565 					int error, unsigned int flags)
566 {
567 	struct inode *inode = file_inode(iocb->ki_filp);
568 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
569 
570 	if (error) {
571 		zonefs_io_error(inode, true);
572 		return error;
573 	}
574 
575 	if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
576 		/*
577 		 * Note that we may be seeing completions out of order,
578 		 * but that is not a problem since a write completed
579 		 * successfully necessarily means that all preceding writes
580 		 * were also successful. So we can safely increase the inode
581 		 * size to the write end location.
582 		 */
583 		mutex_lock(&zi->i_truncate_mutex);
584 		if (i_size_read(inode) < iocb->ki_pos + size) {
585 			zonefs_update_stats(inode, iocb->ki_pos + size);
586 			i_size_write(inode, iocb->ki_pos + size);
587 		}
588 		mutex_unlock(&zi->i_truncate_mutex);
589 	}
590 
591 	return 0;
592 }
593 
594 static const struct iomap_dio_ops zonefs_write_dio_ops = {
595 	.end_io			= zonefs_file_write_dio_end_io,
596 };
597 
598 /*
599  * Handle direct writes. For sequential zone files, this is the only possible
600  * write path. For these files, check that the user is issuing writes
601  * sequentially from the end of the file. This code assumes that the block layer
602  * delivers write requests to the device in sequential order. This is always the
603  * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
604  * elevator feature is being used (e.g. mq-deadline). The block layer
605  * automatically selects such an elevator for zoned block devices during
606  * device initialization.
607  */
608 static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
609 {
610 	struct inode *inode = file_inode(iocb->ki_filp);
611 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
612 	struct super_block *sb = inode->i_sb;
613 	size_t count;
614 	ssize_t ret;
615 
616 	/*
617 	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
618 	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
619 	 * on the inode lock while the second one goes through and is then unaligned).
620 	 */
621 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
622 	    (iocb->ki_flags & IOCB_NOWAIT))
623 		return -EOPNOTSUPP;
624 
625 	if (iocb->ki_flags & IOCB_NOWAIT) {
626 		if (!inode_trylock(inode))
627 			return -EAGAIN;
628 	} else {
629 		inode_lock(inode);
630 	}
631 
632 	ret = generic_write_checks(iocb, from);
633 	if (ret <= 0)
634 		goto inode_unlock;
635 
636 	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
637 	count = iov_iter_count(from);
638 
639 	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
640 		ret = -EINVAL;
641 		goto inode_unlock;
642 	}
643 
644 	/* Enforce sequential writes (append only) in sequential zones */
645 	mutex_lock(&zi->i_truncate_mutex);
646 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) {
647 		mutex_unlock(&zi->i_truncate_mutex);
648 		ret = -EINVAL;
649 		goto inode_unlock;
650 	}
651 	mutex_unlock(&zi->i_truncate_mutex);
652 
653 	ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
654 			   &zonefs_write_dio_ops, is_sync_kiocb(iocb));
655 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
656 	    (ret > 0 || ret == -EIOCBQUEUED)) {
657 		if (ret > 0)
658 			count = ret;
659 		mutex_lock(&zi->i_truncate_mutex);
660 		zi->i_wpoffset += count;
661 		mutex_unlock(&zi->i_truncate_mutex);
662 	}
663 
664 inode_unlock:
665 	inode_unlock(inode);
666 
667 	return ret;
668 }
669 
670 static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
671 					  struct iov_iter *from)
672 {
673 	struct inode *inode = file_inode(iocb->ki_filp);
674 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
675 	ssize_t ret;
676 
677 	/*
678 	 * Direct IO writes are mandatory for sequential zone files so that the
679 	 * write IO issuing order is preserved.
680 	 */
681 	if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
682 		return -EIO;
683 
684 	if (iocb->ki_flags & IOCB_NOWAIT) {
685 		if (!inode_trylock(inode))
686 			return -EAGAIN;
687 	} else {
688 		inode_lock(inode);
689 	}
690 
691 	ret = generic_write_checks(iocb, from);
692 	if (ret <= 0)
693 		goto inode_unlock;
694 
695 	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
696 
697 	ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
698 	if (ret > 0)
699 		iocb->ki_pos += ret;
700 	else if (ret == -EIO)
701 		zonefs_io_error(inode, true);
702 
703 inode_unlock:
704 	inode_unlock(inode);
705 	if (ret > 0)
706 		ret = generic_write_sync(iocb, ret);
707 
708 	return ret;
709 }
710 
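/*
 * Write to a zone file: check access permissions and the write position
 * against the zone size, then use the direct or buffered write path.
 */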
711 static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
712 {
713 	struct inode *inode = file_inode(iocb->ki_filp);
714 
715 	if (unlikely(IS_IMMUTABLE(inode)))
716 		return -EPERM;
717 
718 	if (sb_rdonly(inode->i_sb))
719 		return -EROFS;
720 
721 	/* Write operations beyond the zone size are not allowed */
722 	if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
723 		return -EFBIG;
724 
725 	if (iocb->ki_flags & IOCB_DIRECT)
726 		return zonefs_file_dio_write(iocb, from);
727 
728 	return zonefs_file_buffered_write(iocb, from);
729 }
730 
731 static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
732 				       int error, unsigned int flags)
733 {
734 	if (error) {
735 		zonefs_io_error(file_inode(iocb->ki_filp), false);
736 		return error;
737 	}
738 
739 	return 0;
740 }
741 
742 static const struct iomap_dio_ops zonefs_read_dio_ops = {
743 	.end_io			= zonefs_file_read_dio_end_io,
744 };
745 
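/*
 * Read from a zone file: reads are limited to the amount of written data,
 * that is, the inode size. Both buffered and direct reads are handled.
 */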
746 static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
747 {
748 	struct inode *inode = file_inode(iocb->ki_filp);
749 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
750 	struct super_block *sb = inode->i_sb;
751 	loff_t isize;
752 	ssize_t ret;
753 
754 	/* Offline zones cannot be read */
755 	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
756 		return -EPERM;
757 
758 	if (iocb->ki_pos >= zi->i_max_size)
759 		return 0;
760 
761 	if (iocb->ki_flags & IOCB_NOWAIT) {
762 		if (!inode_trylock_shared(inode))
763 			return -EAGAIN;
764 	} else {
765 		inode_lock_shared(inode);
766 	}
767 
768 	/* Limit read operations to written data */
769 	mutex_lock(&zi->i_truncate_mutex);
770 	isize = i_size_read(inode);
771 	if (iocb->ki_pos >= isize) {
772 		mutex_unlock(&zi->i_truncate_mutex);
773 		ret = 0;
774 		goto inode_unlock;
775 	}
776 	iov_iter_truncate(to, isize - iocb->ki_pos);
777 	mutex_unlock(&zi->i_truncate_mutex);
778 
779 	if (iocb->ki_flags & IOCB_DIRECT) {
780 		size_t count = iov_iter_count(to);
781 
782 		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
783 			ret = -EINVAL;
784 			goto inode_unlock;
785 		}
786 		file_accessed(iocb->ki_filp);
787 		ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
788 				   &zonefs_read_dio_ops, is_sync_kiocb(iocb));
789 	} else {
790 		ret = generic_file_read_iter(iocb, to);
791 		if (ret == -EIO)
792 			zonefs_io_error(inode, false);
793 	}
794 
795 inode_unlock:
796 	inode_unlock_shared(inode);
797 
798 	return ret;
799 }
800 
801 static const struct file_operations zonefs_file_operations = {
802 	.open		= generic_file_open,
803 	.fsync		= zonefs_file_fsync,
804 	.mmap		= zonefs_file_mmap,
805 	.llseek		= zonefs_file_llseek,
806 	.read_iter	= zonefs_file_read_iter,
807 	.write_iter	= zonefs_file_write_iter,
808 	.splice_read	= generic_file_splice_read,
809 	.splice_write	= iter_file_splice_write,
810 	.iopoll		= iomap_dio_iopoll,
811 };
812 
813 static struct kmem_cache *zonefs_inode_cachep;
814 
815 static struct inode *zonefs_alloc_inode(struct super_block *sb)
816 {
817 	struct zonefs_inode_info *zi;
818 
819 	zi = kmem_cache_alloc(zonefs_inode_cachep, GFP_KERNEL);
820 	if (!zi)
821 		return NULL;
822 
823 	inode_init_once(&zi->i_vnode);
824 	mutex_init(&zi->i_truncate_mutex);
825 	init_rwsem(&zi->i_mmap_sem);
826 
827 	return &zi->i_vnode;
828 }
829 
830 static void zonefs_free_inode(struct inode *inode)
831 {
832 	kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
833 }
834 
835 /*
836  * File system stat.
837  */
838 static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
839 {
840 	struct super_block *sb = dentry->d_sb;
841 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
842 	enum zonefs_ztype t;
843 	u64 fsid;
844 
845 	buf->f_type = ZONEFS_MAGIC;
846 	buf->f_bsize = sb->s_blocksize;
847 	buf->f_namelen = ZONEFS_NAME_MAX;
848 
849 	spin_lock(&sbi->s_lock);
850 
851 	buf->f_blocks = sbi->s_blocks;
852 	if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
853 		buf->f_bfree = 0;
854 	else
855 		buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
856 	buf->f_bavail = buf->f_bfree;
857 
858 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
859 		if (sbi->s_nr_files[t])
860 			buf->f_files += sbi->s_nr_files[t] + 1;
861 	}
862 	buf->f_ffree = 0;
863 
864 	spin_unlock(&sbi->s_lock);
865 
866 	fsid = le64_to_cpup((void *)sbi->s_uuid.b) ^
867 		le64_to_cpup((void *)sbi->s_uuid.b + sizeof(u64));
868 	buf->f_fsid.val[0] = (u32)fsid;
869 	buf->f_fsid.val[1] = (u32)(fsid >> 32);
870 
871 	return 0;
872 }
873 
874 enum {
875 	Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
876 	Opt_err,
877 };
878 
879 static const match_table_t tokens = {
880 	{ Opt_errors_ro,	"errors=remount-ro"},
881 	{ Opt_errors_zro,	"errors=zone-ro"},
882 	{ Opt_errors_zol,	"errors=zone-offline"},
883 	{ Opt_errors_repair,	"errors=repair"},
884 	{ Opt_err,		NULL}
885 };
886 
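/*
 * Parse the "errors=" mount option controlling the handling of IO errors.
 */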
887 static int zonefs_parse_options(struct super_block *sb, char *options)
888 {
889 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
890 	substring_t args[MAX_OPT_ARGS];
891 	char *p;
892 
893 	if (!options)
894 		return 0;
895 
896 	while ((p = strsep(&options, ",")) != NULL) {
897 		int token;
898 
899 		if (!*p)
900 			continue;
901 
902 		token = match_token(p, tokens, args);
903 		switch (token) {
904 		case Opt_errors_ro:
905 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
906 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
907 			break;
908 		case Opt_errors_zro:
909 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
910 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
911 			break;
912 		case Opt_errors_zol:
913 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
914 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
915 			break;
916 		case Opt_errors_repair:
917 			sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
918 			sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
919 			break;
920 		default:
921 			return -EINVAL;
922 		}
923 	}
924 
925 	return 0;
926 }
927 
928 static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
929 {
930 	struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
931 
932 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
933 		seq_puts(seq, ",errors=remount-ro");
934 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
935 		seq_puts(seq, ",errors=zone-ro");
936 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
937 		seq_puts(seq, ",errors=zone-offline");
938 	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
939 		seq_puts(seq, ",errors=repair");
940 
941 	return 0;
942 }
943 
944 static int zonefs_remount(struct super_block *sb, int *flags, char *data)
945 {
946 	sync_filesystem(sb);
947 
948 	return zonefs_parse_options(sb, data);
949 }
950 
951 static const struct super_operations zonefs_sops = {
952 	.alloc_inode	= zonefs_alloc_inode,
953 	.free_inode	= zonefs_free_inode,
954 	.statfs		= zonefs_statfs,
955 	.remount_fs	= zonefs_remount,
956 	.show_options	= zonefs_show_options,
957 };
958 
959 static const struct inode_operations zonefs_dir_inode_operations = {
960 	.lookup		= simple_lookup,
961 	.setattr	= zonefs_inode_setattr,
962 };
963 
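/*
 * Initialize a zone group directory inode ("cnv" or "seq").
 */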
964 static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
965 				  enum zonefs_ztype type)
966 {
967 	struct super_block *sb = parent->i_sb;
968 
969 	inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk) + type + 1;
970 	inode_init_owner(inode, parent, S_IFDIR | 0555);
971 	inode->i_op = &zonefs_dir_inode_operations;
972 	inode->i_fop = &simple_dir_operations;
973 	set_nlink(inode, 2);
974 	inc_nlink(parent);
975 }
976 
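/*
 * Initialize a zone file inode: the inode size reflects the amount of data
 * readable in the zone, that is, the zone write pointer position for
 * sequential zones and the zone size for conventional zones.
 */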
977 static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
978 				   enum zonefs_ztype type)
979 {
980 	struct super_block *sb = inode->i_sb;
981 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
982 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
983 
984 	inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
985 	inode->i_mode = S_IFREG | sbi->s_perm;
986 
987 	zi->i_ztype = type;
988 	zi->i_zsector = zone->start;
989 	zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
990 			       zone->len << SECTOR_SHIFT);
991 	zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
992 
993 	inode->i_uid = sbi->s_uid;
994 	inode->i_gid = sbi->s_gid;
995 	inode->i_size = zi->i_wpoffset;
996 	inode->i_blocks = zone->len;
997 
998 	inode->i_op = &zonefs_file_inode_operations;
999 	inode->i_fop = &zonefs_file_operations;
1000 	inode->i_mapping->a_ops = &zonefs_file_aops;
1001 
1002 	sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
1003 	sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
1004 	sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
1005 }
1006 
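/*
 * Create and add an inode and dentry under the parent directory: a zone file
 * inode if a zone is specified, or a zone group directory inode otherwise.
 */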
1007 static struct dentry *zonefs_create_inode(struct dentry *parent,
1008 					const char *name, struct blk_zone *zone,
1009 					enum zonefs_ztype type)
1010 {
1011 	struct inode *dir = d_inode(parent);
1012 	struct dentry *dentry;
1013 	struct inode *inode;
1014 
1015 	dentry = d_alloc_name(parent, name);
1016 	if (!dentry)
1017 		return NULL;
1018 
1019 	inode = new_inode(parent->d_sb);
1020 	if (!inode)
1021 		goto dput;
1022 
1023 	inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
1024 	if (zone)
1025 		zonefs_init_file_inode(inode, zone, type);
1026 	else
1027 		zonefs_init_dir_inode(dir, inode, type);
1028 	d_add(dentry, inode);
1029 	dir->i_size++;
1030 
1031 	return dentry;
1032 
1033 dput:
1034 	dput(dentry);
1035 
1036 	return NULL;
1037 }
1038 
1039 struct zonefs_zone_data {
1040 	struct super_block	*sb;
1041 	unsigned int		nr_zones[ZONEFS_ZTYPE_MAX];
1042 	struct blk_zone		*zones;
1043 };
1044 
1045 /*
1046  * Create a zone group and populate it with zone files.
1047  */
1048 static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
1049 				enum zonefs_ztype type)
1050 {
1051 	struct super_block *sb = zd->sb;
1052 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1053 	struct blk_zone *zone, *next, *end;
1054 	const char *zgroup_name;
1055 	char *file_name;
1056 	struct dentry *dir;
1057 	unsigned int n = 0;
1058 	int ret = -ENOMEM;
1059 
1060 	/* If the group is empty, there is nothing to do */
1061 	if (!zd->nr_zones[type])
1062 		return 0;
1063 
1064 	file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
1065 	if (!file_name)
1066 		return -ENOMEM;
1067 
1068 	if (type == ZONEFS_ZTYPE_CNV)
1069 		zgroup_name = "cnv";
1070 	else
1071 		zgroup_name = "seq";
1072 
1073 	dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
1074 	if (!dir)
1075 		goto free;
1076 
1077 	/*
1078 	 * The first zone contains the super block: skip it.
1079 	 */
1080 	end = zd->zones + blkdev_nr_zones(sb->s_bdev->bd_disk);
1081 	for (zone = &zd->zones[1]; zone < end; zone = next) {
1082 
1083 		next = zone + 1;
1084 		if (zonefs_zone_type(zone) != type)
1085 			continue;
1086 
1087 		/*
1088 		 * For conventional zones, contiguous zones can be aggregated
1089 		 * together to form larger files. Note that this overwrites the
1090 		 * length of the first zone of the set of contiguous zones
1091 		 * aggregated together. If one offline or read-only zone is
1092 		 * found, assume that all zones aggregated have the same
1093 		 * condition.
1094 		 */
1095 		if (type == ZONEFS_ZTYPE_CNV &&
1096 		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
1097 			for (; next < end; next++) {
1098 				if (zonefs_zone_type(next) != type)
1099 					break;
1100 				zone->len += next->len;
1101 				if (next->cond == BLK_ZONE_COND_READONLY &&
1102 				    zone->cond != BLK_ZONE_COND_OFFLINE)
1103 					zone->cond = BLK_ZONE_COND_READONLY;
1104 				else if (next->cond == BLK_ZONE_COND_OFFLINE)
1105 					zone->cond = BLK_ZONE_COND_OFFLINE;
1106 			}
1107 		}
1108 
1109 		/*
1110 		 * Use the file number within its group as file name.
1111 		 */
1112 		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
1113 		if (!zonefs_create_inode(dir, file_name, zone, type))
1114 			goto free;
1115 
1116 		n++;
1117 	}
1118 
1119 	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
1120 		    zgroup_name, n, n > 1 ? "s" : "");
1121 
1122 	sbi->s_nr_files[type] = n;
1123 	ret = 0;
1124 
1125 free:
1126 	kfree(file_name);
1127 
1128 	return ret;
1129 }
1130 
1131 static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
1132 				   void *data)
1133 {
1134 	struct zonefs_zone_data *zd = data;
1135 
1136 	/*
1137 	 * Count the number of usable zones: the first zone at index 0 contains
1138 	 * the super block and is ignored.
1139 	 */
1140 	switch (zone->type) {
1141 	case BLK_ZONE_TYPE_CONVENTIONAL:
1142 		zone->wp = zone->start + zone->len;
1143 		if (idx)
1144 			zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
1145 		break;
1146 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1147 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1148 		if (idx)
1149 			zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
1150 		break;
1151 	default:
1152 		zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
1153 			   zone->type);
1154 		return -EIO;
1155 	}
1156 
1157 	memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
1158 
1159 	return 0;
1160 }
1161 
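/*
 * Allocate the zone information array and populate it with a full report of
 * all zones of the backing device.
 */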
1162 static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
1163 {
1164 	struct block_device *bdev = zd->sb->s_bdev;
1165 	int ret;
1166 
1167 	zd->zones = kvcalloc(blkdev_nr_zones(bdev->bd_disk),
1168 			     sizeof(struct blk_zone), GFP_KERNEL);
1169 	if (!zd->zones)
1170 		return -ENOMEM;
1171 
1172 	/* Get zones information from the device */
1173 	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
1174 				  zonefs_get_zone_info_cb, zd);
1175 	if (ret < 0) {
1176 		zonefs_err(zd->sb, "Zone report failed %d\n", ret);
1177 		return ret;
1178 	}
1179 
1180 	if (ret != blkdev_nr_zones(bdev->bd_disk)) {
1181 		zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
1182 			   ret, blkdev_nr_zones(bdev->bd_disk));
1183 		return -EIO;
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
1190 {
1191 	kvfree(zd->zones);
1192 }
1193 
1194 /*
1195  * Read super block information from the device.
1196  */
1197 static int zonefs_read_super(struct super_block *sb)
1198 {
1199 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1200 	struct zonefs_super *super;
1201 	u32 crc, stored_crc;
1202 	struct page *page;
1203 	struct bio_vec bio_vec;
1204 	struct bio bio;
1205 	int ret;
1206 
1207 	page = alloc_page(GFP_KERNEL);
1208 	if (!page)
1209 		return -ENOMEM;
1210 
1211 	bio_init(&bio, &bio_vec, 1);
1212 	bio.bi_iter.bi_sector = 0;
1213 	bio.bi_opf = REQ_OP_READ;
1214 	bio_set_dev(&bio, sb->s_bdev);
1215 	bio_add_page(&bio, page, PAGE_SIZE, 0);
1216 
1217 	ret = submit_bio_wait(&bio);
1218 	if (ret)
1219 		goto free_page;
1220 
1221 	super = kmap(page);
1222 
1223 	ret = -EINVAL;
1224 	if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
1225 		goto unmap;
1226 
1227 	stored_crc = le32_to_cpu(super->s_crc);
1228 	super->s_crc = 0;
1229 	crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
1230 	if (crc != stored_crc) {
1231 		zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
1232 			   crc, stored_crc);
1233 		goto unmap;
1234 	}
1235 
1236 	sbi->s_features = le64_to_cpu(super->s_features);
1237 	if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
1238 		zonefs_err(sb, "Unknown features set 0x%llx\n",
1239 			   sbi->s_features);
1240 		goto unmap;
1241 	}
1242 
1243 	if (sbi->s_features & ZONEFS_F_UID) {
1244 		sbi->s_uid = make_kuid(current_user_ns(),
1245 				       le32_to_cpu(super->s_uid));
1246 		if (!uid_valid(sbi->s_uid)) {
1247 			zonefs_err(sb, "Invalid UID feature\n");
1248 			goto unmap;
1249 		}
1250 	}
1251 
1252 	if (sbi->s_features & ZONEFS_F_GID) {
1253 		sbi->s_gid = make_kgid(current_user_ns(),
1254 				       le32_to_cpu(super->s_gid));
1255 		if (!gid_valid(sbi->s_gid)) {
1256 			zonefs_err(sb, "Invalid GID feature\n");
1257 			goto unmap;
1258 		}
1259 	}
1260 
1261 	if (sbi->s_features & ZONEFS_F_PERM)
1262 		sbi->s_perm = le32_to_cpu(super->s_perm);
1263 
1264 	if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
1265 		zonefs_err(sb, "Reserved area is being used\n");
1266 		goto unmap;
1267 	}
1268 
1269 	uuid_copy(&sbi->s_uuid, (uuid_t *)super->s_uuid);
1270 	ret = 0;
1271 
1272 unmap:
1273 	kunmap(page);
1274 free_page:
1275 	__free_page(page);
1276 
1277 	return ret;
1278 }
1279 
1280 /*
1281  * Check that the device is zoned. If it is, get the list of zones and create
1282  * sub-directories and files according to the device zone configuration and
1283  * format options.
1284  */
1285 static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
1286 {
1287 	struct zonefs_zone_data zd;
1288 	struct zonefs_sb_info *sbi;
1289 	struct inode *inode;
1290 	enum zonefs_ztype t;
1291 	int ret;
1292 
1293 	if (!bdev_is_zoned(sb->s_bdev)) {
1294 		zonefs_err(sb, "Not a zoned block device\n");
1295 		return -EINVAL;
1296 	}
1297 
1298 	/*
1299 	 * Initialize super block information: the maximum file size is updated
1300 	 * when the zone files are created so that the format option
1301 	 * ZONEFS_F_AGGRCNV, which can increase the maximum file size of a
1302 	 * file beyond the zone size, is taken into account.
1303 	 */
1304 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1305 	if (!sbi)
1306 		return -ENOMEM;
1307 
1308 	spin_lock_init(&sbi->s_lock);
1309 	sb->s_fs_info = sbi;
1310 	sb->s_magic = ZONEFS_MAGIC;
1311 	sb->s_maxbytes = 0;
1312 	sb->s_op = &zonefs_sops;
1313 	sb->s_time_gran	= 1;
1314 
1315 	/*
1316 	 * The block size is set to the device physical sector size to ensure
1317 	 * that write operations on 512e devices (512B logical block and 4KB
1318 	 * physical block) are always aligned to the device physical blocks,
1319 	 * as mandated by the ZBC/ZAC specifications.
1320 	 */
1321 	sb_set_blocksize(sb, bdev_physical_block_size(sb->s_bdev));
1322 	sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
1323 	sbi->s_uid = GLOBAL_ROOT_UID;
1324 	sbi->s_gid = GLOBAL_ROOT_GID;
1325 	sbi->s_perm = 0640;
1326 	sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
1327 
1328 	ret = zonefs_read_super(sb);
1329 	if (ret)
1330 		return ret;
1331 
1332 	ret = zonefs_parse_options(sb, data);
1333 	if (ret)
1334 		return ret;
1335 
1336 	memset(&zd, 0, sizeof(struct zonefs_zone_data));
1337 	zd.sb = sb;
1338 	ret = zonefs_get_zone_info(&zd);
1339 	if (ret)
1340 		goto cleanup;
1341 
1342 	zonefs_info(sb, "Mounting %u zones",
1343 		    blkdev_nr_zones(sb->s_bdev->bd_disk));
1344 
1345 	/* Create root directory inode */
1346 	ret = -ENOMEM;
1347 	inode = new_inode(sb);
1348 	if (!inode)
1349 		goto cleanup;
1350 
1351 	inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk);
1352 	inode->i_mode = S_IFDIR | 0555;
1353 	inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
1354 	inode->i_op = &zonefs_dir_inode_operations;
1355 	inode->i_fop = &simple_dir_operations;
1356 	set_nlink(inode, 2);
1357 
1358 	sb->s_root = d_make_root(inode);
1359 	if (!sb->s_root)
1360 		goto cleanup;
1361 
1362 	/* Create and populate files in zone groups directories */
1363 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
1364 		ret = zonefs_create_zgroup(&zd, t);
1365 		if (ret)
1366 			break;
1367 	}
1368 
1369 cleanup:
1370 	zonefs_cleanup_zone_info(&zd);
1371 
1372 	return ret;
1373 }
1374 
1375 static struct dentry *zonefs_mount(struct file_system_type *fs_type,
1376 				   int flags, const char *dev_name, void *data)
1377 {
1378 	return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
1379 }
1380 
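/*
 * Release all dentries and inodes and free the super block information on
 * unmount.
 */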
1381 static void zonefs_kill_super(struct super_block *sb)
1382 {
1383 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1384 
1385 	if (sb->s_root)
1386 		d_genocide(sb->s_root);
1387 	kill_block_super(sb);
1388 	kfree(sbi);
1389 }
1390 
1391 /*
1392  * File system definition and registration.
1393  */
1394 static struct file_system_type zonefs_type = {
1395 	.owner		= THIS_MODULE,
1396 	.name		= "zonefs",
1397 	.mount		= zonefs_mount,
1398 	.kill_sb	= zonefs_kill_super,
1399 	.fs_flags	= FS_REQUIRES_DEV,
1400 };
1401 
1402 static int __init zonefs_init_inodecache(void)
1403 {
1404 	zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
1405 			sizeof(struct zonefs_inode_info), 0,
1406 			(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1407 			NULL);
1408 	if (zonefs_inode_cachep == NULL)
1409 		return -ENOMEM;
1410 	return 0;
1411 }
1412 
1413 static void zonefs_destroy_inodecache(void)
1414 {
1415 	/*
1416 	 * Make sure all delayed rcu free inodes are flushed before we
1417 	 * destroy the inode cache.
1418 	 */
1419 	rcu_barrier();
1420 	kmem_cache_destroy(zonefs_inode_cachep);
1421 }
1422 
1423 static int __init zonefs_init(void)
1424 {
1425 	int ret;
1426 
1427 	BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
1428 
1429 	ret = zonefs_init_inodecache();
1430 	if (ret)
1431 		return ret;
1432 
1433 	ret = register_filesystem(&zonefs_type);
1434 	if (ret) {
1435 		zonefs_destroy_inodecache();
1436 		return ret;
1437 	}
1438 
1439 	return 0;
1440 }
1441 
1442 static void __exit zonefs_exit(void)
1443 {
1444 	zonefs_destroy_inodecache();
1445 	unregister_filesystem(&zonefs_type);
1446 }
1447 
1448 MODULE_AUTHOR("Damien Le Moal");
1449 MODULE_DESCRIPTION("Zone file system for zoned block devices");
1450 MODULE_LICENSE("GPL");
1451 module_init(zonefs_init);
1452 module_exit(zonefs_exit);
1453