xref: /openbmc/linux/fs/gfs2/file.c (revision 56d06fa2)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/falloc.h>
22 #include <linux/swap.h>
23 #include <linux/crc32.h>
24 #include <linux/writeback.h>
25 #include <asm/uaccess.h>
26 #include <linux/dlm.h>
27 #include <linux/dlm_plock.h>
28 #include <linux/delay.h>
29 
30 #include "gfs2.h"
31 #include "incore.h"
32 #include "bmap.h"
33 #include "dir.h"
34 #include "glock.h"
35 #include "glops.h"
36 #include "inode.h"
37 #include "log.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "rgrp.h"
41 #include "trans.h"
42 #include "util.h"
43 
44 /**
45  * gfs2_llseek - seek to a location in a file
46  * @file: the file
47  * @offset: the offset
48  * @whence: Where to seek from (SEEK_SET, SEEK_CUR, SEEK_END, SEEK_DATA or SEEK_HOLE)
49  *
50  * SEEK_END, SEEK_DATA and SEEK_HOLE require the inode glock because they
51  * reference the file's size.
52  *
53  * Returns: The new offset, or errno
54  */
55 
56 static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
57 {
58 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
59 	struct gfs2_holder i_gh;
60 	loff_t error;
61 
62 	switch (whence) {
63 	case SEEK_END: /* These reference inode->i_size */
64 	case SEEK_DATA:
65 	case SEEK_HOLE:
66 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
67 					   &i_gh);
68 		if (!error) {
69 			error = generic_file_llseek(file, offset, whence);
70 			gfs2_glock_dq_uninit(&i_gh);
71 		}
72 		break;
73 	case SEEK_CUR:
74 	case SEEK_SET:
75 		error = generic_file_llseek(file, offset, whence);
76 		break;
77 	default:
78 		error = -EINVAL;
79 	}
80 
81 	return error;
82 }
83 
84 /**
85  * gfs2_readdir - Iterator for a directory
86  * @file: The directory to read from
87  * @ctx: What to feed directory entries to
88  *
89  * Returns: errno
90  */
91 
92 static int gfs2_readdir(struct file *file, struct dir_context *ctx)
93 {
94 	struct inode *dir = file->f_mapping->host;
95 	struct gfs2_inode *dip = GFS2_I(dir);
96 	struct gfs2_holder d_gh;
97 	int error;
98 
99 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
100 	if (error)
101 		return error;
102 
103 	error = gfs2_dir_read(dir, ctx, &file->f_ra);
104 
105 	gfs2_glock_dq_uninit(&d_gh);
106 
107 	return error;
108 }
109 
110 /**
111  * fsflags_cvt - convert between fsflags and GFS2 flag values
112  * @table: A table of 32 u32 flags, indexed by bit position in @val
113  * @val: a 32 bit value to convert
114  *
115  * This function can be used to convert between fsflags values and
116  * GFS2's own flags values.
117  *
118  * Returns: the converted flags
119  */
120 static u32 fsflags_cvt(const u32 *table, u32 val)
121 {
122 	u32 res = 0;
123 	while (val) {
124 		if (val & 1)
125 			res |= *table;
126 		table++;
127 		val >>= 1;
128 	}
129 	return res;
130 }
131 
132 static const u32 fsflags_to_gfs2[32] = {
133 	[3] = GFS2_DIF_SYNC,
134 	[4] = GFS2_DIF_IMMUTABLE,
135 	[5] = GFS2_DIF_APPENDONLY,
136 	[7] = GFS2_DIF_NOATIME,
137 	[12] = GFS2_DIF_EXHASH,
138 	[14] = GFS2_DIF_INHERIT_JDATA,
139 	[17] = GFS2_DIF_TOPDIR,
140 };
141 
142 static const u32 gfs2_to_fsflags[32] = {
143 	[gfs2fl_Sync] = FS_SYNC_FL,
144 	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
145 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
146 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
147 	[gfs2fl_ExHash] = FS_INDEX_FL,
148 	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
149 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
150 };
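/*
 * The conversion tables are indexed by bit position: for example,
 * FS_JOURNAL_DATA_FL is bit 14 of the fsflags word, so
 * fsflags_cvt(fsflags_to_gfs2, ...) maps it to GFS2_DIF_INHERIT_JDATA,
 * while gfs2_to_fsflags performs the reverse lookup by GFS2 bit number.
 */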
151 
152 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
153 {
154 	struct inode *inode = file_inode(filp);
155 	struct gfs2_inode *ip = GFS2_I(inode);
156 	struct gfs2_holder gh;
157 	int error;
158 	u32 fsflags;
159 
160 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
161 	error = gfs2_glock_nq(&gh);
162 	if (error)
163 		return error;
164 
165 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
166 	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
167 		fsflags |= FS_JOURNAL_DATA_FL;
168 	if (put_user(fsflags, ptr))
169 		error = -EFAULT;
170 
171 	gfs2_glock_dq(&gh);
172 	gfs2_holder_uninit(&gh);
173 	return error;
174 }
175 
176 void gfs2_set_inode_flags(struct inode *inode)
177 {
178 	struct gfs2_inode *ip = GFS2_I(inode);
179 	unsigned int flags = inode->i_flags;
180 
181 	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
182 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
183 		flags |= S_NOSEC;
184 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
185 		flags |= S_IMMUTABLE;
186 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
187 		flags |= S_APPEND;
188 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
189 		flags |= S_NOATIME;
190 	if (ip->i_diskflags & GFS2_DIF_SYNC)
191 		flags |= S_SYNC;
192 	inode->i_flags = flags;
193 }
194 
195 /* Flags that can be set by user space */
196 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
197 			     GFS2_DIF_IMMUTABLE|		\
198 			     GFS2_DIF_APPENDONLY|		\
199 			     GFS2_DIF_NOATIME|			\
200 			     GFS2_DIF_SYNC|			\
201 			     GFS2_DIF_SYSTEM|			\
202 			     GFS2_DIF_TOPDIR|			\
203 			     GFS2_DIF_INHERIT_JDATA)
204 
205 /**
206  * do_gfs2_set_flags - set flags on an inode
207  * @filp: file pointer
208  * @reqflags: The flags to set
209  * @mask: Indicates which flags are valid
210  * Returns: errno
211  */
212 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
213 {
214 	struct inode *inode = file_inode(filp);
215 	struct gfs2_inode *ip = GFS2_I(inode);
216 	struct gfs2_sbd *sdp = GFS2_SB(inode);
217 	struct buffer_head *bh;
218 	struct gfs2_holder gh;
219 	int error;
220 	u32 new_flags, flags;
221 
222 	error = mnt_want_write_file(filp);
223 	if (error)
224 		return error;
225 
226 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
227 	if (error)
228 		goto out_drop_write;
229 
230 	error = -EACCES;
231 	if (!inode_owner_or_capable(inode))
232 		goto out;
233 
234 	error = 0;
235 	flags = ip->i_diskflags;
236 	new_flags = (flags & ~mask) | (reqflags & mask);
237 	if ((new_flags ^ flags) == 0)
238 		goto out;
239 
240 	error = -EINVAL;
241 	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
242 		goto out;
243 
244 	error = -EPERM;
245 	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
246 		goto out;
247 	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
248 		goto out;
249 	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
250 	    !capable(CAP_LINUX_IMMUTABLE))
251 		goto out;
252 	if (!IS_IMMUTABLE(inode)) {
253 		error = gfs2_permission(inode, MAY_WRITE);
254 		if (error)
255 			goto out;
256 	}
257 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
258 		if (flags & GFS2_DIF_JDATA)
259 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
260 		error = filemap_fdatawrite(inode->i_mapping);
261 		if (error)
262 			goto out;
263 		error = filemap_fdatawait(inode->i_mapping);
264 		if (error)
265 			goto out;
266 	}
267 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
268 	if (error)
269 		goto out;
270 	error = gfs2_meta_inode_buffer(ip, &bh);
271 	if (error)
272 		goto out_trans_end;
273 	gfs2_trans_add_meta(ip->i_gl, bh);
274 	ip->i_diskflags = new_flags;
275 	gfs2_dinode_out(ip, bh->b_data);
276 	brelse(bh);
277 	gfs2_set_inode_flags(inode);
278 	gfs2_set_aops(inode);
279 out_trans_end:
280 	gfs2_trans_end(sdp);
281 out:
282 	gfs2_glock_dq_uninit(&gh);
283 out_drop_write:
284 	mnt_drop_write_file(filp);
285 	return error;
286 }
287 
288 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
289 {
290 	struct inode *inode = file_inode(filp);
291 	u32 fsflags, gfsflags;
292 
293 	if (get_user(fsflags, ptr))
294 		return -EFAULT;
295 
296 	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
297 	if (!S_ISDIR(inode->i_mode)) {
298 		gfsflags &= ~GFS2_DIF_TOPDIR;
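		/*
		 * For regular files the "journal data" fsflag maps onto the
		 * inode's own jdata flag: the XOR below clears
		 * GFS2_DIF_INHERIT_JDATA and sets GFS2_DIF_JDATA in its place.
		 */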
299 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
300 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
301 		return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
302 	}
303 	return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
304 }
305 
306 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
307 {
308 	switch (cmd) {
309 	case FS_IOC_GETFLAGS:
310 		return gfs2_get_flags(filp, (u32 __user *)arg);
311 	case FS_IOC_SETFLAGS:
312 		return gfs2_set_flags(filp, (u32 __user *)arg);
313 	case FITRIM:
314 		return gfs2_fitrim(filp, (void __user *)arg);
315 	}
316 	return -ENOTTY;
317 }
318 
319 /**
320  * gfs2_size_hint - Give a hint to the size of a write request
321  * @filep: The struct file
322  * @offset: The file offset of the write
323  * @size: The length of the write
324  *
325  * When we are about to do a write, this function records the total
326  * write size in order to provide a suitable hint to the lower layers
327  * about how many blocks will be required.
328  *
329  */
330 
331 static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
332 {
333 	struct inode *inode = file_inode(filep);
334 	struct gfs2_sbd *sdp = GFS2_SB(inode);
335 	struct gfs2_inode *ip = GFS2_I(inode);
336 	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
337 	int hint = min_t(size_t, INT_MAX, blks);
338 
339 	if (hint > atomic_read(&ip->i_res.rs_sizehint))
340 		atomic_set(&ip->i_res.rs_sizehint, hint);
341 }
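/*
 * For example, with a 4KiB block size (sb_bsize_shift == 12) a 1MiB write
 * rounds up to a hint of 256 blocks.  The hint is only ever raised here,
 * never lowered.
 */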
342 
343 /**
344  * gfs2_allocate_page_backing - Use bmap to allocate blocks
345  * @page: The (locked) page to allocate backing for
346  *
347  * We try to allocate all the blocks required for the page in
348  * one go. This might fail for various reasons, so we keep
349  * trying until all the blocks to back this page are allocated.
350  * If some of the blocks are already allocated, thats ok too.
351  */
352 
353 static int gfs2_allocate_page_backing(struct page *page)
354 {
355 	struct inode *inode = page->mapping->host;
356 	struct buffer_head bh;
357 	unsigned long size = PAGE_SIZE;
358 	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
359 
360 	do {
361 		bh.b_state = 0;
362 		bh.b_size = size;
363 		gfs2_block_map(inode, lblock, &bh, 1);
364 		if (!buffer_mapped(&bh))
365 			return -EIO;
366 		size -= bh.b_size;
367 		lblock += (bh.b_size >> inode->i_blkbits);
368 	} while (size > 0);
369 	return 0;
370 }
371 
372 /**
373  * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
374  * @vma: The virtual memory area
375  * @vmf: The virtual memory fault containing the page to become writable
376  *
377  * When the page becomes writable, we need to ensure that we have
378  * blocks allocated on disk to back that page.
379  */
380 
381 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
382 {
383 	struct page *page = vmf->page;
384 	struct inode *inode = file_inode(vma->vm_file);
385 	struct gfs2_inode *ip = GFS2_I(inode);
386 	struct gfs2_sbd *sdp = GFS2_SB(inode);
387 	struct gfs2_alloc_parms ap = { .aflags = 0, };
388 	unsigned long last_index;
389 	u64 pos = page->index << PAGE_SHIFT;
390 	unsigned int data_blocks, ind_blocks, rblocks;
391 	struct gfs2_holder gh;
392 	loff_t size;
393 	int ret;
394 
395 	sb_start_pagefault(inode->i_sb);
396 
397 	/* Update file times before taking page lock */
398 	file_update_time(vma->vm_file);
399 
400 	ret = gfs2_rsqa_alloc(ip);
401 	if (ret)
402 		goto out;
403 
404 	gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
405 
406 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
407 	ret = gfs2_glock_nq(&gh);
408 	if (ret)
409 		goto out_uninit;
410 
411 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
412 	set_bit(GIF_SW_PAGED, &ip->i_flags);
413 
414 	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
415 		lock_page(page);
416 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
417 			ret = -EAGAIN;
418 			unlock_page(page);
419 		}
420 		goto out_unlock;
421 	}
422 
423 	ret = gfs2_rindex_update(sdp);
424 	if (ret)
425 		goto out_unlock;
426 
427 	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
428 	ap.target = data_blocks + ind_blocks;
429 	ret = gfs2_quota_lock_check(ip, &ap);
430 	if (ret)
431 		goto out_unlock;
432 	ret = gfs2_inplace_reserve(ip, &ap);
433 	if (ret)
434 		goto out_quota_unlock;
435 
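	/*
	 * Size the transaction: the dinode, any indirect blocks that may be
	 * allocated, the data block itself when journaling data, plus
	 * statfs/quota updates and the rgrp bitmap blocks the allocation
	 * may dirty.
	 */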
436 	rblocks = RES_DINODE + ind_blocks;
437 	if (gfs2_is_jdata(ip))
438 		rblocks += data_blocks ? data_blocks : 1;
439 	if (ind_blocks || data_blocks) {
440 		rblocks += RES_STATFS + RES_QUOTA;
441 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
442 	}
443 	ret = gfs2_trans_begin(sdp, rblocks, 0);
444 	if (ret)
445 		goto out_trans_fail;
446 
447 	lock_page(page);
448 	ret = -EINVAL;
449 	size = i_size_read(inode);
450 	last_index = (size - 1) >> PAGE_SHIFT;
451 	/* Check page index against inode size */
452 	if (size == 0 || (page->index > last_index))
453 		goto out_trans_end;
454 
455 	ret = -EAGAIN;
456 	/* If truncated, we must retry the operation; we may have raced
457 	 * with the glock demotion code.
458 	 */
459 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
460 		goto out_trans_end;
461 
462 	/* Unstuff, if required, and allocate backing blocks for page */
463 	ret = 0;
464 	if (gfs2_is_stuffed(ip))
465 		ret = gfs2_unstuff_dinode(ip, page);
466 	if (ret == 0)
467 		ret = gfs2_allocate_page_backing(page);
468 
469 out_trans_end:
470 	if (ret)
471 		unlock_page(page);
472 	gfs2_trans_end(sdp);
473 out_trans_fail:
474 	gfs2_inplace_release(ip);
475 out_quota_unlock:
476 	gfs2_quota_unlock(ip);
477 out_unlock:
478 	gfs2_glock_dq(&gh);
479 out_uninit:
480 	gfs2_holder_uninit(&gh);
481 	if (ret == 0) {
482 		set_page_dirty(page);
483 		wait_for_stable_page(page);
484 	}
485 out:
486 	sb_end_pagefault(inode->i_sb);
487 	return block_page_mkwrite_return(ret);
488 }
489 
490 static const struct vm_operations_struct gfs2_vm_ops = {
491 	.fault = filemap_fault,
492 	.map_pages = filemap_map_pages,
493 	.page_mkwrite = gfs2_page_mkwrite,
494 };
495 
496 /**
497  * gfs2_mmap - set up a memory mapping of a file
498  * @file: The file to map
499  * @vma: The VMA which describes the mapping
500  *
501  * There is no need to get a lock here unless we should be updating atime,
502  * in which case we briefly take the inode glock so the atime update is
503  * based on an up to date inode; any glock error is returned to the caller.
504  *
505  * Returns: 0 or errno
506  */
507 
508 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
509 {
510 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
511 
512 	if (!(file->f_flags & O_NOATIME) &&
513 	    !IS_NOATIME(&ip->i_inode)) {
514 		struct gfs2_holder i_gh;
515 		int error;
516 
517 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
518 					   &i_gh);
519 		if (error)
520 			return error;
521 		/* grab lock to update inode */
522 		gfs2_glock_dq_uninit(&i_gh);
523 		file_accessed(file);
524 	}
525 	vma->vm_ops = &gfs2_vm_ops;
526 
527 	return 0;
528 }
529 
530 /**
531  * gfs2_open_common - This is common to open and atomic_open
532  * @inode: The inode being opened
533  * @file: The file being opened
534  *
535  * This may or may not be called under a glock, depending on the caller.
536  * We must always be called under a glock for regular files, however.
537  * For other file types, it does not matter whether we hold the glock
538  * or not.
539  *
540  * Returns: Error code or 0 for success
541  */
542 
543 int gfs2_open_common(struct inode *inode, struct file *file)
544 {
545 	struct gfs2_file *fp;
546 	int ret;
547 
548 	if (S_ISREG(inode->i_mode)) {
549 		ret = generic_file_open(inode, file);
550 		if (ret)
551 			return ret;
552 	}
553 
554 	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
555 	if (!fp)
556 		return -ENOMEM;
557 
558 	mutex_init(&fp->f_fl_mutex);
559 
560 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
561 	file->private_data = fp;
562 	return 0;
563 }
564 
565 /**
566  * gfs2_open - open a file
567  * @inode: the inode to open
568  * @file: the struct file for this opening
569  *
570  * After atomic_open, this function is only used for opening files
571  * which are already cached. We must still get the glock for regular
572  * files to ensure that we have the file size up to date for the large
573  * file check which is in the common code. That is only an issue for
574  * regular files though.
575  *
576  * Returns: errno
577  */
578 
579 static int gfs2_open(struct inode *inode, struct file *file)
580 {
581 	struct gfs2_inode *ip = GFS2_I(inode);
582 	struct gfs2_holder i_gh;
583 	int error;
584 	bool need_unlock = false;
585 
586 	if (S_ISREG(ip->i_inode.i_mode)) {
587 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
588 					   &i_gh);
589 		if (error)
590 			return error;
591 		need_unlock = true;
592 	}
593 
594 	error = gfs2_open_common(inode, file);
595 
596 	if (need_unlock)
597 		gfs2_glock_dq_uninit(&i_gh);
598 
599 	return error;
600 }
601 
602 /**
603  * gfs2_release - called to close a struct file
604  * @inode: the inode the struct file belongs to
605  * @file: the struct file being closed
606  *
607  * Returns: errno
608  */
609 
610 static int gfs2_release(struct inode *inode, struct file *file)
611 {
612 	struct gfs2_inode *ip = GFS2_I(inode);
613 
614 	kfree(file->private_data);
615 	file->private_data = NULL;
616 
617 	if (!(file->f_mode & FMODE_WRITE))
618 		return 0;
619 
620 	gfs2_rsqa_delete(ip, &inode->i_writecount);
621 	return 0;
622 }
623 
624 /**
625  * gfs2_fsync - sync the dirty data for a file (across the cluster)
626  * @file: the file that points to the dentry
627  * @start: the start position in the file to sync
628  * @end: the end position in the file to sync
629  * @datasync: set if we can ignore timestamp changes
630  *
631  * We split the data flushing here so that we don't wait for the data
632  * until after we've also sent the metadata to disk. Note that for
633  * data=ordered, we will write & wait for the data at the log flush
634  * stage anyway, so this is unlikely to make much of a difference
635  * except in the data=writeback case.
636  *
637  * If the fdatawrite fails for any reason other than -EIO, we will
638  * continue the remainder of the fsync, although we'll still report
639  * the error at the end. This is to match filemap_write_and_wait_range()
640  * behaviour.
641  *
642  * Returns: errno
643  */
644 
645 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
646 		      int datasync)
647 {
648 	struct address_space *mapping = file->f_mapping;
649 	struct inode *inode = mapping->host;
650 	int sync_state = inode->i_state & I_DIRTY_ALL;
651 	struct gfs2_inode *ip = GFS2_I(inode);
652 	int ret = 0, ret1 = 0;
653 
654 	if (mapping->nrpages) {
655 		ret1 = filemap_fdatawrite_range(mapping, start, end);
656 		if (ret1 == -EIO)
657 			return ret1;
658 	}
659 
660 	if (!gfs2_is_jdata(ip))
661 		sync_state &= ~I_DIRTY_PAGES;
662 	if (datasync)
663 		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
664 
665 	if (sync_state) {
666 		ret = sync_inode_metadata(inode, 1);
667 		if (ret)
668 			return ret;
669 		if (gfs2_is_jdata(ip))
670 			filemap_write_and_wait(mapping);
671 		gfs2_ail_flush(ip->i_gl, 1);
672 	}
673 
674 	if (mapping->nrpages)
675 		ret = filemap_fdatawait_range(mapping, start, end);
676 
677 	return ret ? ret : ret1;
678 }
679 
680 /**
681  * gfs2_file_write_iter - Perform a write to a file
682  * @iocb: The io context
683  * @from: The data to write
686  *
687  * We have to do a lock/unlock here to refresh the inode size for
688  * O_APPEND writes, otherwise we can end up writing at the wrong
689  * offset. There is still a race, but provided the app is using its
690  * own file locking, this will make O_APPEND work as expected.
691  *
692  */
693 
694 static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
695 {
696 	struct file *file = iocb->ki_filp;
697 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
698 	int ret;
699 
700 	ret = gfs2_rsqa_alloc(ip);
701 	if (ret)
702 		return ret;
703 
704 	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
705 
706 	if (iocb->ki_flags & IOCB_APPEND) {
707 		struct gfs2_holder gh;
708 
709 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
710 		if (ret)
711 			return ret;
712 		gfs2_glock_dq_uninit(&gh);
713 	}
714 
715 	return generic_file_write_iter(iocb, from);
716 }
717 
718 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
719 			   int mode)
720 {
721 	struct gfs2_inode *ip = GFS2_I(inode);
722 	struct buffer_head *dibh;
723 	int error;
724 	unsigned int nr_blks;
725 	sector_t lblock = offset >> inode->i_blkbits;
726 
727 	error = gfs2_meta_inode_buffer(ip, &dibh);
728 	if (unlikely(error))
729 		return error;
730 
731 	gfs2_trans_add_meta(ip->i_gl, dibh);
732 
733 	if (gfs2_is_stuffed(ip)) {
734 		error = gfs2_unstuff_dinode(ip, NULL);
735 		if (unlikely(error))
736 			goto out;
737 	}
738 
739 	while (len) {
740 		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
741 		bh_map.b_size = len;
742 		set_buffer_zeronew(&bh_map);
743 
744 		error = gfs2_block_map(inode, lblock, &bh_map, 1);
745 		if (unlikely(error))
746 			goto out;
747 		len -= bh_map.b_size;
748 		nr_blks = bh_map.b_size >> inode->i_blkbits;
749 		lblock += nr_blks;
750 		if (!buffer_new(&bh_map))
751 			continue;
752 		if (unlikely(!buffer_zeronew(&bh_map))) {
753 			error = -EIO;
754 			goto out;
755 		}
756 	}
757 out:
758 	brelse(dibh);
759 	return error;
760 }
761 /**
762  * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
763  *                     blocks, determine how many bytes can be written.
764  * @ip:          The inode in question.
765  * @len:         In: the requested byte limit. Out: the bytes that can actually be written, at most the requested limit.
766  * @data_blocks: Compute and return the number of data blocks needed
767  * @ind_blocks:  Compute and return the number of indirect blocks needed
768  * @max_blocks:  The total blocks available to work with.
769  *
770  * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
771  */
772 static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
773 			    unsigned int *data_blocks, unsigned int *ind_blocks,
774 			    unsigned int max_blocks)
775 {
776 	loff_t max = *len;
777 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
778 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
779 
780 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
781 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
782 		max_data -= tmp;
783 	}
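	/*
	 * Each pass of the loop above accounts for one more level of
	 * indirection: the block count at the current level is divided by
	 * sd_inptrs (pointers per indirect block) to give the number of
	 * indirect blocks needed one level up, and those are subtracted
	 * from max_data.  Whatever remains can hold file data.
	 */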
784 
785 	*data_blocks = max_data;
786 	*ind_blocks = max_blocks - max_data;
787 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
788 	if (*len > max) {
789 		*len = max;
790 		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
791 	}
792 }
793 
794 static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
795 {
796 	struct inode *inode = file_inode(file);
797 	struct gfs2_sbd *sdp = GFS2_SB(inode);
798 	struct gfs2_inode *ip = GFS2_I(inode);
799 	struct gfs2_alloc_parms ap = { .aflags = 0, };
800 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
801 	loff_t bytes, max_bytes, max_blks = UINT_MAX;
802 	int error;
803 	const loff_t pos = offset;
804 	const loff_t count = len;
805 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
806 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
807 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
808 
809 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
810 
811 	offset &= bsize_mask;
812 
813 	len = next - offset;
814 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
815 	if (!bytes)
816 		bytes = UINT_MAX;
817 	bytes &= bsize_mask;
818 	if (bytes == 0)
819 		bytes = sdp->sd_sb.sb_bsize;
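	/*
	 * offset and len now cover the request rounded out to filesystem
	 * block boundaries, and bytes is the initial per-pass chunk size:
	 * half of the largest rgrp's data capacity, clamped to no less
	 * than one block.
	 */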
820 
821 	gfs2_size_hint(file, offset, len);
822 
823 	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
824 	ap.min_target = data_blocks + ind_blocks;
825 
826 	while (len > 0) {
827 		if (len < bytes)
828 			bytes = len;
829 		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
830 			len -= bytes;
831 			offset += bytes;
832 			continue;
833 		}
834 
835 		/* We need to determine how many bytes we can actually
836 		 * fallocate without exceeding quota or going over the
837 		 * end of the fs. We start off optimistically by assuming
838 		 * we can write max_bytes */
839 		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
840 
841 		/* Since max_bytes is most likely a theoretical max, we
842 		 * calculate a more realistic 'bytes' to serve as a good
843 		 * starting point for the number of bytes we may be able
844 		 * to write */
845 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
846 		ap.target = data_blocks + ind_blocks;
847 
848 		error = gfs2_quota_lock_check(ip, &ap);
849 		if (error)
850 			return error;
851 		/* ap.allowed tells us how many blocks quota will allow
852 		 * us to write. Check if this reduces max_blks */
853 		if (ap.allowed && ap.allowed < max_blks)
854 			max_blks = ap.allowed;
855 
856 		error = gfs2_inplace_reserve(ip, &ap);
857 		if (error)
858 			goto out_qunlock;
859 
860 		/* check if the selected rgrp limits our max_blks further */
861 		if (ap.allowed && ap.allowed < max_blks)
862 			max_blks = ap.allowed;
863 
864 		/* Almost done. Calculate bytes that can be written using
865 		 * max_blks. We also recompute max_bytes, data_blocks and
866 		 * ind_blocks */
867 		calc_max_reserv(ip, &max_bytes, &data_blocks,
868 				&ind_blocks, max_blks);
869 
870 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
871 			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
872 		if (gfs2_is_jdata(ip))
873 			rblocks += data_blocks ? data_blocks : 1;
874 
875 		error = gfs2_trans_begin(sdp, rblocks,
876 					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
877 		if (error)
878 			goto out_trans_fail;
879 
880 		error = fallocate_chunk(inode, offset, max_bytes, mode);
881 		gfs2_trans_end(sdp);
882 
883 		if (error)
884 			goto out_trans_fail;
885 
886 		len -= max_bytes;
887 		offset += max_bytes;
888 		gfs2_inplace_release(ip);
889 		gfs2_quota_unlock(ip);
890 	}
891 
892 	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
893 		i_size_write(inode, pos + count);
894 		file_update_time(file);
895 		mark_inode_dirty(inode);
896 	}
897 
898 	return generic_write_sync(file, pos, count);
899 
900 out_trans_fail:
901 	gfs2_inplace_release(ip);
902 out_qunlock:
903 	gfs2_quota_unlock(ip);
904 	return error;
905 }
906 
907 static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
908 {
909 	struct inode *inode = file_inode(file);
910 	struct gfs2_inode *ip = GFS2_I(inode);
911 	struct gfs2_holder gh;
912 	int ret;
913 
914 	if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
915 		return -EOPNOTSUPP;
916 
917 	inode_lock(inode);
918 
919 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
920 	ret = gfs2_glock_nq(&gh);
921 	if (ret)
922 		goto out_uninit;
923 
924 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
925 	    (offset + len) > inode->i_size) {
926 		ret = inode_newsize_ok(inode, offset + len);
927 		if (ret)
928 			goto out_unlock;
929 	}
930 
931 	ret = get_write_access(inode);
932 	if (ret)
933 		goto out_unlock;
934 
935 	ret = gfs2_rsqa_alloc(ip);
936 	if (ret)
937 		goto out_putw;
938 
939 	ret = __gfs2_fallocate(file, mode, offset, len);
940 	if (ret)
941 		gfs2_rs_deltree(&ip->i_res);
942 
943 out_putw:
944 	put_write_access(inode);
945 out_unlock:
946 	gfs2_glock_dq(&gh);
947 out_uninit:
948 	gfs2_holder_uninit(&gh);
949 	inode_unlock(inode);
950 	return ret;
951 }
952 
953 static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
954 				      struct file *out, loff_t *ppos,
955 				      size_t len, unsigned int flags)
956 {
957 	int error;
958 	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
959 
960 	error = gfs2_rsqa_alloc(ip);
961 	if (error)
962 		return (ssize_t)error;
963 
964 	gfs2_size_hint(out, *ppos, len);
965 
966 	return iter_file_splice_write(pipe, out, ppos, len, flags);
967 }
968 
969 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
970 
971 /**
972  * gfs2_lock - acquire/release a posix lock on a file
973  * @file: the file pointer
974  * @cmd: either modify or retrieve lock state, possibly wait
975  * @fl: type and range of lock
976  *
977  * Returns: errno
978  */
979 
980 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
981 {
982 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
983 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
984 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
985 
986 	if (!(fl->fl_flags & FL_POSIX))
987 		return -ENOLCK;
988 	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
989 		return -ENOLCK;
990 
991 	if (cmd == F_CANCELLK) {
992 		/* Hack: */
993 		cmd = F_SETLK;
994 		fl->fl_type = F_UNLCK;
995 	}
996 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
997 		if (fl->fl_type == F_UNLCK)
998 			locks_lock_file_wait(file, fl);
999 		return -EIO;
1000 	}
1001 	if (IS_GETLK(cmd))
1002 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
1003 	else if (fl->fl_type == F_UNLCK)
1004 		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
1005 	else
1006 		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
1007 }
1008 
1009 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
1010 {
1011 	struct gfs2_file *fp = file->private_data;
1012 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1013 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
1014 	struct gfs2_glock *gl;
1015 	unsigned int state;
1016 	u16 flags;
1017 	int error = 0;
1018 	int sleeptime;
1019 
1020 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
1021 	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
1022 
1023 	mutex_lock(&fp->f_fl_mutex);
1024 
1025 	gl = fl_gh->gh_gl;
1026 	if (gl) {
1027 		if (fl_gh->gh_state == state)
1028 			goto out;
1029 		locks_lock_file_wait(file,
1030 				     &(struct file_lock) {
1031 					     .fl_type = F_UNLCK,
1032 					     .fl_flags = FL_FLOCK
1033 				     });
1034 		gfs2_glock_dq(fl_gh);
1035 		gfs2_holder_reinit(state, flags, fl_gh);
1036 	} else {
1037 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1038 				       &gfs2_flock_glops, CREATE, &gl);
1039 		if (error)
1040 			goto out;
1041 		gfs2_holder_init(gl, state, flags, fl_gh);
1042 		gfs2_glock_put(gl);
1043 	}
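	/*
	 * For a non-blocking request the holder carries LM_FLAG_TRY_1CB, so
	 * gfs2_glock_nq() may return GLR_TRYFAILED if another node holds the
	 * lock; the attempt is retried a few times with short, increasing
	 * sleeps (1, 2, 4 ms) before giving up (-EAGAIN below).  A blocking
	 * request simply waits in gfs2_glock_nq() and the loop runs once.
	 */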
1044 	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
1045 		error = gfs2_glock_nq(fl_gh);
1046 		if (error != GLR_TRYFAILED)
1047 			break;
1048 		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
1049 		fl_gh->gh_error = 0;
1050 		msleep(sleeptime);
1051 	}
1052 	if (error) {
1053 		gfs2_holder_uninit(fl_gh);
1054 		if (error == GLR_TRYFAILED)
1055 			error = -EAGAIN;
1056 	} else {
1057 		error = locks_lock_file_wait(file, fl);
1058 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1059 	}
1060 
1061 out:
1062 	mutex_unlock(&fp->f_fl_mutex);
1063 	return error;
1064 }
1065 
1066 static void do_unflock(struct file *file, struct file_lock *fl)
1067 {
1068 	struct gfs2_file *fp = file->private_data;
1069 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1070 
1071 	mutex_lock(&fp->f_fl_mutex);
1072 	locks_lock_file_wait(file, fl);
1073 	if (fl_gh->gh_gl) {
1074 		gfs2_glock_dq(fl_gh);
1075 		gfs2_holder_uninit(fl_gh);
1076 	}
1077 	mutex_unlock(&fp->f_fl_mutex);
1078 }
1079 
1080 /**
1081  * gfs2_flock - acquire/release a flock lock on a file
1082  * @file: the file pointer
1083  * @cmd: either modify or retrieve lock state, possibly wait
1084  * @fl: type and range of lock
1085  *
1086  * Returns: errno
1087  */
1088 
1089 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1090 {
1091 	if (!(fl->fl_flags & FL_FLOCK))
1092 		return -ENOLCK;
1093 	if (fl->fl_type & LOCK_MAND)
1094 		return -EOPNOTSUPP;
1095 
1096 	if (fl->fl_type == F_UNLCK) {
1097 		do_unflock(file, fl);
1098 		return 0;
1099 	} else {
1100 		return do_flock(file, cmd, fl);
1101 	}
1102 }
1103 
1104 const struct file_operations gfs2_file_fops = {
1105 	.llseek		= gfs2_llseek,
1106 	.read_iter	= generic_file_read_iter,
1107 	.write_iter	= gfs2_file_write_iter,
1108 	.unlocked_ioctl	= gfs2_ioctl,
1109 	.mmap		= gfs2_mmap,
1110 	.open		= gfs2_open,
1111 	.release	= gfs2_release,
1112 	.fsync		= gfs2_fsync,
1113 	.lock		= gfs2_lock,
1114 	.flock		= gfs2_flock,
1115 	.splice_read	= generic_file_splice_read,
1116 	.splice_write	= gfs2_file_splice_write,
1117 	.setlease	= simple_nosetlease,
1118 	.fallocate	= gfs2_fallocate,
1119 };
1120 
1121 const struct file_operations gfs2_dir_fops = {
1122 	.iterate	= gfs2_readdir,
1123 	.unlocked_ioctl	= gfs2_ioctl,
1124 	.open		= gfs2_open,
1125 	.release	= gfs2_release,
1126 	.fsync		= gfs2_fsync,
1127 	.lock		= gfs2_lock,
1128 	.flock		= gfs2_flock,
1129 	.llseek		= default_llseek,
1130 };
1131 
1132 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1133 
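/*
 * The _nolock variants below are used with the lock_nolock (single node)
 * locking protocol.  With no DLM there is no cluster-wide lock state, so
 * no .lock/.flock methods are provided and the VFS falls back to its
 * local POSIX lock and flock implementations.
 */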
1134 const struct file_operations gfs2_file_fops_nolock = {
1135 	.llseek		= gfs2_llseek,
1136 	.read_iter	= generic_file_read_iter,
1137 	.write_iter	= gfs2_file_write_iter,
1138 	.unlocked_ioctl	= gfs2_ioctl,
1139 	.mmap		= gfs2_mmap,
1140 	.open		= gfs2_open,
1141 	.release	= gfs2_release,
1142 	.fsync		= gfs2_fsync,
1143 	.splice_read	= generic_file_splice_read,
1144 	.splice_write	= gfs2_file_splice_write,
1145 	.setlease	= generic_setlease,
1146 	.fallocate	= gfs2_fallocate,
1147 };
1148 
1149 const struct file_operations gfs2_dir_fops_nolock = {
1150 	.iterate	= gfs2_readdir,
1151 	.unlocked_ioctl	= gfs2_ioctl,
1152 	.open		= gfs2_open,
1153 	.release	= gfs2_release,
1154 	.fsync		= gfs2_fsync,
1155 	.llseek		= default_llseek,
1156 };
1157 
1158