xref: /openbmc/linux/fs/gfs2/file.c (revision ca481398)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/falloc.h>
22 #include <linux/swap.h>
23 #include <linux/crc32.h>
24 #include <linux/writeback.h>
25 #include <linux/uaccess.h>
26 #include <linux/dlm.h>
27 #include <linux/dlm_plock.h>
28 #include <linux/delay.h>
29 
30 #include "gfs2.h"
31 #include "incore.h"
32 #include "bmap.h"
33 #include "dir.h"
34 #include "glock.h"
35 #include "glops.h"
36 #include "inode.h"
37 #include "log.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "rgrp.h"
41 #include "trans.h"
42 #include "util.h"
43 
44 /**
45  * gfs2_llseek - seek to a location in a file
46  * @file: the file
47  * @offset: the offset
48  * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
49  *
50  * SEEK_END requires the glock for the file because it references the
51  * file's size.
52  *
53  * Returns: The new offset, or errno
54  */
55 
56 static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
57 {
58 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
59 	struct gfs2_holder i_gh;
60 	loff_t error;
61 
62 	switch (whence) {
63 	case SEEK_END: /* These reference inode->i_size */
64 	case SEEK_DATA:
65 	case SEEK_HOLE:
66 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
67 					   &i_gh);
68 		if (!error) {
69 			error = generic_file_llseek(file, offset, whence);
70 			gfs2_glock_dq_uninit(&i_gh);
71 		}
72 		break;
73 	case SEEK_CUR:
74 	case SEEK_SET:
75 		error = generic_file_llseek(file, offset, whence);
76 		break;
77 	default:
78 		error = -EINVAL;
79 	}
80 
81 	return error;
82 }
83 
84 /**
85  * gfs2_readdir - Iterator for a directory
86  * @file: The directory to read from
87  * @ctx: What to feed directory entries to
88  *
89  * Returns: errno
90  */
91 
92 static int gfs2_readdir(struct file *file, struct dir_context *ctx)
93 {
94 	struct inode *dir = file->f_mapping->host;
95 	struct gfs2_inode *dip = GFS2_I(dir);
96 	struct gfs2_holder d_gh;
97 	int error;
98 
99 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
100 	if (error)
101 		return error;
102 
103 	error = gfs2_dir_read(dir, ctx, &file->f_ra);
104 
105 	gfs2_glock_dq_uninit(&d_gh);
106 
107 	return error;
108 }
109 
110 /**
111  * fsflags_cvt
112  * @table: A table of 32 u32 flags
113  * @val: a 32 bit value to convert
114  *
115  * This function can be used to convert between fsflags values and
116  * GFS2's own flags values.
117  *
118  * Returns: the converted flags
119  */
120 static u32 fsflags_cvt(const u32 *table, u32 val)
121 {
122 	u32 res = 0;
123 	while (val) {
124 		if (val & 1)
125 			res |= *table;
126 		table++;
127 		val >>= 1;
128 	}
129 	return res;
130 }
131 
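/*
 * Each conversion table below is indexed by bit position in the source
 * flags word. For example, FS_SYNC_FL is bit 3 of the fsflags word, so
 * fsflags_cvt(fsflags_to_gfs2, FS_SYNC_FL) yields GFS2_DIF_SYNC, and
 * fsflags_cvt(gfs2_to_fsflags, GFS2_DIF_SYNC) maps it back again.
 */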
132 static const u32 fsflags_to_gfs2[32] = {
133 	[3] = GFS2_DIF_SYNC,
134 	[4] = GFS2_DIF_IMMUTABLE,
135 	[5] = GFS2_DIF_APPENDONLY,
136 	[7] = GFS2_DIF_NOATIME,
137 	[12] = GFS2_DIF_EXHASH,
138 	[14] = GFS2_DIF_INHERIT_JDATA,
139 	[17] = GFS2_DIF_TOPDIR,
140 };
141 
142 static const u32 gfs2_to_fsflags[32] = {
143 	[gfs2fl_Sync] = FS_SYNC_FL,
144 	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
145 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
146 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
147 	[gfs2fl_ExHash] = FS_INDEX_FL,
148 	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
149 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
150 };
151 
152 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
153 {
154 	struct inode *inode = file_inode(filp);
155 	struct gfs2_inode *ip = GFS2_I(inode);
156 	struct gfs2_holder gh;
157 	int error;
158 	u32 fsflags;
159 
160 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
161 	error = gfs2_glock_nq(&gh);
162 	if (error)
163 		goto out_uninit;
164 
165 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
166 	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
167 		fsflags |= FS_JOURNAL_DATA_FL;
168 	if (put_user(fsflags, ptr))
169 		error = -EFAULT;
170 
171 	gfs2_glock_dq(&gh);
172 out_uninit:
173 	gfs2_holder_uninit(&gh);
174 	return error;
175 }
176 
177 void gfs2_set_inode_flags(struct inode *inode)
178 {
179 	struct gfs2_inode *ip = GFS2_I(inode);
180 	unsigned int flags = inode->i_flags;
181 
182 	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
183 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
184 		flags |= S_NOSEC;
185 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
186 		flags |= S_IMMUTABLE;
187 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
188 		flags |= S_APPEND;
189 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
190 		flags |= S_NOATIME;
191 	if (ip->i_diskflags & GFS2_DIF_SYNC)
192 		flags |= S_SYNC;
193 	inode->i_flags = flags;
194 }
195 
196 /* Flags that can be set by user space */
197 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
198 			     GFS2_DIF_IMMUTABLE|		\
199 			     GFS2_DIF_APPENDONLY|		\
200 			     GFS2_DIF_NOATIME|			\
201 			     GFS2_DIF_SYNC|			\
202 			     GFS2_DIF_SYSTEM|			\
203 			     GFS2_DIF_TOPDIR|			\
204 			     GFS2_DIF_INHERIT_JDATA)
205 
206 /**
207  * do_gfs2_set_flags - set flags on an inode
208  * @filp: file pointer
209  * @reqflags: The flags to set
210  * @mask: Indicates which flags are valid
211  *
212  */
213 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
214 {
215 	struct inode *inode = file_inode(filp);
216 	struct gfs2_inode *ip = GFS2_I(inode);
217 	struct gfs2_sbd *sdp = GFS2_SB(inode);
218 	struct buffer_head *bh;
219 	struct gfs2_holder gh;
220 	int error;
221 	u32 new_flags, flags;
222 
223 	error = mnt_want_write_file(filp);
224 	if (error)
225 		return error;
226 
227 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
228 	if (error)
229 		goto out_drop_write;
230 
231 	error = -EACCES;
232 	if (!inode_owner_or_capable(inode))
233 		goto out;
234 
235 	error = 0;
236 	flags = ip->i_diskflags;
237 	new_flags = (flags & ~mask) | (reqflags & mask);
238 	if ((new_flags ^ flags) == 0)
239 		goto out;
240 
241 	error = -EINVAL;
242 	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
243 		goto out;
244 
245 	error = -EPERM;
246 	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
247 		goto out;
248 	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
249 		goto out;
250 	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
251 	    !capable(CAP_LINUX_IMMUTABLE))
252 		goto out;
253 	if (!IS_IMMUTABLE(inode)) {
254 		error = gfs2_permission(inode, MAY_WRITE);
255 		if (error)
256 			goto out;
257 	}
258 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
259 		if (flags & GFS2_DIF_JDATA)
260 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
261 		error = filemap_fdatawrite(inode->i_mapping);
262 		if (error)
263 			goto out;
264 		error = filemap_fdatawait(inode->i_mapping);
265 		if (error)
266 			goto out;
267 	}
268 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
269 	if (error)
270 		goto out;
271 	error = gfs2_meta_inode_buffer(ip, &bh);
272 	if (error)
273 		goto out_trans_end;
274 	gfs2_trans_add_meta(ip->i_gl, bh);
275 	ip->i_diskflags = new_flags;
276 	gfs2_dinode_out(ip, bh->b_data);
277 	brelse(bh);
278 	gfs2_set_inode_flags(inode);
279 	gfs2_set_aops(inode);
280 out_trans_end:
281 	gfs2_trans_end(sdp);
282 out:
283 	gfs2_glock_dq_uninit(&gh);
284 out_drop_write:
285 	mnt_drop_write_file(filp);
286 	return error;
287 }
288 
289 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
290 {
291 	struct inode *inode = file_inode(filp);
292 	u32 fsflags, gfsflags;
293 
294 	if (get_user(fsflags, ptr))
295 		return -EFAULT;
296 
297 	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
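	/* For regular files the journaled-data setting is exposed to user
	 * space as FS_JOURNAL_DATA_FL, which the table maps to
	 * GFS2_DIF_INHERIT_JDATA; translate that back to GFS2_DIF_JDATA
	 * here. Directories keep INHERIT_JDATA and cannot toggle JDATA
	 * directly. */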
298 	if (!S_ISDIR(inode->i_mode)) {
299 		gfsflags &= ~GFS2_DIF_TOPDIR;
300 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
301 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
302 		return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
303 	}
304 	return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
305 }
306 
307 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
308 {
309 	switch (cmd) {
310 	case FS_IOC_GETFLAGS:
311 		return gfs2_get_flags(filp, (u32 __user *)arg);
312 	case FS_IOC_SETFLAGS:
313 		return gfs2_set_flags(filp, (u32 __user *)arg);
314 	case FITRIM:
315 		return gfs2_fitrim(filp, (void __user *)arg);
316 	}
317 	return -ENOTTY;
318 }
319 
320 /**
321  * gfs2_size_hint - Give a hint to the size of a write request
322  * @filep: The struct file
323  * @offset: The file offset of the write
324  * @size: The length of the write
325  *
326  * When we are about to do a write, this function records the total
327  * write size in order to provide a suitable hint to the lower layers
328  * about how many blocks will be required.
329  *
330  */
331 
332 static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
333 {
334 	struct inode *inode = file_inode(filep);
335 	struct gfs2_sbd *sdp = GFS2_SB(inode);
336 	struct gfs2_inode *ip = GFS2_I(inode);
337 	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
338 	int hint = min_t(size_t, INT_MAX, blks);
339 
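	/* Only ever grow the hint: a later, smaller write should not shrink
	 * the block reservation sizing already recorded for this inode. */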
340 	if (hint > atomic_read(&ip->i_res.rs_sizehint))
341 		atomic_set(&ip->i_res.rs_sizehint, hint);
342 }
343 
344 /**
345  * gfs2_allocate_page_backing - Use bmap to allocate blocks
346  * @page: The (locked) page to allocate backing for
347  *
348  * We try to allocate all the blocks required for the page in
349  * one go. This might fail for various reasons, so we keep
350  * trying until all the blocks to back this page are allocated.
351  * If some of the blocks are already allocated, that's ok too.
352  */
353 
354 static int gfs2_allocate_page_backing(struct page *page)
355 {
356 	struct inode *inode = page->mapping->host;
357 	struct buffer_head bh;
358 	unsigned long size = PAGE_SIZE;
359 	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
360 
361 	do {
362 		bh.b_state = 0;
363 		bh.b_size = size;
364 		gfs2_block_map(inode, lblock, &bh, 1);
365 		if (!buffer_mapped(&bh))
366 			return -EIO;
367 		size -= bh.b_size;
368 		lblock += (bh.b_size >> inode->i_blkbits);
369 	} while (size > 0);
370 	return 0;
371 }
372 
373 /**
374  * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
375  * @vmf: The virtual memory fault containing the page to become writable
376  *       (the affected virtual memory area is available as vmf->vma)
377  *
378  * When the page becomes writable, we need to ensure that we have
379  * blocks allocated on disk to back that page.
380  */
381 
382 static int gfs2_page_mkwrite(struct vm_fault *vmf)
383 {
384 	struct page *page = vmf->page;
385 	struct inode *inode = file_inode(vmf->vma->vm_file);
386 	struct gfs2_inode *ip = GFS2_I(inode);
387 	struct gfs2_sbd *sdp = GFS2_SB(inode);
388 	struct gfs2_alloc_parms ap = { .aflags = 0, };
389 	unsigned long last_index;
390 	u64 pos = page->index << PAGE_SHIFT;
391 	unsigned int data_blocks, ind_blocks, rblocks;
392 	struct gfs2_holder gh;
393 	loff_t size;
394 	int ret;
395 
396 	sb_start_pagefault(inode->i_sb);
397 
398 	ret = gfs2_rsqa_alloc(ip);
399 	if (ret)
400 		goto out;
401 
402 	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
403 
404 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
405 	ret = gfs2_glock_nq(&gh);
406 	if (ret)
407 		goto out_uninit;
408 
409 	/* Update file times before taking page lock */
410 	file_update_time(vmf->vma->vm_file);
411 
412 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
413 	set_bit(GIF_SW_PAGED, &ip->i_flags);
414 
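	/* If the backing blocks are already allocated there is nothing to
	 * reserve or journal; just lock the page and revalidate it. */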
415 	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
416 		lock_page(page);
417 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
418 			ret = -EAGAIN;
419 			unlock_page(page);
420 		}
421 		goto out_unlock;
422 	}
423 
424 	ret = gfs2_rindex_update(sdp);
425 	if (ret)
426 		goto out_unlock;
427 
428 	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
429 	ap.target = data_blocks + ind_blocks;
430 	ret = gfs2_quota_lock_check(ip, &ap);
431 	if (ret)
432 		goto out_unlock;
433 	ret = gfs2_inplace_reserve(ip, &ap);
434 	if (ret)
435 		goto out_quota_unlock;
436 
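	/* Reserve journal space for the transaction: the dinode and any
	 * indirect blocks always, the data blocks only when they are
	 * journaled (jdata), and statfs/quota/rgrp changes only when new
	 * blocks are actually being allocated. */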
437 	rblocks = RES_DINODE + ind_blocks;
438 	if (gfs2_is_jdata(ip))
439 		rblocks += data_blocks ? data_blocks : 1;
440 	if (ind_blocks || data_blocks) {
441 		rblocks += RES_STATFS + RES_QUOTA;
442 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
443 	}
444 	ret = gfs2_trans_begin(sdp, rblocks, 0);
445 	if (ret)
446 		goto out_trans_fail;
447 
448 	lock_page(page);
449 	ret = -EINVAL;
450 	size = i_size_read(inode);
451 	last_index = (size - 1) >> PAGE_SHIFT;
452 	/* Check page index against inode size */
453 	if (size == 0 || (page->index > last_index))
454 		goto out_trans_end;
455 
456 	ret = -EAGAIN;
457 	/* If truncated, we must retry the operation; we may have raced
458 	 * with the glock demotion code.
459 	 */
460 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
461 		goto out_trans_end;
462 
463 	/* Unstuff, if required, and allocate backing blocks for page */
464 	ret = 0;
465 	if (gfs2_is_stuffed(ip))
466 		ret = gfs2_unstuff_dinode(ip, page);
467 	if (ret == 0)
468 		ret = gfs2_allocate_page_backing(page);
469 
470 out_trans_end:
471 	if (ret)
472 		unlock_page(page);
473 	gfs2_trans_end(sdp);
474 out_trans_fail:
475 	gfs2_inplace_release(ip);
476 out_quota_unlock:
477 	gfs2_quota_unlock(ip);
478 out_unlock:
479 	gfs2_glock_dq(&gh);
480 out_uninit:
481 	gfs2_holder_uninit(&gh);
482 	if (ret == 0) {
483 		set_page_dirty(page);
484 		wait_for_stable_page(page);
485 	}
486 out:
487 	sb_end_pagefault(inode->i_sb);
488 	return block_page_mkwrite_return(ret);
489 }
490 
491 static const struct vm_operations_struct gfs2_vm_ops = {
492 	.fault = filemap_fault,
493 	.map_pages = filemap_map_pages,
494 	.page_mkwrite = gfs2_page_mkwrite,
495 };
496 
497 /**
498  * gfs2_mmap - set up a memory mapping of a file
499  * @file: The file to map
500  * @vma: The VMA which describes the mapping
501  *
502  * There is no need to get a lock here unless we should be updating
503  * atime. We ignore any locking errors since the only consequence is
504  * a missed atime update (which will just be deferred until later).
505  *
506  * Returns: 0
507  */
508 
509 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
510 {
511 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
512 
513 	if (!(file->f_flags & O_NOATIME) &&
514 	    !IS_NOATIME(&ip->i_inode)) {
515 		struct gfs2_holder i_gh;
516 		int error;
517 
518 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
519 					   &i_gh);
520 		if (error)
521 			return error;
522 		/* grab lock to update inode */
523 		gfs2_glock_dq_uninit(&i_gh);
524 		file_accessed(file);
525 	}
526 	vma->vm_ops = &gfs2_vm_ops;
527 
528 	return 0;
529 }
530 
531 /**
532  * gfs2_open_common - This is common to open and atomic_open
533  * @inode: The inode being opened
534  * @file: The file being opened
535  *
536  * This may be called under a glock or not, depending upon how it has
537  * been called. We must always be called under a glock for regular
538  * files, however. For other file types, it does not matter whether
539  * we hold the glock or not.
540  *
541  * Returns: Error code or 0 for success
542  */
543 
544 int gfs2_open_common(struct inode *inode, struct file *file)
545 {
546 	struct gfs2_file *fp;
547 	int ret;
548 
549 	if (S_ISREG(inode->i_mode)) {
550 		ret = generic_file_open(inode, file);
551 		if (ret)
552 			return ret;
553 	}
554 
555 	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
556 	if (!fp)
557 		return -ENOMEM;
558 
559 	mutex_init(&fp->f_fl_mutex);
560 
561 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
562 	file->private_data = fp;
563 	return 0;
564 }
565 
566 /**
567  * gfs2_open - open a file
568  * @inode: the inode to open
569  * @file: the struct file for this opening
570  *
571  * After atomic_open, this function is only used for opening files
572  * which are already cached. We must still get the glock for regular
573  * files to ensure that we have the file size up to date for the large
574  * file check which is in the common code. That is only an issue for
575  * regular files though.
576  *
577  * Returns: errno
578  */
579 
580 static int gfs2_open(struct inode *inode, struct file *file)
581 {
582 	struct gfs2_inode *ip = GFS2_I(inode);
583 	struct gfs2_holder i_gh;
584 	int error;
585 	bool need_unlock = false;
586 
587 	if (S_ISREG(ip->i_inode.i_mode)) {
588 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
589 					   &i_gh);
590 		if (error)
591 			return error;
592 		need_unlock = true;
593 	}
594 
595 	error = gfs2_open_common(inode, file);
596 
597 	if (need_unlock)
598 		gfs2_glock_dq_uninit(&i_gh);
599 
600 	return error;
601 }
602 
603 /**
604  * gfs2_release - called to close a struct file
605  * @inode: the inode the struct file belongs to
606  * @file: the struct file being closed
607  *
608  * Returns: errno
609  */
610 
611 static int gfs2_release(struct inode *inode, struct file *file)
612 {
613 	struct gfs2_inode *ip = GFS2_I(inode);
614 
615 	kfree(file->private_data);
616 	file->private_data = NULL;
617 
618 	if (!(file->f_mode & FMODE_WRITE))
619 		return 0;
620 
621 	gfs2_rsqa_delete(ip, &inode->i_writecount);
622 	return 0;
623 }
624 
625 /**
626  * gfs2_fsync - sync the dirty data for a file (across the cluster)
627  * @file: the file that points to the dentry
628  * @start: the start position in the file to sync
629  * @end: the end position in the file to sync
630  * @datasync: set if we can ignore timestamp changes
631  *
632  * We split the data flushing here so that we don't wait for the data
633  * until after we've also sent the metadata to disk. Note that for
634  * data=ordered, we will write & wait for the data at the log flush
635  * stage anyway, so this is unlikely to make much of a difference
636  * except in the data=writeback case.
637  *
638  * If the fdatawrite fails for any reason other than -EIO, we will
639  * continue the remainder of the fsync, although we'll still report
640  * the error at the end. This is to match filemap_write_and_wait_range()
641  * behaviour.
642  *
643  * Returns: errno
644  */
645 
646 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
647 		      int datasync)
648 {
649 	struct address_space *mapping = file->f_mapping;
650 	struct inode *inode = mapping->host;
651 	int sync_state = inode->i_state & I_DIRTY_ALL;
652 	struct gfs2_inode *ip = GFS2_I(inode);
653 	int ret = 0, ret1 = 0;
654 
655 	if (mapping->nrpages) {
656 		ret1 = filemap_fdatawrite_range(mapping, start, end);
657 		if (ret1 == -EIO)
658 			return ret1;
659 	}
660 
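	/* For non-journaled data the dirty pages are handled entirely by the
	 * fdatawrite/fdatawait calls, so they need not force a metadata
	 * sync; with jdata the pages are journaled and must be covered by
	 * the sync below. For datasync, timestamp-only dirtiness is also
	 * ignored. */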
661 	if (!gfs2_is_jdata(ip))
662 		sync_state &= ~I_DIRTY_PAGES;
663 	if (datasync)
664 		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
665 
666 	if (sync_state) {
667 		ret = sync_inode_metadata(inode, 1);
668 		if (ret)
669 			return ret;
670 		if (gfs2_is_jdata(ip))
671 			ret = file_write_and_wait(file);
672 		if (ret)
673 			return ret;
674 		gfs2_ail_flush(ip->i_gl, 1);
675 	}
676 
677 	if (mapping->nrpages)
678 		ret = file_fdatawait_range(file, start, end);
679 
680 	return ret ? ret : ret1;
681 }
682 
683 /**
684  * gfs2_file_write_iter - Perform a write to a file
685  * @iocb: The io context
686  * @from: An iov_iter containing the data to write
689  *
690  * We have to do a lock/unlock here to refresh the inode size for
691  * O_APPEND writes, otherwise we can end up writing at the wrong
692  * offset. There is still a race, but provided the app is using its
693  * own file locking, this will make O_APPEND work as expected.
694  *
695  */
696 
697 static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
698 {
699 	struct file *file = iocb->ki_filp;
700 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
701 	int ret;
702 
703 	ret = gfs2_rsqa_alloc(ip);
704 	if (ret)
705 		return ret;
706 
707 	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
708 
709 	if (iocb->ki_flags & IOCB_APPEND) {
710 		struct gfs2_holder gh;
711 
712 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
713 		if (ret)
714 			return ret;
715 		gfs2_glock_dq_uninit(&gh);
716 	}
717 
718 	return generic_file_write_iter(iocb, from);
719 }
720 
721 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
722 			   int mode)
723 {
724 	struct gfs2_inode *ip = GFS2_I(inode);
725 	struct buffer_head *dibh;
726 	int error;
727 	unsigned int nr_blks;
728 	sector_t lblock = offset >> inode->i_blkbits;
729 
730 	error = gfs2_meta_inode_buffer(ip, &dibh);
731 	if (unlikely(error))
732 		return error;
733 
734 	gfs2_trans_add_meta(ip->i_gl, dibh);
735 
736 	if (gfs2_is_stuffed(ip)) {
737 		error = gfs2_unstuff_dinode(ip, NULL);
738 		if (unlikely(error))
739 			goto out;
740 	}
741 
742 	while (len) {
743 		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
744 		bh_map.b_size = len;
745 		set_buffer_zeronew(&bh_map);
746 
747 		error = gfs2_block_map(inode, lblock, &bh_map, 1);
748 		if (unlikely(error))
749 			goto out;
750 		len -= bh_map.b_size;
751 		nr_blks = bh_map.b_size >> inode->i_blkbits;
752 		lblock += nr_blks;
753 		if (!buffer_new(&bh_map))
754 			continue;
755 		if (unlikely(!buffer_zeronew(&bh_map))) {
756 			error = -EIO;
757 			goto out;
758 		}
759 	}
760 out:
761 	brelse(dibh);
762 	return error;
763 }
764 /**
765  * calc_max_reserv() - Reverse of gfs2_write_calc_reserv. Given a number of
766  *                     blocks, determine how many bytes can be written.
767  * @ip:          The inode in question.
768  * @len:         Max cap of bytes. What we return in *len must be <= this.
769  * @data_blocks: Compute and return the number of data blocks needed
770  * @ind_blocks:  Compute and return the number of indirect blocks needed
771  * @max_blocks:  The total blocks available to work with.
772  *
773  * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
774  */
775 static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
776 			    unsigned int *data_blocks, unsigned int *ind_blocks,
777 			    unsigned int max_blocks)
778 {
779 	loff_t max = *len;
780 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
781 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
782 
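	/* Carve the metadata overhead out of the budget: each pass works out
	 * how many indirect blocks are needed to point at max_data data
	 * blocks (sd_inptrs pointers per indirect block) and subtracts them,
	 * until the top level of the tree fits within the dinode's direct
	 * pointers (sd_diptrs). */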
783 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
784 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
785 		max_data -= tmp;
786 	}
787 
788 	*data_blocks = max_data;
789 	*ind_blocks = max_blocks - max_data;
790 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
791 	if (*len > max) {
792 		*len = max;
793 		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
794 	}
795 }
796 
797 static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
798 {
799 	struct inode *inode = file_inode(file);
800 	struct gfs2_sbd *sdp = GFS2_SB(inode);
801 	struct gfs2_inode *ip = GFS2_I(inode);
802 	struct gfs2_alloc_parms ap = { .aflags = 0, };
803 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
804 	loff_t bytes, max_bytes, max_blks = UINT_MAX;
805 	int error;
806 	const loff_t pos = offset;
807 	const loff_t count = len;
808 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
809 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
810 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
811 
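	/* Round the end of the request up, and the start down, to filesystem
	 * block boundaries so that the allocation loop below works in whole
	 * blocks. */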
812 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
813 
814 	offset &= bsize_mask;
815 
816 	len = next - offset;
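	/* Work in chunks of at most half the largest rgrp's data capacity,
	 * rounded to a block multiple; the checks below guard against that
	 * value coming out as zero. */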
817 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
818 	if (!bytes)
819 		bytes = UINT_MAX;
820 	bytes &= bsize_mask;
821 	if (bytes == 0)
822 		bytes = sdp->sd_sb.sb_bsize;
823 
824 	gfs2_size_hint(file, offset, len);
825 
826 	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
827 	ap.min_target = data_blocks + ind_blocks;
828 
829 	while (len > 0) {
830 		if (len < bytes)
831 			bytes = len;
832 		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
833 			len -= bytes;
834 			offset += bytes;
835 			continue;
836 		}
837 
838 		/* We need to determine how many bytes we can actually
839 		 * fallocate without exceeding quota or going over the
840 		 * end of the fs. We start off optimistically by assuming
841 		 * we can write max_bytes */
842 		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
843 
844 		/* Since max_bytes is most likely a theoretical max, we
845 		 * calculate a more realistic 'bytes' to serve as a good
846 		 * starting point for the number of bytes we may be able
847 		 * to write */
848 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
849 		ap.target = data_blocks + ind_blocks;
850 
851 		error = gfs2_quota_lock_check(ip, &ap);
852 		if (error)
853 			return error;
854 		/* ap.allowed tells us how many blocks quota will allow
855 		 * us to write. Check if this reduces max_blks */
856 		if (ap.allowed && ap.allowed < max_blks)
857 			max_blks = ap.allowed;
858 
859 		error = gfs2_inplace_reserve(ip, &ap);
860 		if (error)
861 			goto out_qunlock;
862 
863 		/* check if the selected rgrp limits our max_blks further */
864 		if (ap.allowed && ap.allowed < max_blks)
865 			max_blks = ap.allowed;
866 
867 		/* Almost done. Calculate bytes that can be written using
868 		 * max_blks. We also recompute max_bytes, data_blocks and
869 		 * ind_blocks */
870 		calc_max_reserv(ip, &max_bytes, &data_blocks,
871 				&ind_blocks, max_blks);
872 
873 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
874 			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
875 		if (gfs2_is_jdata(ip))
876 			rblocks += data_blocks ? data_blocks : 1;
877 
878 		error = gfs2_trans_begin(sdp, rblocks,
879 					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
880 		if (error)
881 			goto out_trans_fail;
882 
883 		error = fallocate_chunk(inode, offset, max_bytes, mode);
884 		gfs2_trans_end(sdp);
885 
886 		if (error)
887 			goto out_trans_fail;
888 
889 		len -= max_bytes;
890 		offset += max_bytes;
891 		gfs2_inplace_release(ip);
892 		gfs2_quota_unlock(ip);
893 	}
894 
895 	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
896 		i_size_write(inode, pos + count);
897 		file_update_time(file);
898 		mark_inode_dirty(inode);
899 	}
900 
901 	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
902 		return vfs_fsync_range(file, pos, pos + count - 1,
903 			       (file->f_flags & __O_SYNC) ? 0 : 1);
904 	return 0;
905 
906 out_trans_fail:
907 	gfs2_inplace_release(ip);
908 out_qunlock:
909 	gfs2_quota_unlock(ip);
910 	return error;
911 }
912 
913 static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
914 {
915 	struct inode *inode = file_inode(file);
916 	struct gfs2_sbd *sdp = GFS2_SB(inode);
917 	struct gfs2_inode *ip = GFS2_I(inode);
918 	struct gfs2_holder gh;
919 	int ret;
920 
921 	if (mode & ~FALLOC_FL_KEEP_SIZE)
922 		return -EOPNOTSUPP;
923 	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
924 	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
925 		return -EOPNOTSUPP;
926 
927 	inode_lock(inode);
928 
929 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
930 	ret = gfs2_glock_nq(&gh);
931 	if (ret)
932 		goto out_uninit;
933 
934 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
935 	    (offset + len) > inode->i_size) {
936 		ret = inode_newsize_ok(inode, offset + len);
937 		if (ret)
938 			goto out_unlock;
939 	}
940 
941 	ret = get_write_access(inode);
942 	if (ret)
943 		goto out_unlock;
944 
945 	ret = gfs2_rsqa_alloc(ip);
946 	if (ret)
947 		goto out_putw;
948 
949 	ret = __gfs2_fallocate(file, mode, offset, len);
950 	if (ret)
951 		gfs2_rs_deltree(&ip->i_res);
952 
953 out_putw:
954 	put_write_access(inode);
955 out_unlock:
956 	gfs2_glock_dq(&gh);
957 out_uninit:
958 	gfs2_holder_uninit(&gh);
959 	inode_unlock(inode);
960 	return ret;
961 }
962 
963 static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
964 				      struct file *out, loff_t *ppos,
965 				      size_t len, unsigned int flags)
966 {
967 	int error;
968 	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
969 
970 	error = gfs2_rsqa_alloc(ip);
971 	if (error)
972 		return (ssize_t)error;
973 
974 	gfs2_size_hint(out, *ppos, len);
975 
976 	return iter_file_splice_write(pipe, out, ppos, len, flags);
977 }
978 
979 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
980 
981 /**
982  * gfs2_lock - acquire/release a posix lock on a file
983  * @file: the file pointer
984  * @cmd: either modify or retrieve lock state, possibly wait
985  * @fl: type and range of lock
986  *
987  * Returns: errno
988  */
989 
990 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
991 {
992 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
993 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
994 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
995 
996 	if (!(fl->fl_flags & FL_POSIX))
997 		return -ENOLCK;
998 	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
999 		return -ENOLCK;
1000 
1001 	if (cmd == F_CANCELLK) {
1002 		/* Hack: */
1003 		cmd = F_SETLK;
1004 		fl->fl_type = F_UNLCK;
1005 	}
1006 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1007 		if (fl->fl_type == F_UNLCK)
1008 			locks_lock_file_wait(file, fl);
1009 		return -EIO;
1010 	}
1011 	if (IS_GETLK(cmd))
1012 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
1013 	else if (fl->fl_type == F_UNLCK)
1014 		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
1015 	else
1016 		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
1017 }
1018 
1019 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
1020 {
1021 	struct gfs2_file *fp = file->private_data;
1022 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1023 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
1024 	struct gfs2_glock *gl;
1025 	unsigned int state;
1026 	u16 flags;
1027 	int error = 0;
1028 	int sleeptime;
1029 
1030 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
1031 	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
1032 
1033 	mutex_lock(&fp->f_fl_mutex);
1034 
1035 	if (gfs2_holder_initialized(fl_gh)) {
1036 		if (fl_gh->gh_state == state)
1037 			goto out;
1038 		locks_lock_file_wait(file,
1039 				     &(struct file_lock) {
1040 					     .fl_type = F_UNLCK,
1041 					     .fl_flags = FL_FLOCK
1042 				     });
1043 		gfs2_glock_dq(fl_gh);
1044 		gfs2_holder_reinit(state, flags, fl_gh);
1045 	} else {
1046 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1047 				       &gfs2_flock_glops, CREATE, &gl);
1048 		if (error)
1049 			goto out;
1050 		gfs2_holder_init(gl, state, flags, fl_gh);
1051 		gfs2_glock_put(gl);
1052 	}
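	/* For non-blocking requests the first attempt uses LM_FLAG_TRY_1CB,
	 * a try-lock that also issues one demote callback to conflicting
	 * holders; if that fails we retry with plain LM_FLAG_TRY, backing
	 * off for 1, 2 and then 4 ms before giving up with -EAGAIN. */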
1053 	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
1054 		error = gfs2_glock_nq(fl_gh);
1055 		if (error != GLR_TRYFAILED)
1056 			break;
1057 		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
1058 		fl_gh->gh_error = 0;
1059 		msleep(sleeptime);
1060 	}
1061 	if (error) {
1062 		gfs2_holder_uninit(fl_gh);
1063 		if (error == GLR_TRYFAILED)
1064 			error = -EAGAIN;
1065 	} else {
1066 		error = locks_lock_file_wait(file, fl);
1067 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1068 	}
1069 
1070 out:
1071 	mutex_unlock(&fp->f_fl_mutex);
1072 	return error;
1073 }
1074 
1075 static void do_unflock(struct file *file, struct file_lock *fl)
1076 {
1077 	struct gfs2_file *fp = file->private_data;
1078 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1079 
1080 	mutex_lock(&fp->f_fl_mutex);
1081 	locks_lock_file_wait(file, fl);
1082 	if (gfs2_holder_initialized(fl_gh)) {
1083 		gfs2_glock_dq(fl_gh);
1084 		gfs2_holder_uninit(fl_gh);
1085 	}
1086 	mutex_unlock(&fp->f_fl_mutex);
1087 }
1088 
1089 /**
1090  * gfs2_flock - acquire/release a flock lock on a file
1091  * @file: the file pointer
1092  * @cmd: either modify or retrieve lock state, possibly wait
1093  * @fl: type and range of lock
1094  *
1095  * Returns: errno
1096  */
1097 
1098 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1099 {
1100 	if (!(fl->fl_flags & FL_FLOCK))
1101 		return -ENOLCK;
1102 	if (fl->fl_type & LOCK_MAND)
1103 		return -EOPNOTSUPP;
1104 
1105 	if (fl->fl_type == F_UNLCK) {
1106 		do_unflock(file, fl);
1107 		return 0;
1108 	} else {
1109 		return do_flock(file, cmd, fl);
1110 	}
1111 }
1112 
1113 const struct file_operations gfs2_file_fops = {
1114 	.llseek		= gfs2_llseek,
1115 	.read_iter	= generic_file_read_iter,
1116 	.write_iter	= gfs2_file_write_iter,
1117 	.unlocked_ioctl	= gfs2_ioctl,
1118 	.mmap		= gfs2_mmap,
1119 	.open		= gfs2_open,
1120 	.release	= gfs2_release,
1121 	.fsync		= gfs2_fsync,
1122 	.lock		= gfs2_lock,
1123 	.flock		= gfs2_flock,
1124 	.splice_read	= generic_file_splice_read,
1125 	.splice_write	= gfs2_file_splice_write,
1126 	.setlease	= simple_nosetlease,
1127 	.fallocate	= gfs2_fallocate,
1128 };
1129 
1130 const struct file_operations gfs2_dir_fops = {
1131 	.iterate_shared	= gfs2_readdir,
1132 	.unlocked_ioctl	= gfs2_ioctl,
1133 	.open		= gfs2_open,
1134 	.release	= gfs2_release,
1135 	.fsync		= gfs2_fsync,
1136 	.lock		= gfs2_lock,
1137 	.flock		= gfs2_flock,
1138 	.llseek		= default_llseek,
1139 };
1140 
1141 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1142 
1143 const struct file_operations gfs2_file_fops_nolock = {
1144 	.llseek		= gfs2_llseek,
1145 	.read_iter	= generic_file_read_iter,
1146 	.write_iter	= gfs2_file_write_iter,
1147 	.unlocked_ioctl	= gfs2_ioctl,
1148 	.mmap		= gfs2_mmap,
1149 	.open		= gfs2_open,
1150 	.release	= gfs2_release,
1151 	.fsync		= gfs2_fsync,
1152 	.splice_read	= generic_file_splice_read,
1153 	.splice_write	= gfs2_file_splice_write,
1154 	.setlease	= generic_setlease,
1155 	.fallocate	= gfs2_fallocate,
1156 };
1157 
1158 const struct file_operations gfs2_dir_fops_nolock = {
1159 	.iterate_shared	= gfs2_readdir,
1160 	.unlocked_ioctl	= gfs2_ioctl,
1161 	.open		= gfs2_open,
1162 	.release	= gfs2_release,
1163 	.fsync		= gfs2_fsync,
1164 	.llseek		= default_llseek,
1165 };
1166 
1167