xref: /openbmc/linux/fs/gfs2/file.c (revision 8c0b9ee8)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/falloc.h>
22 #include <linux/swap.h>
23 #include <linux/crc32.h>
24 #include <linux/writeback.h>
25 #include <asm/uaccess.h>
26 #include <linux/dlm.h>
27 #include <linux/dlm_plock.h>
28 #include <linux/aio.h>
29 #include <linux/delay.h>
30 
31 #include "gfs2.h"
32 #include "incore.h"
33 #include "bmap.h"
34 #include "dir.h"
35 #include "glock.h"
36 #include "glops.h"
37 #include "inode.h"
38 #include "log.h"
39 #include "meta_io.h"
40 #include "quota.h"
41 #include "rgrp.h"
42 #include "trans.h"
43 #include "util.h"
44 
45 /**
46  * gfs2_llseek - seek to a location in a file
47  * @file: the file
48  * @offset: the offset
49  * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
50  *
51  * SEEK_END requires the glock for the file because it references the
52  * file's size.
53  *
54  * Returns: The new offset, or errno
55  */
56 
57 static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
58 {
59 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
60 	struct gfs2_holder i_gh;
61 	loff_t error;
62 
63 	switch (whence) {
64 	case SEEK_END: /* These reference inode->i_size */
65 	case SEEK_DATA:
66 	case SEEK_HOLE:
67 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
68 					   &i_gh);
69 		if (!error) {
70 			error = generic_file_llseek(file, offset, whence);
71 			gfs2_glock_dq_uninit(&i_gh);
72 		}
73 		break;
74 	case SEEK_CUR:
75 	case SEEK_SET:
76 		error = generic_file_llseek(file, offset, whence);
77 		break;
78 	default:
79 		error = -EINVAL;
80 	}
81 
82 	return error;
83 }
84 
85 /**
86  * gfs2_readdir - Iterator for a directory
87  * @file: The directory to read from
88  * @ctx: What to feed directory entries to
89  *
90  * Returns: errno
91  */
92 
93 static int gfs2_readdir(struct file *file, struct dir_context *ctx)
94 {
95 	struct inode *dir = file->f_mapping->host;
96 	struct gfs2_inode *dip = GFS2_I(dir);
97 	struct gfs2_holder d_gh;
98 	int error;
99 
100 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
101 	if (error)
102 		return error;
103 
104 	error = gfs2_dir_read(dir, ctx, &file->f_ra);
105 
106 	gfs2_glock_dq_uninit(&d_gh);
107 
108 	return error;
109 }
110 
111 /**
112  * fsflags_cvt - convert flag bits using a translation table
113  * @table: A table of 32 u32 flags
114  * @val: A 32-bit value to convert
115  *
116  * This function can be used to convert between fsflags values and
117  * GFS2's own flags values.
118  *
119  * Returns: the converted flags
120  */
121 static u32 fsflags_cvt(const u32 *table, u32 val)
122 {
123 	u32 res = 0;
124 	while (val) {
125 		if (val & 1)
126 			res |= *table;
127 		table++;
128 		val >>= 1;
129 	}
130 	return res;
131 }
132 
133 static const u32 fsflags_to_gfs2[32] = {
134 	[3] = GFS2_DIF_SYNC,
135 	[4] = GFS2_DIF_IMMUTABLE,
136 	[5] = GFS2_DIF_APPENDONLY,
137 	[7] = GFS2_DIF_NOATIME,
138 	[12] = GFS2_DIF_EXHASH,
139 	[14] = GFS2_DIF_INHERIT_JDATA,
140 	[17] = GFS2_DIF_TOPDIR,
141 };
142 
143 static const u32 gfs2_to_fsflags[32] = {
144 	[gfs2fl_Sync] = FS_SYNC_FL,
145 	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
146 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
147 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
148 	[gfs2fl_ExHash] = FS_INDEX_FL,
149 	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
150 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
151 };
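/*
 * For illustration: fsflags_cvt() walks @val bit by bit, ORing in the
 * table entry for each set bit. Converting FS_SYNC_FL | FS_NOATIME_FL
 * (bits 3 and 7) with fsflags_to_gfs2[] above therefore yields
 * GFS2_DIF_SYNC | GFS2_DIF_NOATIME, while gfs2_to_fsflags[] provides
 * the reverse mapping from on-disk flag bits back to FS_*_FL values.
 */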
152 
153 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
154 {
155 	struct inode *inode = file_inode(filp);
156 	struct gfs2_inode *ip = GFS2_I(inode);
157 	struct gfs2_holder gh;
158 	int error;
159 	u32 fsflags;
160 
161 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
162 	error = gfs2_glock_nq(&gh);
163 	if (error)
164 		return error;
165 
166 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
167 	if (!S_ISDIR(inode->i_mode) && (ip->i_diskflags & GFS2_DIF_JDATA))
168 		fsflags |= FS_JOURNAL_DATA_FL;
169 	if (put_user(fsflags, ptr))
170 		error = -EFAULT;
171 
172 	gfs2_glock_dq(&gh);
173 	gfs2_holder_uninit(&gh);
174 	return error;
175 }
176 
177 void gfs2_set_inode_flags(struct inode *inode)
178 {
179 	struct gfs2_inode *ip = GFS2_I(inode);
180 	unsigned int flags = inode->i_flags;
181 
182 	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
183 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
184 		inode->i_flags |= S_NOSEC;
185 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
186 		flags |= S_IMMUTABLE;
187 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
188 		flags |= S_APPEND;
189 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
190 		flags |= S_NOATIME;
191 	if (ip->i_diskflags & GFS2_DIF_SYNC)
192 		flags |= S_SYNC;
193 	inode->i_flags = flags;
194 }
195 
196 /* Flags that can be set by user space */
197 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
198 			     GFS2_DIF_IMMUTABLE|		\
199 			     GFS2_DIF_APPENDONLY|		\
200 			     GFS2_DIF_NOATIME|			\
201 			     GFS2_DIF_SYNC|			\
202 			     GFS2_DIF_SYSTEM|			\
203 			     GFS2_DIF_TOPDIR|			\
204 			     GFS2_DIF_INHERIT_JDATA)
205 
206 /**
207  * do_gfs2_set_flags - set flags on an inode
208  * @filp: file pointer
209  * @reqflags: The flags to set
210  * @mask: Indicates which flags are valid
211  * Returns: errno
212  */
213 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
214 {
215 	struct inode *inode = file_inode(filp);
216 	struct gfs2_inode *ip = GFS2_I(inode);
217 	struct gfs2_sbd *sdp = GFS2_SB(inode);
218 	struct buffer_head *bh;
219 	struct gfs2_holder gh;
220 	int error;
221 	u32 new_flags, flags;
222 
223 	error = mnt_want_write_file(filp);
224 	if (error)
225 		return error;
226 
227 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
228 	if (error)
229 		goto out_drop_write;
230 
231 	error = -EACCES;
232 	if (!inode_owner_or_capable(inode))
233 		goto out;
234 
235 	error = 0;
236 	flags = ip->i_diskflags;
237 	new_flags = (flags & ~mask) | (reqflags & mask);
238 	if ((new_flags ^ flags) == 0)
239 		goto out;
240 
241 	error = -EINVAL;
242 	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
243 		goto out;
244 
245 	error = -EPERM;
246 	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
247 		goto out;
248 	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
249 		goto out;
250 	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
251 	    !capable(CAP_LINUX_IMMUTABLE))
252 		goto out;
253 	if (!IS_IMMUTABLE(inode)) {
254 		error = gfs2_permission(inode, MAY_WRITE);
255 		if (error)
256 			goto out;
257 	}
258 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
259 		if (flags & GFS2_DIF_JDATA)
260 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
261 		error = filemap_fdatawrite(inode->i_mapping);
262 		if (error)
263 			goto out;
264 		error = filemap_fdatawait(inode->i_mapping);
265 		if (error)
266 			goto out;
267 	}
268 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
269 	if (error)
270 		goto out;
271 	error = gfs2_meta_inode_buffer(ip, &bh);
272 	if (error)
273 		goto out_trans_end;
274 	gfs2_trans_add_meta(ip->i_gl, bh);
275 	ip->i_diskflags = new_flags;
276 	gfs2_dinode_out(ip, bh->b_data);
277 	brelse(bh);
278 	gfs2_set_inode_flags(inode);
279 	gfs2_set_aops(inode);
280 out_trans_end:
281 	gfs2_trans_end(sdp);
282 out:
283 	gfs2_glock_dq_uninit(&gh);
284 out_drop_write:
285 	mnt_drop_write_file(filp);
286 	return error;
287 }
288 
289 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
290 {
291 	struct inode *inode = file_inode(filp);
292 	u32 fsflags, gfsflags;
293 
294 	if (get_user(fsflags, ptr))
295 		return -EFAULT;
296 
297 	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
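	/*
	 * For regular files the inherit-jdata request from userspace maps
	 * onto the inode's own jdata flag: the XOR below clears
	 * GFS2_DIF_INHERIT_JDATA and sets GFS2_DIF_JDATA (the conversion
	 * table never sets JDATA itself). For directories, the jdata bit
	 * is masked out instead, so only INHERIT_JDATA can be changed.
	 */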
298 	if (!S_ISDIR(inode->i_mode)) {
299 		gfsflags &= ~GFS2_DIF_TOPDIR;
300 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
301 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
302 		return do_gfs2_set_flags(filp, gfsflags, ~0);
303 	}
304 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
305 }
306 
307 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
308 {
309 	switch (cmd) {
310 	case FS_IOC_GETFLAGS:
311 		return gfs2_get_flags(filp, (u32 __user *)arg);
312 	case FS_IOC_SETFLAGS:
313 		return gfs2_set_flags(filp, (u32 __user *)arg);
314 	case FITRIM:
315 		return gfs2_fitrim(filp, (void __user *)arg);
316 	}
317 	return -ENOTTY;
318 }
319 
320 /**
321  * gfs2_size_hint - Give a hint to the size of a write request
322  * @filep: The struct file
323  * @offset: The file offset of the write
324  * @size: The length of the write
325  *
326  * When we are about to do a write, this function records the total
327  * write size in order to provide a suitable hint to the lower layers
328  * about how many blocks will be required.
329  *
330  */
331 
332 static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
333 {
334 	struct inode *inode = file_inode(filep);
335 	struct gfs2_sbd *sdp = GFS2_SB(inode);
336 	struct gfs2_inode *ip = GFS2_I(inode);
337 	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
338 	int hint = min_t(size_t, INT_MAX, blks);
339 
340 	if (hint > atomic_read(&ip->i_res->rs_sizehint))
341 		atomic_set(&ip->i_res->rs_sizehint, hint);
342 }
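/*
 * For illustration: with a 4096-byte block size (sb_bsize_shift == 12), a
 * 1MiB write gives blks = (1048576 + 4095) >> 12 = 256, so rs_sizehint is
 * raised to 256 blocks unless it is already larger; the hint only ever
 * grows here.
 */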
343 
344 /**
345  * gfs2_allocate_page_backing - Use bmap to allocate blocks
346  * @page: The (locked) page to allocate backing for
347  *
348  * We try to allocate all the blocks required for the page in
349  * one go. This might fail for various reasons, so we keep
350  * trying until all the blocks to back this page are allocated.
351  * If some of the blocks are already allocated, that's OK too.
352  */
353 
354 static int gfs2_allocate_page_backing(struct page *page)
355 {
356 	struct inode *inode = page->mapping->host;
357 	struct buffer_head bh;
358 	unsigned long size = PAGE_CACHE_SIZE;
359 	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
360 
361 	do {
362 		bh.b_state = 0;
363 		bh.b_size = size;
364 		gfs2_block_map(inode, lblock, &bh, 1);
365 		if (!buffer_mapped(&bh))
366 			return -EIO;
367 		size -= bh.b_size;
368 		lblock += (bh.b_size >> inode->i_blkbits);
369 	} while (size > 0);
370 	return 0;
371 }
372 
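/*
 * For illustration: with 4KiB pages and a matching 4KiB filesystem block
 * size, the loop above normally completes in a single gfs2_block_map()
 * call; with smaller blocks it may take several passes if the page's
 * backing blocks cannot be mapped as one contiguous extent.
 */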
373 /**
374  * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
375  * @vma: The virtual memory area
376  * @vmf: The virtual memory fault containing the page to become writable
377  *
378  * When the page becomes writable, we need to ensure that we have
379  * blocks allocated on disk to back that page.
380  */
381 
382 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
383 {
384 	struct page *page = vmf->page;
385 	struct inode *inode = file_inode(vma->vm_file);
386 	struct gfs2_inode *ip = GFS2_I(inode);
387 	struct gfs2_sbd *sdp = GFS2_SB(inode);
388 	struct gfs2_alloc_parms ap = { .aflags = 0, };
389 	unsigned long last_index;
390 	u64 pos = page->index << PAGE_CACHE_SHIFT;
391 	unsigned int data_blocks, ind_blocks, rblocks;
392 	struct gfs2_holder gh;
393 	loff_t size;
394 	int ret;
395 
396 	sb_start_pagefault(inode->i_sb);
397 
398 	/* Update file times before taking page lock */
399 	file_update_time(vma->vm_file);
400 
401 	ret = get_write_access(inode);
402 	if (ret)
403 		goto out;
404 
405 	ret = gfs2_rs_alloc(ip);
406 	if (ret)
407 		goto out_write_access;
408 
409 	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
410 
411 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
412 	ret = gfs2_glock_nq(&gh);
413 	if (ret)
414 		goto out_uninit;
415 
416 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
417 	set_bit(GIF_SW_PAGED, &ip->i_flags);
418 
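	/*
	 * Fast path: if the page is already fully backed by on-disk
	 * blocks, no allocation or transaction is required; just lock the
	 * page and revalidate it before it is dirtied below.
	 */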
419 	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
420 		lock_page(page);
421 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
422 			ret = -EAGAIN;
423 			unlock_page(page);
424 		}
425 		goto out_unlock;
426 	}
427 
428 	ret = gfs2_rindex_update(sdp);
429 	if (ret)
430 		goto out_unlock;
431 
432 	ret = gfs2_quota_lock_check(ip);
433 	if (ret)
434 		goto out_unlock;
435 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
436 	ap.target = data_blocks + ind_blocks;
437 	ret = gfs2_inplace_reserve(ip, &ap);
438 	if (ret)
439 		goto out_quota_unlock;
440 
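	/*
	 * Reserve journal space for the transaction: the dinode, any new
	 * indirect blocks, and, when blocks are actually being allocated,
	 * statfs/quota changes plus the resource group bitmap blocks.
	 * jdata inodes also journal the data blocks themselves.
	 */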
441 	rblocks = RES_DINODE + ind_blocks;
442 	if (gfs2_is_jdata(ip))
443 		rblocks += data_blocks ? data_blocks : 1;
444 	if (ind_blocks || data_blocks) {
445 		rblocks += RES_STATFS + RES_QUOTA;
446 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
447 	}
448 	ret = gfs2_trans_begin(sdp, rblocks, 0);
449 	if (ret)
450 		goto out_trans_fail;
451 
452 	lock_page(page);
453 	ret = -EINVAL;
454 	size = i_size_read(inode);
455 	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
456 	/* Check page index against inode size */
457 	if (size == 0 || (page->index > last_index))
458 		goto out_trans_end;
459 
460 	ret = -EAGAIN;
461 	/* If truncated, we must retry the operation, since we may have
462 	 * raced with the glock demotion code.
463 	 */
464 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
465 		goto out_trans_end;
466 
467 	/* Unstuff, if required, and allocate backing blocks for page */
468 	ret = 0;
469 	if (gfs2_is_stuffed(ip))
470 		ret = gfs2_unstuff_dinode(ip, page);
471 	if (ret == 0)
472 		ret = gfs2_allocate_page_backing(page);
473 
474 out_trans_end:
475 	if (ret)
476 		unlock_page(page);
477 	gfs2_trans_end(sdp);
478 out_trans_fail:
479 	gfs2_inplace_release(ip);
480 out_quota_unlock:
481 	gfs2_quota_unlock(ip);
482 out_unlock:
483 	gfs2_glock_dq(&gh);
484 out_uninit:
485 	gfs2_holder_uninit(&gh);
486 	if (ret == 0) {
487 		set_page_dirty(page);
488 		wait_for_stable_page(page);
489 	}
490 out_write_access:
491 	put_write_access(inode);
492 out:
493 	sb_end_pagefault(inode->i_sb);
494 	return block_page_mkwrite_return(ret);
495 }
496 
497 static const struct vm_operations_struct gfs2_vm_ops = {
498 	.fault = filemap_fault,
499 	.map_pages = filemap_map_pages,
500 	.page_mkwrite = gfs2_page_mkwrite,
501 };
502 
503 /**
504  * gfs2_mmap - set up a memory mapping for a file
505  * @file: The file to map
506  * @vma: The VMA which describes the mapping
507  *
508  * There is no need to take the glock here unless atime needs to be
509  * updated, in which case it is acquired briefly (and any error from
510  * doing so is returned) before the file is marked accessed.
511  *
512  * Returns: 0 on success, or errno
513  */
514 
515 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
516 {
517 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
518 
519 	if (!(file->f_flags & O_NOATIME) &&
520 	    !IS_NOATIME(&ip->i_inode)) {
521 		struct gfs2_holder i_gh;
522 		int error;
523 
524 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
525 					   &i_gh);
526 		if (error)
527 			return error;
528 		/* grab lock to update inode */
529 		gfs2_glock_dq_uninit(&i_gh);
530 		file_accessed(file);
531 	}
532 	vma->vm_ops = &gfs2_vm_ops;
533 
534 	return 0;
535 }
536 
537 /**
538  * gfs2_open_common - This is common to open and atomic_open
539  * @inode: The inode being opened
540  * @file: The file being opened
541  *
542  * This may be called with or without a glock held, depending upon
543  * the caller. For regular files, however, it must always be called
544  * under a glock. For other file types, it does not matter whether
545  * the glock is held or not.
546  *
547  * Returns: Error code or 0 for success
548  */
549 
550 int gfs2_open_common(struct inode *inode, struct file *file)
551 {
552 	struct gfs2_file *fp;
553 	int ret;
554 
555 	if (S_ISREG(inode->i_mode)) {
556 		ret = generic_file_open(inode, file);
557 		if (ret)
558 			return ret;
559 	}
560 
561 	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
562 	if (!fp)
563 		return -ENOMEM;
564 
565 	mutex_init(&fp->f_fl_mutex);
566 
567 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
568 	file->private_data = fp;
569 	return 0;
570 }
571 
572 /**
573  * gfs2_open - open a file
574  * @inode: the inode to open
575  * @file: the struct file for this opening
576  *
577  * After atomic_open, this function is only used for opening files
578  * which are already cached. We must still get the glock for regular
579  * files to ensure that the file size is up to date for the large
580  * file check in the common code; that is only an issue for regular
581  * files, though.
582  *
583  * Returns: errno
584  */
585 
586 static int gfs2_open(struct inode *inode, struct file *file)
587 {
588 	struct gfs2_inode *ip = GFS2_I(inode);
589 	struct gfs2_holder i_gh;
590 	int error;
591 	bool need_unlock = false;
592 
593 	if (S_ISREG(ip->i_inode.i_mode)) {
594 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
595 					   &i_gh);
596 		if (error)
597 			return error;
598 		need_unlock = true;
599 	}
600 
601 	error = gfs2_open_common(inode, file);
602 
603 	if (need_unlock)
604 		gfs2_glock_dq_uninit(&i_gh);
605 
606 	return error;
607 }
608 
609 /**
610  * gfs2_release - called to close a struct file
611  * @inode: the inode the struct file belongs to
612  * @file: the struct file being closed
613  *
614  * Returns: errno
615  */
616 
617 static int gfs2_release(struct inode *inode, struct file *file)
618 {
619 	struct gfs2_inode *ip = GFS2_I(inode);
620 
621 	kfree(file->private_data);
622 	file->private_data = NULL;
623 
624 	if (!(file->f_mode & FMODE_WRITE))
625 		return 0;
626 
627 	gfs2_rs_delete(ip, &inode->i_writecount);
628 	return 0;
629 }
630 
631 /**
632  * gfs2_fsync - sync the dirty data for a file (across the cluster)
633  * @file: the file that points to the dentry
634  * @start: the start position in the file to sync
635  * @end: the end position in the file to sync
636  * @datasync: set if we can ignore timestamp changes
637  *
638  * We split the data flushing here so that we don't wait for the data
639  * until after we've also sent the metadata to disk. Note that for
640  * data=ordered, we will write & wait for the data at the log flush
641  * stage anyway, so this is unlikely to make much of a difference
642  * except in the data=writeback case.
643  *
644  * If the fdatawrite fails due to any reason except -EIO, we will
645  * continue the remainder of the fsync, although we'll still report
646  * the error at the end. This is to match filemap_write_and_wait_range()
647  * behaviour.
648  *
649  * Returns: errno
650  */
651 
652 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
653 		      int datasync)
654 {
655 	struct address_space *mapping = file->f_mapping;
656 	struct inode *inode = mapping->host;
657 	int sync_state = inode->i_state & I_DIRTY_ALL;
658 	struct gfs2_inode *ip = GFS2_I(inode);
659 	int ret = 0, ret1 = 0;
660 
661 	if (mapping->nrpages) {
662 		ret1 = filemap_fdatawrite_range(mapping, start, end);
663 		if (ret1 == -EIO)
664 			return ret1;
665 	}
666 
667 	if (!gfs2_is_jdata(ip))
668 		sync_state &= ~I_DIRTY_PAGES;
669 	if (datasync)
670 		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
671 
672 	if (sync_state) {
673 		ret = sync_inode_metadata(inode, 1);
674 		if (ret)
675 			return ret;
676 		if (gfs2_is_jdata(ip))
677 			filemap_write_and_wait(mapping);
678 		gfs2_ail_flush(ip->i_gl, 1);
679 	}
680 
681 	if (mapping->nrpages)
682 		ret = filemap_fdatawait_range(mapping, start, end);
683 
684 	return ret ? ret : ret1;
685 }
686 
687 /**
688  * gfs2_file_write_iter - Perform a write to a file
689  * @iocb: The io context
690  * @from: The data to write
691  *
692  * We have to do a lock/unlock here to refresh the inode size for
693  * O_APPEND writes, otherwise we can end up writing at the wrong
696  * offset. There is still a race, but provided the app is using its
697  * own file locking, this will make O_APPEND work as expected.
698  *
699  */
700 
701 static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
702 {
703 	struct file *file = iocb->ki_filp;
704 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
705 	int ret;
706 
707 	ret = gfs2_rs_alloc(ip);
708 	if (ret)
709 		return ret;
710 
711 	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
712 
713 	if (file->f_flags & O_APPEND) {
714 		struct gfs2_holder gh;
715 
716 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
717 		if (ret)
718 			return ret;
719 		gfs2_glock_dq_uninit(&gh);
720 	}
721 
722 	return generic_file_write_iter(iocb, from);
723 }
724 
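/**
 * fallocate_chunk - allocate on-disk blocks for one chunk of a fallocate
 * @inode: The inode being preallocated
 * @offset: Byte offset of the start of the chunk
 * @len: Length of the chunk in bytes
 * @mode: The fallocate mode
 *
 * Walks the range with gfs2_block_map() in create mode, unstuffing the
 * dinode first if required, so that every block in the range ends up
 * mapped on disk.
 *
 * Returns: errno
 */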
725 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
726 			   int mode)
727 {
728 	struct gfs2_inode *ip = GFS2_I(inode);
729 	struct buffer_head *dibh;
730 	int error;
731 	unsigned int nr_blks;
732 	sector_t lblock = offset >> inode->i_blkbits;
733 
734 	error = gfs2_meta_inode_buffer(ip, &dibh);
735 	if (unlikely(error))
736 		return error;
737 
738 	gfs2_trans_add_meta(ip->i_gl, dibh);
739 
740 	if (gfs2_is_stuffed(ip)) {
741 		error = gfs2_unstuff_dinode(ip, NULL);
742 		if (unlikely(error))
743 			goto out;
744 	}
745 
746 	while (len) {
747 		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
748 		bh_map.b_size = len;
749 		set_buffer_zeronew(&bh_map);
750 
751 		error = gfs2_block_map(inode, lblock, &bh_map, 1);
752 		if (unlikely(error))
753 			goto out;
754 		len -= bh_map.b_size;
755 		nr_blks = bh_map.b_size >> inode->i_blkbits;
756 		lblock += nr_blks;
757 		if (!buffer_new(&bh_map))
758 			continue;
759 		if (unlikely(!buffer_zeronew(&bh_map))) {
760 			error = -EIO;
761 			goto out;
762 		}
763 	}
764 out:
765 	brelse(dibh);
766 	return error;
767 }
768 
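/**
 * calc_max_reserv - work out the largest allocation the current rgrp allows
 * @ip: The inode being written to
 * @max: An upper bound on the length, in bytes
 * @len: Returns the usable length, in bytes
 * @data_blocks: Returns the number of data blocks
 * @ind_blocks: Returns the number of indirect (metadata) blocks
 *
 * Roughly the reverse of gfs2_write_calc_reserv(): given the free blocks
 * remaining in the inode's resource group, split them into data and
 * indirect blocks and convert the result back into a byte length,
 * clamped to @max.
 */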
769 static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
770 			    unsigned int *data_blocks, unsigned int *ind_blocks)
771 {
772 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
773 	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
774 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
775 
776 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
777 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
778 		max_data -= tmp;
779 	}
780 	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv(),
781 	   so it might end up with fewer data blocks */
782 	if (max_data <= *data_blocks)
783 		return;
784 	*data_blocks = max_data;
785 	*ind_blocks = max_blocks - max_data;
786 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
787 	if (*len > max) {
788 		*len = max;
789 		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
790 	}
791 }
792 
793 static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
794 {
795 	struct inode *inode = file_inode(file);
796 	struct gfs2_sbd *sdp = GFS2_SB(inode);
797 	struct gfs2_inode *ip = GFS2_I(inode);
798 	struct gfs2_alloc_parms ap = { .aflags = 0, };
799 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
800 	loff_t bytes, max_bytes;
801 	int error;
802 	const loff_t pos = offset;
803 	const loff_t count = len;
804 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
805 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
806 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
807 
808 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
809 
810 	offset &= bsize_mask;
811 
812 	len = next - offset;
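	/*
	 * The rounding above expands the request to block boundaries. For
	 * illustration, with 4096-byte blocks a request of offset 5000 and
	 * len 3000 covers bytes 5000..7999, so next becomes 8192, offset is
	 * rounded down to 4096 and len grows to the block-aligned 4096.
	 */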
813 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
814 	if (!bytes)
815 		bytes = UINT_MAX;
816 	bytes &= bsize_mask;
817 	if (bytes == 0)
818 		bytes = sdp->sd_sb.sb_bsize;
819 
820 	gfs2_size_hint(file, offset, len);
821 
822 	while (len > 0) {
823 		if (len < bytes)
824 			bytes = len;
825 		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
826 			len -= bytes;
827 			offset += bytes;
828 			continue;
829 		}
830 		error = gfs2_quota_lock_check(ip);
831 		if (error)
832 			return error;
833 retry:
834 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
835 
836 		ap.target = data_blocks + ind_blocks;
837 		error = gfs2_inplace_reserve(ip, &ap);
838 		if (error) {
839 			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
840 				bytes >>= 1;
841 				bytes &= bsize_mask;
842 				if (bytes == 0)
843 					bytes = sdp->sd_sb.sb_bsize;
844 				goto retry;
845 			}
846 			goto out_qunlock;
847 		}
848 		max_bytes = bytes;
849 		calc_max_reserv(ip, (len > max_chunk_size) ? max_chunk_size : len,
850 				&max_bytes, &data_blocks, &ind_blocks);
851 
852 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
853 			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
854 		if (gfs2_is_jdata(ip))
855 			rblocks += data_blocks ? data_blocks : 1;
856 
857 		error = gfs2_trans_begin(sdp, rblocks,
858 					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
859 		if (error)
860 			goto out_trans_fail;
861 
862 		error = fallocate_chunk(inode, offset, max_bytes, mode);
863 		gfs2_trans_end(sdp);
864 
865 		if (error)
866 			goto out_trans_fail;
867 
868 		len -= max_bytes;
869 		offset += max_bytes;
870 		gfs2_inplace_release(ip);
871 		gfs2_quota_unlock(ip);
872 	}
873 
874 	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
875 		i_size_write(inode, pos + count);
876 		/* Marks the inode as dirty */
877 		file_update_time(file);
878 	}
879 
880 	return generic_write_sync(file, pos, count);
881 
882 out_trans_fail:
883 	gfs2_inplace_release(ip);
884 out_qunlock:
885 	gfs2_quota_unlock(ip);
886 	return error;
887 }
888 
889 static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
890 {
891 	struct inode *inode = file_inode(file);
892 	struct gfs2_inode *ip = GFS2_I(inode);
893 	struct gfs2_holder gh;
894 	int ret;
895 
896 	if (mode & ~FALLOC_FL_KEEP_SIZE)
897 		return -EOPNOTSUPP;
898 
899 	mutex_lock(&inode->i_mutex);
900 
901 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
902 	ret = gfs2_glock_nq(&gh);
903 	if (ret)
904 		goto out_uninit;
905 
906 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
907 	    (offset + len) > inode->i_size) {
908 		ret = inode_newsize_ok(inode, offset + len);
909 		if (ret)
910 			goto out_unlock;
911 	}
912 
913 	ret = get_write_access(inode);
914 	if (ret)
915 		goto out_unlock;
916 
917 	ret = gfs2_rs_alloc(ip);
918 	if (ret)
919 		goto out_putw;
920 
921 	ret = __gfs2_fallocate(file, mode, offset, len);
922 	if (ret)
923 		gfs2_rs_deltree(ip->i_res);
924 out_putw:
925 	put_write_access(inode);
926 out_unlock:
927 	gfs2_glock_dq(&gh);
928 out_uninit:
929 	gfs2_holder_uninit(&gh);
930 	mutex_unlock(&inode->i_mutex);
931 	return ret;
932 }
933 
934 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
935 
936 /**
937  * gfs2_lock - acquire/release a posix lock on a file
938  * @file: the file pointer
939  * @cmd: either modify or retrieve lock state, possibly wait
940  * @fl: type and range of lock
941  *
942  * Returns: errno
943  */
944 
945 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
946 {
947 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
948 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
949 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
950 
951 	if (!(fl->fl_flags & FL_POSIX))
952 		return -ENOLCK;
953 	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
954 		return -ENOLCK;
955 
956 	if (cmd == F_CANCELLK) {
957 		/* Hack: */
958 		cmd = F_SETLK;
959 		fl->fl_type = F_UNLCK;
960 	}
961 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
962 		if (fl->fl_type == F_UNLCK)
963 			posix_lock_file_wait(file, fl);
964 		return -EIO;
965 	}
966 	if (IS_GETLK(cmd))
967 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
968 	else if (fl->fl_type == F_UNLCK)
969 		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
970 	else
971 		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
972 }
973 
974 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
975 {
976 	struct gfs2_file *fp = file->private_data;
977 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
978 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
979 	struct gfs2_glock *gl;
980 	unsigned int state;
981 	int flags;
982 	int error = 0;
983 	int sleeptime;
984 
985 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
986 	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
987 
988 	mutex_lock(&fp->f_fl_mutex);
989 
990 	gl = fl_gh->gh_gl;
991 	if (gl) {
992 		if (fl_gh->gh_state == state)
993 			goto out;
994 		flock_lock_file_wait(file,
995 				     &(struct file_lock){.fl_type = F_UNLCK});
996 		gfs2_glock_dq(fl_gh);
997 		gfs2_holder_reinit(state, flags, fl_gh);
998 	} else {
999 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1000 				       &gfs2_flock_glops, CREATE, &gl);
1001 		if (error)
1002 			goto out;
1003 		gfs2_holder_init(gl, state, flags, fl_gh);
1004 		gfs2_glock_put(gl);
1005 	}
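	/*
	 * Acquire the glock. Trylock requests (anything but SETLKW) carry
	 * LM_FLAG_TRY semantics and are retried with short millisecond
	 * delays before finally failing with -EAGAIN; blocking requests
	 * simply wait inside gfs2_glock_nq().
	 */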
1006 	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
1007 		error = gfs2_glock_nq(fl_gh);
1008 		if (error != GLR_TRYFAILED)
1009 			break;
1010 		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
1011 		fl_gh->gh_error = 0;
1012 		msleep(sleeptime);
1013 	}
1014 	if (error) {
1015 		gfs2_holder_uninit(fl_gh);
1016 		if (error == GLR_TRYFAILED)
1017 			error = -EAGAIN;
1018 	} else {
1019 		error = flock_lock_file_wait(file, fl);
1020 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1021 	}
1022 
1023 out:
1024 	mutex_unlock(&fp->f_fl_mutex);
1025 	return error;
1026 }
1027 
1028 static void do_unflock(struct file *file, struct file_lock *fl)
1029 {
1030 	struct gfs2_file *fp = file->private_data;
1031 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1032 
1033 	mutex_lock(&fp->f_fl_mutex);
1034 	flock_lock_file_wait(file, fl);
1035 	if (fl_gh->gh_gl) {
1036 		gfs2_glock_dq(fl_gh);
1037 		gfs2_holder_uninit(fl_gh);
1038 	}
1039 	mutex_unlock(&fp->f_fl_mutex);
1040 }
1041 
1042 /**
1043  * gfs2_flock - acquire/release a flock lock on a file
1044  * @file: the file pointer
1045  * @cmd: either modify or retrieve lock state, possibly wait
1046  * @fl: type and range of lock
1047  *
1048  * Returns: errno
1049  */
1050 
1051 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1052 {
1053 	if (!(fl->fl_flags & FL_FLOCK))
1054 		return -ENOLCK;
1055 	if (fl->fl_type & LOCK_MAND)
1056 		return -EOPNOTSUPP;
1057 
1058 	if (fl->fl_type == F_UNLCK) {
1059 		do_unflock(file, fl);
1060 		return 0;
1061 	} else {
1062 		return do_flock(file, cmd, fl);
1063 	}
1064 }
1065 
1066 const struct file_operations gfs2_file_fops = {
1067 	.llseek		= gfs2_llseek,
1068 	.read		= new_sync_read,
1069 	.read_iter	= generic_file_read_iter,
1070 	.write		= new_sync_write,
1071 	.write_iter	= gfs2_file_write_iter,
1072 	.unlocked_ioctl	= gfs2_ioctl,
1073 	.mmap		= gfs2_mmap,
1074 	.open		= gfs2_open,
1075 	.release	= gfs2_release,
1076 	.fsync		= gfs2_fsync,
1077 	.lock		= gfs2_lock,
1078 	.flock		= gfs2_flock,
1079 	.splice_read	= generic_file_splice_read,
1080 	.splice_write	= iter_file_splice_write,
1081 	.setlease	= simple_nosetlease,
1082 	.fallocate	= gfs2_fallocate,
1083 };
1084 
1085 const struct file_operations gfs2_dir_fops = {
1086 	.iterate	= gfs2_readdir,
1087 	.unlocked_ioctl	= gfs2_ioctl,
1088 	.open		= gfs2_open,
1089 	.release	= gfs2_release,
1090 	.fsync		= gfs2_fsync,
1091 	.lock		= gfs2_lock,
1092 	.flock		= gfs2_flock,
1093 	.llseek		= default_llseek,
1094 };
1095 
1096 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1097 
1098 const struct file_operations gfs2_file_fops_nolock = {
1099 	.llseek		= gfs2_llseek,
1100 	.read		= new_sync_read,
1101 	.read_iter	= generic_file_read_iter,
1102 	.write		= new_sync_write,
1103 	.write_iter	= gfs2_file_write_iter,
1104 	.unlocked_ioctl	= gfs2_ioctl,
1105 	.mmap		= gfs2_mmap,
1106 	.open		= gfs2_open,
1107 	.release	= gfs2_release,
1108 	.fsync		= gfs2_fsync,
1109 	.splice_read	= generic_file_splice_read,
1110 	.splice_write	= iter_file_splice_write,
1111 	.setlease	= generic_setlease,
1112 	.fallocate	= gfs2_fallocate,
1113 };
1114 
1115 const struct file_operations gfs2_dir_fops_nolock = {
1116 	.iterate	= gfs2_readdir,
1117 	.unlocked_ioctl	= gfs2_ioctl,
1118 	.open		= gfs2_open,
1119 	.release	= gfs2_release,
1120 	.fsync		= gfs2_fsync,
1121 	.llseek		= default_llseek,
1122 };
1123 
1124