xref: /openbmc/linux/fs/gfs2/file.c (revision afb46f79)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/falloc.h>
22 #include <linux/swap.h>
23 #include <linux/crc32.h>
24 #include <linux/writeback.h>
25 #include <asm/uaccess.h>
26 #include <linux/dlm.h>
27 #include <linux/dlm_plock.h>
28 #include <linux/aio.h>
29 
30 #include "gfs2.h"
31 #include "incore.h"
32 #include "bmap.h"
33 #include "dir.h"
34 #include "glock.h"
35 #include "glops.h"
36 #include "inode.h"
37 #include "log.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "rgrp.h"
41 #include "trans.h"
42 #include "util.h"
43 
44 /**
45  * gfs2_llseek - seek to a location in a file
46  * @file: the file
47  * @offset: the offset
48  * @whence: Where to seek from (SEEK_SET, SEEK_CUR, SEEK_END, SEEK_DATA or SEEK_HOLE)
49  *
50  * SEEK_END, SEEK_DATA and SEEK_HOLE require the glock for the file
51  * because they reference the file's size.
52  *
53  * Returns: The new offset, or errno
54  */
55 
56 static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
57 {
58 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
59 	struct gfs2_holder i_gh;
60 	loff_t error;
61 
62 	switch (whence) {
63 	case SEEK_END: /* These reference inode->i_size */
64 	case SEEK_DATA:
65 	case SEEK_HOLE:
66 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
67 					   &i_gh);
68 		if (!error) {
69 			error = generic_file_llseek(file, offset, whence);
70 			gfs2_glock_dq_uninit(&i_gh);
71 		}
72 		break;
73 	case SEEK_CUR:
74 	case SEEK_SET:
75 		error = generic_file_llseek(file, offset, whence);
76 		break;
77 	default:
78 		error = -EINVAL;
79 	}
80 
81 	return error;
82 }
83 
84 /**
85  * gfs2_readdir - Iterator for a directory
86  * @file: The directory to read from
87  * @ctx: What to feed directory entries to
88  *
89  * Returns: errno
90  */
91 
92 static int gfs2_readdir(struct file *file, struct dir_context *ctx)
93 {
94 	struct inode *dir = file->f_mapping->host;
95 	struct gfs2_inode *dip = GFS2_I(dir);
96 	struct gfs2_holder d_gh;
97 	int error;
98 
99 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
100 	if (error)
101 		return error;
102 
103 	error = gfs2_dir_read(dir, ctx, &file->f_ra);
104 
105 	gfs2_glock_dq_uninit(&d_gh);
106 
107 	return error;
108 }
109 
110 /**
111  * fsflags_cvt - convert a flags value using a translation table
112  * @table: A table of 32 u32 flags
113  * @val: a 32 bit value to convert
114  *
115  * This function can be used to convert between fsflags values and
116  * GFS2's own flags values.
117  *
118  * Returns: the converted flags
119  */
120 static u32 fsflags_cvt(const u32 *table, u32 val)
121 {
122 	u32 res = 0;
123 	while(val) {
124 		if (val & 1)
125 			res |= *table;
126 		table++;
127 		val >>= 1;
128 	}
129 	return res;
130 }
131 
132 static const u32 fsflags_to_gfs2[32] = {
133 	[3] = GFS2_DIF_SYNC,
134 	[4] = GFS2_DIF_IMMUTABLE,
135 	[5] = GFS2_DIF_APPENDONLY,
136 	[7] = GFS2_DIF_NOATIME,
137 	[12] = GFS2_DIF_EXHASH,
138 	[14] = GFS2_DIF_INHERIT_JDATA,
139 	[17] = GFS2_DIF_TOPDIR,
140 };
141 
142 static const u32 gfs2_to_fsflags[32] = {
143 	[gfs2fl_Sync] = FS_SYNC_FL,
144 	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
145 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
146 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
147 	[gfs2fl_ExHash] = FS_INDEX_FL,
148 	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
149 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
150 };
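
/*
 * Example (illustrative only, not part of the original code): converting
 * FS_SYNC_FL | FS_NOATIME_FL (bits 3 and 7) with the fsflags_to_gfs2
 * table walks each set bit of the input and ORs in the table entry at
 * that position:
 *
 *	u32 gfsflags = fsflags_cvt(fsflags_to_gfs2,
 *				   FS_SYNC_FL | FS_NOATIME_FL);
 *
 * yields gfsflags == (GFS2_DIF_SYNC | GFS2_DIF_NOATIME). The
 * gfs2_to_fsflags table performs the inverse mapping on ip->i_diskflags,
 * as used by gfs2_get_flags() below.
 */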
151 
152 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
153 {
154 	struct inode *inode = file_inode(filp);
155 	struct gfs2_inode *ip = GFS2_I(inode);
156 	struct gfs2_holder gh;
157 	int error;
158 	u32 fsflags;
159 
160 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
161 	error = gfs2_glock_nq(&gh);
162 	if (error)
163 		return error;
164 
165 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
166 	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
167 		fsflags |= FS_JOURNAL_DATA_FL;
168 	if (put_user(fsflags, ptr))
169 		error = -EFAULT;
170 
171 	gfs2_glock_dq(&gh);
172 	gfs2_holder_uninit(&gh);
173 	return error;
174 }
175 
176 void gfs2_set_inode_flags(struct inode *inode)
177 {
178 	struct gfs2_inode *ip = GFS2_I(inode);
179 	unsigned int flags = inode->i_flags;
180 
181 	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
182 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
183 		inode->i_flags |= S_NOSEC;
184 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
185 		flags |= S_IMMUTABLE;
186 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
187 		flags |= S_APPEND;
188 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
189 		flags |= S_NOATIME;
190 	if (ip->i_diskflags & GFS2_DIF_SYNC)
191 		flags |= S_SYNC;
192 	inode->i_flags = flags;
193 }
194 
195 /* Flags that can be set by user space */
196 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
197 			     GFS2_DIF_IMMUTABLE|		\
198 			     GFS2_DIF_APPENDONLY|		\
199 			     GFS2_DIF_NOATIME|			\
200 			     GFS2_DIF_SYNC|			\
201 			     GFS2_DIF_SYSTEM|			\
202 			     GFS2_DIF_TOPDIR|			\
203 			     GFS2_DIF_INHERIT_JDATA)
204 
205 /**
206  * do_gfs2_set_flags - set flags on an inode
207  * @filp: The file pointer
208  * @reqflags: The flags to set
209  * @mask: Indicates which flags are valid
210  *
211  */
212 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
213 {
214 	struct inode *inode = file_inode(filp);
215 	struct gfs2_inode *ip = GFS2_I(inode);
216 	struct gfs2_sbd *sdp = GFS2_SB(inode);
217 	struct buffer_head *bh;
218 	struct gfs2_holder gh;
219 	int error;
220 	u32 new_flags, flags;
221 
222 	error = mnt_want_write_file(filp);
223 	if (error)
224 		return error;
225 
226 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
227 	if (error)
228 		goto out_drop_write;
229 
230 	error = -EACCES;
231 	if (!inode_owner_or_capable(inode))
232 		goto out;
233 
234 	error = 0;
235 	flags = ip->i_diskflags;
236 	new_flags = (flags & ~mask) | (reqflags & mask);
237 	if ((new_flags ^ flags) == 0)
238 		goto out;
239 
240 	error = -EINVAL;
241 	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
242 		goto out;
243 
244 	error = -EPERM;
245 	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
246 		goto out;
247 	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
248 		goto out;
249 	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
250 	    !capable(CAP_LINUX_IMMUTABLE))
251 		goto out;
252 	if (!IS_IMMUTABLE(inode)) {
253 		error = gfs2_permission(inode, MAY_WRITE);
254 		if (error)
255 			goto out;
256 	}
257 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
258 		if (flags & GFS2_DIF_JDATA)
259 			gfs2_log_flush(sdp, ip->i_gl);
260 		error = filemap_fdatawrite(inode->i_mapping);
261 		if (error)
262 			goto out;
263 		error = filemap_fdatawait(inode->i_mapping);
264 		if (error)
265 			goto out;
266 	}
267 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
268 	if (error)
269 		goto out;
270 	error = gfs2_meta_inode_buffer(ip, &bh);
271 	if (error)
272 		goto out_trans_end;
273 	gfs2_trans_add_meta(ip->i_gl, bh);
274 	ip->i_diskflags = new_flags;
275 	gfs2_dinode_out(ip, bh->b_data);
276 	brelse(bh);
277 	gfs2_set_inode_flags(inode);
278 	gfs2_set_aops(inode);
279 out_trans_end:
280 	gfs2_trans_end(sdp);
281 out:
282 	gfs2_glock_dq_uninit(&gh);
283 out_drop_write:
284 	mnt_drop_write_file(filp);
285 	return error;
286 }
287 
288 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
289 {
290 	struct inode *inode = file_inode(filp);
291 	u32 fsflags, gfsflags;
292 
293 	if (get_user(fsflags, ptr))
294 		return -EFAULT;
295 
296 	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
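	/* Note: for regular files, FS_JOURNAL_DATA_FL arrives here as
	   GFS2_DIF_INHERIT_JDATA (bit 14 in the table above); the XOR
	   below flips it into GFS2_DIF_JDATA, since only directories
	   carry the inherit-jdata semantics. */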
297 	if (!S_ISDIR(inode->i_mode)) {
298 		gfsflags &= ~GFS2_DIF_TOPDIR;
299 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
300 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
301 		return do_gfs2_set_flags(filp, gfsflags, ~0);
302 	}
303 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
304 }
305 
306 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
307 {
308 	switch(cmd) {
309 	case FS_IOC_GETFLAGS:
310 		return gfs2_get_flags(filp, (u32 __user *)arg);
311 	case FS_IOC_SETFLAGS:
312 		return gfs2_set_flags(filp, (u32 __user *)arg);
313 	case FITRIM:
314 		return gfs2_fitrim(filp, (void __user *)arg);
315 	}
316 	return -ENOTTY;
317 }
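
/*
 * Userspace view (sketch, for illustration only): these are the standard
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS and FITRIM ioctls, so something like
 *
 *	unsigned int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= FS_NOATIME_FL;
 *		ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 *
 * ends up in gfs2_set_flags(), which maps FS_NOATIME_FL to
 * GFS2_DIF_NOATIME on disk.
 */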
318 
319 /**
320  * gfs2_size_hint - Give a hint to the size of a write request
321  * @filep: The struct file
322  * @offset: The file offset of the write
323  * @size: The length of the write
324  *
325  * When we are about to do a write, this function records the total
326  * write size in order to provide a suitable hint to the lower layers
327  * about how many blocks will be required.
328  *
329  */
330 
331 static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
332 {
333 	struct inode *inode = file_inode(filep);
334 	struct gfs2_sbd *sdp = GFS2_SB(inode);
335 	struct gfs2_inode *ip = GFS2_I(inode);
336 	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
337 	int hint = min_t(size_t, INT_MAX, blks);
338 
339 	atomic_set(&ip->i_res->rs_sizehint, hint);
340 }
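
/*
 * Worked example (illustrative only): with a 4096-byte block size
 * (sb_bsize_shift == 12), a 10000-byte write gives
 * blks = (10000 + 4095) >> 12 = 3, so rs_sizehint is set to 3 blocks.
 */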
341 
342 /**
343  * gfs2_allocate_page_backing - Use bmap to allocate blocks
344  * @page: The (locked) page to allocate backing for
345  *
346  * We try to allocate all the blocks required for the page in
347  * one go. This might fail for various reasons, so we keep
348  * trying until all the blocks to back this page are allocated.
349  * If some of the blocks are already allocated, that's ok too.
350  */
351 
352 static int gfs2_allocate_page_backing(struct page *page)
353 {
354 	struct inode *inode = page->mapping->host;
355 	struct buffer_head bh;
356 	unsigned long size = PAGE_CACHE_SIZE;
357 	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
358 
359 	do {
360 		bh.b_state = 0;
361 		bh.b_size = size;
362 		gfs2_block_map(inode, lblock, &bh, 1);
363 		if (!buffer_mapped(&bh))
364 			return -EIO;
365 		size -= bh.b_size;
366 		lblock += (bh.b_size >> inode->i_blkbits);
367 	} while(size > 0);
368 	return 0;
369 }
370 
371 /**
372  * gfs2_page_mkwrite - Make a shared, mmap()ed page writable
373  * @vma: The virtual memory area
374  * @page: The page which is about to become writable
375  *
376  * When the page becomes writable, we need to ensure that we have
377  * blocks allocated on disk to back that page.
378  */
379 
380 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
381 {
382 	struct page *page = vmf->page;
383 	struct inode *inode = file_inode(vma->vm_file);
384 	struct gfs2_inode *ip = GFS2_I(inode);
385 	struct gfs2_sbd *sdp = GFS2_SB(inode);
386 	struct gfs2_alloc_parms ap = { .aflags = 0, };
387 	unsigned long last_index;
388 	u64 pos = page->index << PAGE_CACHE_SHIFT;
389 	unsigned int data_blocks, ind_blocks, rblocks;
390 	struct gfs2_holder gh;
391 	loff_t size;
392 	int ret;
393 
394 	sb_start_pagefault(inode->i_sb);
395 
396 	/* Update file times before taking page lock */
397 	file_update_time(vma->vm_file);
398 
399 	ret = get_write_access(inode);
400 	if (ret)
401 		goto out;
402 
403 	ret = gfs2_rs_alloc(ip);
404 	if (ret)
405 		goto out_write_access;
406 
407 	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
408 
409 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
410 	ret = gfs2_glock_nq(&gh);
411 	if (ret)
412 		goto out_uninit;
413 
414 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
415 	set_bit(GIF_SW_PAGED, &ip->i_flags);
416 
417 	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
418 		lock_page(page);
419 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
420 			ret = -EAGAIN;
421 			unlock_page(page);
422 		}
423 		goto out_unlock;
424 	}
425 
426 	ret = gfs2_rindex_update(sdp);
427 	if (ret)
428 		goto out_unlock;
429 
430 	ret = gfs2_quota_lock_check(ip);
431 	if (ret)
432 		goto out_unlock;
433 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
434 	ap.target = data_blocks + ind_blocks;
435 	ret = gfs2_inplace_reserve(ip, &ap);
436 	if (ret)
437 		goto out_quota_unlock;
438 
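	/* Reserve journal space for the dinode and any new indirect blocks;
	   for journaled data the data blocks are logged too, and an actual
	   allocation also touches the statfs, quota and resource group
	   metadata. */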
439 	rblocks = RES_DINODE + ind_blocks;
440 	if (gfs2_is_jdata(ip))
441 		rblocks += data_blocks ? data_blocks : 1;
442 	if (ind_blocks || data_blocks) {
443 		rblocks += RES_STATFS + RES_QUOTA;
444 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
445 	}
446 	ret = gfs2_trans_begin(sdp, rblocks, 0);
447 	if (ret)
448 		goto out_trans_fail;
449 
450 	lock_page(page);
451 	ret = -EINVAL;
452 	size = i_size_read(inode);
453 	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
454 	/* Check page index against inode size */
455 	if (size == 0 || (page->index > last_index))
456 		goto out_trans_end;
457 
458 	ret = -EAGAIN;
459 	/* If truncated, we must retry the operation, as we may have raced
460 	 * with the glock demotion code.
461 	 */
462 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
463 		goto out_trans_end;
464 
465 	/* Unstuff, if required, and allocate backing blocks for page */
466 	ret = 0;
467 	if (gfs2_is_stuffed(ip))
468 		ret = gfs2_unstuff_dinode(ip, page);
469 	if (ret == 0)
470 		ret = gfs2_allocate_page_backing(page);
471 
472 out_trans_end:
473 	if (ret)
474 		unlock_page(page);
475 	gfs2_trans_end(sdp);
476 out_trans_fail:
477 	gfs2_inplace_release(ip);
478 out_quota_unlock:
479 	gfs2_quota_unlock(ip);
480 out_unlock:
481 	gfs2_glock_dq(&gh);
482 out_uninit:
483 	gfs2_holder_uninit(&gh);
484 	if (ret == 0) {
485 		set_page_dirty(page);
486 		wait_for_stable_page(page);
487 	}
488 out_write_access:
489 	put_write_access(inode);
490 out:
491 	sb_end_pagefault(inode->i_sb);
492 	return block_page_mkwrite_return(ret);
493 }
494 
495 static const struct vm_operations_struct gfs2_vm_ops = {
496 	.fault = filemap_fault,
497 	.map_pages = filemap_map_pages,
498 	.page_mkwrite = gfs2_page_mkwrite,
499 	.remap_pages = generic_file_remap_pages,
500 };
501 
502 /**
503  * gfs2_mmap - set up a memory mapping of a file
504  * @file: The file to map
505  * @vma: The VMA which describes the mapping
506  *
507  * There is no need to get a lock here unless we should be updating
508  * atime. We ignore any locking errors since the only consequence is
509  * a missed atime update (which will just be deferred until later).
510  *
511  * Returns: 0
512  */
513 
514 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
515 {
516 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
517 
518 	if (!(file->f_flags & O_NOATIME) &&
519 	    !IS_NOATIME(&ip->i_inode)) {
520 		struct gfs2_holder i_gh;
521 		int error;
522 
523 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
524 					   &i_gh);
525 		if (error)
526 			return error;
527 		/* grab lock to update inode */
528 		gfs2_glock_dq_uninit(&i_gh);
529 		file_accessed(file);
530 	}
531 	vma->vm_ops = &gfs2_vm_ops;
532 
533 	return 0;
534 }
535 
536 /**
537  * gfs2_open_common - This is common to open and atomic_open
538  * @inode: The inode being opened
539  * @file: The file being opened
540  *
541  * This may or may not be called under a glock, depending upon how it
542  * was called. We must always be called under a glock for regular
543  * files, however. For other file types, it does not matter whether
544  * we hold the glock or not.
545  *
546  * Returns: Error code or 0 for success
547  */
548 
549 int gfs2_open_common(struct inode *inode, struct file *file)
550 {
551 	struct gfs2_file *fp;
552 	int ret;
553 
554 	if (S_ISREG(inode->i_mode)) {
555 		ret = generic_file_open(inode, file);
556 		if (ret)
557 			return ret;
558 	}
559 
560 	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
561 	if (!fp)
562 		return -ENOMEM;
563 
564 	mutex_init(&fp->f_fl_mutex);
565 
566 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
567 	file->private_data = fp;
568 	return 0;
569 }
570 
571 /**
572  * gfs2_open - open a file
573  * @inode: the inode to open
574  * @file: the struct file for this opening
575  *
576  * After atomic_open, this function is only used for opening files
577  * which are already cached. We must still get the glock for regular
578  * files to ensure that the file size is up to date for the large
579  * file check in the common code. That is only an issue for
580  * regular files though.
581  *
582  * Returns: errno
583  */
584 
585 static int gfs2_open(struct inode *inode, struct file *file)
586 {
587 	struct gfs2_inode *ip = GFS2_I(inode);
588 	struct gfs2_holder i_gh;
589 	int error;
590 	bool need_unlock = false;
591 
592 	if (S_ISREG(ip->i_inode.i_mode)) {
593 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
594 					   &i_gh);
595 		if (error)
596 			return error;
597 		need_unlock = true;
598 	}
599 
600 	error = gfs2_open_common(inode, file);
601 
602 	if (need_unlock)
603 		gfs2_glock_dq_uninit(&i_gh);
604 
605 	return error;
606 }
607 
608 /**
609  * gfs2_release - called to close a struct file
610  * @inode: the inode the struct file belongs to
611  * @file: the struct file being closed
612  *
613  * Returns: errno
614  */
615 
616 static int gfs2_release(struct inode *inode, struct file *file)
617 {
618 	struct gfs2_inode *ip = GFS2_I(inode);
619 
620 	kfree(file->private_data);
621 	file->private_data = NULL;
622 
623 	if (!(file->f_mode & FMODE_WRITE))
624 		return 0;
625 
626 	gfs2_rs_delete(ip, &inode->i_writecount);
627 	return 0;
628 }
629 
630 /**
631  * gfs2_fsync - sync the dirty data for a file (across the cluster)
632  * @file: the file that points to the dentry
633  * @start: the start position in the file to sync
634  * @end: the end position in the file to sync
635  * @datasync: set if we can ignore timestamp changes
636  *
637  * We split the data flushing here so that we don't wait for the data
638  * until after we've also sent the metadata to disk. Note that for
639  * data=ordered, we will write & wait for the data at the log flush
640  * stage anyway, so this is unlikely to make much of a difference
641  * except in the data=writeback case.
642  *
643  * If the fdatawrite fails for any reason other than -EIO, we will
644  * continue the remainder of the fsync, although we'll still report
645  * the error at the end. This is to match filemap_write_and_wait_range()
646  * behaviour.
647  *
648  * Returns: errno
649  */
650 
651 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
652 		      int datasync)
653 {
654 	struct address_space *mapping = file->f_mapping;
655 	struct inode *inode = mapping->host;
656 	int sync_state = inode->i_state & I_DIRTY;
657 	struct gfs2_inode *ip = GFS2_I(inode);
658 	int ret = 0, ret1 = 0;
659 
660 	if (mapping->nrpages) {
661 		ret1 = filemap_fdatawrite_range(mapping, start, end);
662 		if (ret1 == -EIO)
663 			return ret1;
664 	}
665 
666 	if (!gfs2_is_jdata(ip))
667 		sync_state &= ~I_DIRTY_PAGES;
668 	if (datasync)
669 		sync_state &= ~I_DIRTY_SYNC;
670 
671 	if (sync_state) {
672 		ret = sync_inode_metadata(inode, 1);
673 		if (ret)
674 			return ret;
675 		if (gfs2_is_jdata(ip))
676 			filemap_write_and_wait(mapping);
677 		gfs2_ail_flush(ip->i_gl, 1);
678 	}
679 
680 	if (mapping->nrpages)
681 		ret = filemap_fdatawait_range(mapping, start, end);
682 
683 	return ret ? ret : ret1;
684 }
685 
686 /**
687  * gfs2_file_aio_write - Perform a write to a file
688  * @iocb: The io context
689  * @iov: The data to write
690  * @nr_segs: Number of @iov segments
691  * @pos: The file position
692  *
693  * We have to do a lock/unlock here to refresh the inode size for
694  * O_APPEND writes, otherwise we can land up writing at the wrong
695  * offset. There is still a race, but provided the app is using its
696  * own file locking, this will make O_APPEND work as expected.
697  *
698  */
699 
700 static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
701 				   unsigned long nr_segs, loff_t pos)
702 {
703 	struct file *file = iocb->ki_filp;
704 	size_t writesize = iov_length(iov, nr_segs);
705 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
706 	int ret;
707 
708 	ret = gfs2_rs_alloc(ip);
709 	if (ret)
710 		return ret;
711 
712 	gfs2_size_hint(file, pos, writesize);
713 
714 	if (file->f_flags & O_APPEND) {
715 		struct gfs2_holder gh;
716 
717 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
718 		if (ret)
719 			return ret;
720 		gfs2_glock_dq_uninit(&gh);
721 	}
722 
723 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
724 }
725 
726 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
727 			   int mode)
728 {
729 	struct gfs2_inode *ip = GFS2_I(inode);
730 	struct buffer_head *dibh;
731 	int error;
732 	loff_t size = len;
733 	unsigned int nr_blks;
734 	sector_t lblock = offset >> inode->i_blkbits;
735 
736 	error = gfs2_meta_inode_buffer(ip, &dibh);
737 	if (unlikely(error))
738 		return error;
739 
740 	gfs2_trans_add_meta(ip->i_gl, dibh);
741 
742 	if (gfs2_is_stuffed(ip)) {
743 		error = gfs2_unstuff_dinode(ip, NULL);
744 		if (unlikely(error))
745 			goto out;
746 	}
747 
748 	while (len) {
749 		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
750 		bh_map.b_size = len;
751 		set_buffer_zeronew(&bh_map);
752 
753 		error = gfs2_block_map(inode, lblock, &bh_map, 1);
754 		if (unlikely(error))
755 			goto out;
756 		len -= bh_map.b_size;
757 		nr_blks = bh_map.b_size >> inode->i_blkbits;
758 		lblock += nr_blks;
759 		if (!buffer_new(&bh_map))
760 			continue;
761 		if (unlikely(!buffer_zeronew(&bh_map))) {
762 			error = -EIO;
763 			goto out;
764 		}
765 	}
766 	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
767 		i_size_write(inode, offset + size);
768 
769 	mark_inode_dirty(inode);
770 
771 out:
772 	brelse(dibh);
773 	return error;
774 }
775 
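/**
 * calc_max_reserv - estimate the largest single allocation we can make
 * @ip: The inode
 * @max: Upper bound on the length, in bytes
 * @len: The length hint (in/out), in bytes
 * @data_blocks: Number of data blocks (in/out)
 * @ind_blocks: Number of indirect blocks (in/out)
 *
 * Starting from the free blocks left in the current resource group, work
 * out how many of them can be used as data once the indirect blocks
 * needed to map them are accounted for. If that is more than the caller's
 * current estimate, bump the returned sizes up to that maximum; otherwise
 * leave them alone.
 */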
776 static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
777 			    unsigned int *data_blocks, unsigned int *ind_blocks)
778 {
779 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
780 	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
781 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
782 
783 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
784 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
785 		max_data -= tmp;
786 	}
787 	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
788 	   so it might end up with fewer data blocks */
789 	if (max_data <= *data_blocks)
790 		return;
791 	*data_blocks = max_data;
792 	*ind_blocks = max_blocks - max_data;
793 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
794 	if (*len > max) {
795 		*len = max;
796 		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
797 	}
798 }
799 
800 static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
801 			   loff_t len)
802 {
803 	struct inode *inode = file_inode(file);
804 	struct gfs2_sbd *sdp = GFS2_SB(inode);
805 	struct gfs2_inode *ip = GFS2_I(inode);
806 	struct gfs2_alloc_parms ap = { .aflags = 0, };
807 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
808 	loff_t bytes, max_bytes;
809 	int error;
810 	const loff_t pos = offset;
811 	const loff_t count = len;
812 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
813 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
814 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
815 	struct gfs2_holder gh;
816 
817 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
818 
819 	/* We only support the FALLOC_FL_KEEP_SIZE mode */
820 	if (mode & ~FALLOC_FL_KEEP_SIZE)
821 		return -EOPNOTSUPP;
822 
823 	offset &= bsize_mask;
824 
825 	len = next - offset;
826 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
827 	if (!bytes)
828 		bytes = UINT_MAX;
829 	bytes &= bsize_mask;
830 	if (bytes == 0)
831 		bytes = sdp->sd_sb.sb_bsize;
832 
833 	error = gfs2_rs_alloc(ip);
834 	if (error)
835 		return error;
836 
837 	mutex_lock(&inode->i_mutex);
838 
839 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
840 	error = gfs2_glock_nq(&gh);
841 	if (unlikely(error))
842 		goto out_uninit;
843 
844 	gfs2_size_hint(file, offset, len);
845 
846 	while (len > 0) {
847 		if (len < bytes)
848 			bytes = len;
849 		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
850 			len -= bytes;
851 			offset += bytes;
852 			continue;
853 		}
854 		error = gfs2_quota_lock_check(ip);
855 		if (error)
856 			goto out_unlock;
857 
858 retry:
859 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
860 
861 		ap.target = data_blocks + ind_blocks;
862 		error = gfs2_inplace_reserve(ip, &ap);
863 		if (error) {
864 			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
865 				bytes >>= 1;
866 				bytes &= bsize_mask;
867 				if (bytes == 0)
868 					bytes = sdp->sd_sb.sb_bsize;
869 				goto retry;
870 			}
871 			goto out_qunlock;
872 		}
873 		max_bytes = bytes;
874 		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
875 				&max_bytes, &data_blocks, &ind_blocks);
876 
877 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
878 			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
879 		if (gfs2_is_jdata(ip))
880 			rblocks += data_blocks ? data_blocks : 1;
881 
882 		error = gfs2_trans_begin(sdp, rblocks,
883 					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
884 		if (error)
885 			goto out_trans_fail;
886 
887 		error = fallocate_chunk(inode, offset, max_bytes, mode);
888 		gfs2_trans_end(sdp);
889 
890 		if (error)
891 			goto out_trans_fail;
892 
893 		len -= max_bytes;
894 		offset += max_bytes;
895 		gfs2_inplace_release(ip);
896 		gfs2_quota_unlock(ip);
897 	}
898 
899 	if (error == 0)
900 		error = generic_write_sync(file, pos, count);
901 	goto out_unlock;
902 
903 out_trans_fail:
904 	gfs2_inplace_release(ip);
905 out_qunlock:
906 	gfs2_quota_unlock(ip);
907 out_unlock:
908 	gfs2_glock_dq(&gh);
909 out_uninit:
910 	gfs2_holder_uninit(&gh);
911 	mutex_unlock(&inode->i_mutex);
912 	return error;
913 }
914 
915 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
916 
917 /**
918  * gfs2_setlease - acquire/release a file lease
919  * @file: the file pointer
920  * @arg: lease type
921  * @fl: file lock
922  *
923  * We don't currently have a way to enforce a lease across the whole
924  * cluster; until we do, disable leases (by just returning -EINVAL),
925  * unless the administrator has requested purely local locking.
926  *
927  * Locking: called under i_lock
928  *
929  * Returns: errno
930  */
931 
932 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
933 {
934 	return -EINVAL;
935 }
936 
937 /**
938  * gfs2_lock - acquire/release a posix lock on a file
939  * @file: the file pointer
940  * @cmd: either modify or retrieve lock state, possibly wait
941  * @fl: type and range of lock
942  *
943  * Returns: errno
944  */
945 
946 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
947 {
948 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
949 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
950 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
951 
952 	if (!(fl->fl_flags & FL_POSIX))
953 		return -ENOLCK;
954 	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
955 		return -ENOLCK;
956 
957 	if (cmd == F_CANCELLK) {
958 		/* Hack: */
959 		cmd = F_SETLK;
960 		fl->fl_type = F_UNLCK;
961 	}
962 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
963 		if (fl->fl_type == F_UNLCK)
964 			posix_lock_file_wait(file, fl);
965 		return -EIO;
966 	}
967 	if (IS_GETLK(cmd))
968 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
969 	else if (fl->fl_type == F_UNLCK)
970 		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
971 	else
972 		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
973 }
974 
975 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
976 {
977 	struct gfs2_file *fp = file->private_data;
978 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
979 	struct gfs2_inode *ip = GFS2_I(file_inode(file));
980 	struct gfs2_glock *gl;
981 	unsigned int state;
982 	int flags;
983 	int error = 0;
984 
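	/* Map the flock type onto a glock state: a write lock needs the
	   glock exclusively, a read lock can share it. Non-blocking
	   requests (without SETLKW) use LM_FLAG_TRY, so a contended lock
	   fails with -EAGAIN further down instead of waiting. */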
985 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
986 	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
987 
988 	mutex_lock(&fp->f_fl_mutex);
989 
990 	gl = fl_gh->gh_gl;
991 	if (gl) {
992 		if (fl_gh->gh_state == state)
993 			goto out;
994 		flock_lock_file_wait(file,
995 				     &(struct file_lock){.fl_type = F_UNLCK});
996 		gfs2_glock_dq_wait(fl_gh);
997 		gfs2_holder_reinit(state, flags, fl_gh);
998 	} else {
999 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1000 				       &gfs2_flock_glops, CREATE, &gl);
1001 		if (error)
1002 			goto out;
1003 		gfs2_holder_init(gl, state, flags, fl_gh);
1004 		gfs2_glock_put(gl);
1005 	}
1006 	error = gfs2_glock_nq(fl_gh);
1007 	if (error) {
1008 		gfs2_holder_uninit(fl_gh);
1009 		if (error == GLR_TRYFAILED)
1010 			error = -EAGAIN;
1011 	} else {
1012 		error = flock_lock_file_wait(file, fl);
1013 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1014 	}
1015 
1016 out:
1017 	mutex_unlock(&fp->f_fl_mutex);
1018 	return error;
1019 }
1020 
1021 static void do_unflock(struct file *file, struct file_lock *fl)
1022 {
1023 	struct gfs2_file *fp = file->private_data;
1024 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1025 
1026 	mutex_lock(&fp->f_fl_mutex);
1027 	flock_lock_file_wait(file, fl);
1028 	if (fl_gh->gh_gl) {
1029 		gfs2_glock_dq_wait(fl_gh);
1030 		gfs2_holder_uninit(fl_gh);
1031 	}
1032 	mutex_unlock(&fp->f_fl_mutex);
1033 }
1034 
1035 /**
1036  * gfs2_flock - acquire/release a flock lock on a file
1037  * @file: the file pointer
1038  * @cmd: either modify or retrieve lock state, possibly wait
1039  * @fl: type and range of lock
1040  *
1041  * Returns: errno
1042  */
1043 
1044 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1045 {
1046 	if (!(fl->fl_flags & FL_FLOCK))
1047 		return -ENOLCK;
1048 	if (fl->fl_type & LOCK_MAND)
1049 		return -EOPNOTSUPP;
1050 
1051 	if (fl->fl_type == F_UNLCK) {
1052 		do_unflock(file, fl);
1053 		return 0;
1054 	} else {
1055 		return do_flock(file, cmd, fl);
1056 	}
1057 }
1058 
1059 const struct file_operations gfs2_file_fops = {
1060 	.llseek		= gfs2_llseek,
1061 	.read		= do_sync_read,
1062 	.aio_read	= generic_file_aio_read,
1063 	.write		= do_sync_write,
1064 	.aio_write	= gfs2_file_aio_write,
1065 	.unlocked_ioctl	= gfs2_ioctl,
1066 	.mmap		= gfs2_mmap,
1067 	.open		= gfs2_open,
1068 	.release	= gfs2_release,
1069 	.fsync		= gfs2_fsync,
1070 	.lock		= gfs2_lock,
1071 	.flock		= gfs2_flock,
1072 	.splice_read	= generic_file_splice_read,
1073 	.splice_write	= generic_file_splice_write,
1074 	.setlease	= gfs2_setlease,
1075 	.fallocate	= gfs2_fallocate,
1076 };
1077 
1078 const struct file_operations gfs2_dir_fops = {
1079 	.iterate	= gfs2_readdir,
1080 	.unlocked_ioctl	= gfs2_ioctl,
1081 	.open		= gfs2_open,
1082 	.release	= gfs2_release,
1083 	.fsync		= gfs2_fsync,
1084 	.lock		= gfs2_lock,
1085 	.flock		= gfs2_flock,
1086 	.llseek		= default_llseek,
1087 };
1088 
1089 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1090 
1091 const struct file_operations gfs2_file_fops_nolock = {
1092 	.llseek		= gfs2_llseek,
1093 	.read		= do_sync_read,
1094 	.aio_read	= generic_file_aio_read,
1095 	.write		= do_sync_write,
1096 	.aio_write	= gfs2_file_aio_write,
1097 	.unlocked_ioctl	= gfs2_ioctl,
1098 	.mmap		= gfs2_mmap,
1099 	.open		= gfs2_open,
1100 	.release	= gfs2_release,
1101 	.fsync		= gfs2_fsync,
1102 	.splice_read	= generic_file_splice_read,
1103 	.splice_write	= generic_file_splice_write,
1104 	.setlease	= generic_setlease,
1105 	.fallocate	= gfs2_fallocate,
1106 };
1107 
1108 const struct file_operations gfs2_dir_fops_nolock = {
1109 	.iterate	= gfs2_readdir,
1110 	.unlocked_ioctl	= gfs2_ioctl,
1111 	.open		= gfs2_open,
1112 	.release	= gfs2_release,
1113 	.fsync		= gfs2_fsync,
1114 	.llseek		= default_llseek,
1115 };
1116 
1117