xref: /openbmc/linux/fs/gfs2/file.c (revision 95e9fd10)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/falloc.h>
22 #include <linux/swap.h>
23 #include <linux/crc32.h>
24 #include <linux/writeback.h>
25 #include <asm/uaccess.h>
26 #include <linux/dlm.h>
27 #include <linux/dlm_plock.h>
28 
29 #include "gfs2.h"
30 #include "incore.h"
31 #include "bmap.h"
32 #include "dir.h"
33 #include "glock.h"
34 #include "glops.h"
35 #include "inode.h"
36 #include "log.h"
37 #include "meta_io.h"
38 #include "quota.h"
39 #include "rgrp.h"
40 #include "trans.h"
41 #include "util.h"
42 
43 /**
44  * gfs2_llseek - seek to a location in a file
45  * @file: the file
46  * @offset: the offset
47  * @origin: Where to seek from (SEEK_SET, SEEK_CUR, SEEK_END, SEEK_DATA or SEEK_HOLE)
48  *
49  * SEEK_END, SEEK_DATA and SEEK_HOLE require the glock for the file
50  * because they reference the file's size.
51  *
52  * Returns: The new offset, or errno
53  */
54 
55 static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
56 {
57 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
58 	struct gfs2_holder i_gh;
59 	loff_t error;
60 
61 	switch (origin) {
62 	case SEEK_END: /* These reference inode->i_size */
63 	case SEEK_DATA:
64 	case SEEK_HOLE:
65 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
66 					   &i_gh);
67 		if (!error) {
68 			error = generic_file_llseek(file, offset, origin);
69 			gfs2_glock_dq_uninit(&i_gh);
70 		}
71 		break;
72 	case SEEK_CUR:
73 	case SEEK_SET:
74 		error = generic_file_llseek(file, offset, origin);
75 		break;
76 	default:
77 		error = -EINVAL;
78 	}
79 
80 	return error;
81 }
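
/*
 * A minimal userspace sketch of the cases above: SEEK_SET/SEEK_CUR need no
 * cluster lock, while SEEK_END, SEEK_DATA and SEEK_HOLE take the shared
 * glock so that the size they reference is current cluster-wide.  The mount
 * path is illustrative only.
 *
 *	#define _GNU_SOURCE		// SEEK_DATA/SEEK_HOLE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_RDONLY);
 *	off_t end  = lseek(fd, 0, SEEK_END);	// takes and drops the glock
 *	off_t data = lseek(fd, 0, SEEK_DATA);	// likewise
 *	off_t cur  = lseek(fd, 0, SEEK_CUR);	// no glock needed
 */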
82 
83 /**
84  * gfs2_readdir - Read directory entries from a directory
85  * @file: The directory to read from
86  * @dirent: Buffer for dirents
87  * @filldir: Function used to do the copying
88  *
89  * Returns: errno
90  */
91 
92 static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
93 {
94 	struct inode *dir = file->f_mapping->host;
95 	struct gfs2_inode *dip = GFS2_I(dir);
96 	struct gfs2_holder d_gh;
97 	u64 offset = file->f_pos;
98 	int error;
99 
100 	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
101 	error = gfs2_glock_nq(&d_gh);
102 	if (error) {
103 		gfs2_holder_uninit(&d_gh);
104 		return error;
105 	}
106 
107 	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
108 
109 	gfs2_glock_dq_uninit(&d_gh);
110 
111 	file->f_pos = offset;
112 
113 	return error;
114 }
115 
116 /**
117  * fsflags_cvt - convert a bitfield of flags using a translation table
118  * @table: A table of 32 u32 flags
119  * @val: a 32 bit value to convert
120  *
121  * This function can be used to convert between fsflags values and
122  * GFS2's own flags values.
123  *
124  * Returns: the converted flags
125  */
126 static u32 fsflags_cvt(const u32 *table, u32 val)
127 {
128 	u32 res = 0;
129 	while(val) {
130 		if (val & 1)
131 			res |= *table;
132 		table++;
133 		val >>= 1;
134 	}
135 	return res;
136 }
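
/*
 * fsflags_cvt() walks @val one bit at a time and ORs in the table entry for
 * each set bit, so a table index is simply the bit position in the source
 * flag word.  A worked example using the fsflags_to_gfs2 table below:
 *
 *	FS_SYNC_FL    is bit 3  ->  fsflags_to_gfs2[3] = GFS2_DIF_SYNC
 *	FS_NOATIME_FL is bit 7  ->  fsflags_to_gfs2[7] = GFS2_DIF_NOATIME
 *
 *	fsflags_cvt(fsflags_to_gfs2, FS_SYNC_FL | FS_NOATIME_FL)
 *		== GFS2_DIF_SYNC | GFS2_DIF_NOATIME
 */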
137 
138 static const u32 fsflags_to_gfs2[32] = {
139 	[3] = GFS2_DIF_SYNC,
140 	[4] = GFS2_DIF_IMMUTABLE,
141 	[5] = GFS2_DIF_APPENDONLY,
142 	[7] = GFS2_DIF_NOATIME,
143 	[12] = GFS2_DIF_EXHASH,
144 	[14] = GFS2_DIF_INHERIT_JDATA,
145 	[17] = GFS2_DIF_TOPDIR,
146 };
147 
148 static const u32 gfs2_to_fsflags[32] = {
149 	[gfs2fl_Sync] = FS_SYNC_FL,
150 	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
151 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
152 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
153 	[gfs2fl_ExHash] = FS_INDEX_FL,
154 	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
155 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
156 };
157 
158 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
159 {
160 	struct inode *inode = filp->f_path.dentry->d_inode;
161 	struct gfs2_inode *ip = GFS2_I(inode);
162 	struct gfs2_holder gh;
163 	int error;
164 	u32 fsflags;
165 
166 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
167 	error = gfs2_glock_nq(&gh);
168 	if (error)
169 		return error;
170 
171 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
172 	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
173 		fsflags |= FS_JOURNAL_DATA_FL;
174 	if (put_user(fsflags, ptr))
175 		error = -EFAULT;
176 
177 	gfs2_glock_dq(&gh);
178 	gfs2_holder_uninit(&gh);
179 	return error;
180 }
181 
182 void gfs2_set_inode_flags(struct inode *inode)
183 {
184 	struct gfs2_inode *ip = GFS2_I(inode);
185 	unsigned int flags = inode->i_flags;
186 
187 	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
188 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
189 		inode->i_flags |= S_NOSEC;
190 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
191 		flags |= S_IMMUTABLE;
192 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
193 		flags |= S_APPEND;
194 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
195 		flags |= S_NOATIME;
196 	if (ip->i_diskflags & GFS2_DIF_SYNC)
197 		flags |= S_SYNC;
198 	inode->i_flags = flags;
199 }
200 
201 /* Flags that can be set by user space */
202 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
203 			     GFS2_DIF_IMMUTABLE|		\
204 			     GFS2_DIF_APPENDONLY|		\
205 			     GFS2_DIF_NOATIME|			\
206 			     GFS2_DIF_SYNC|			\
207 			     GFS2_DIF_SYSTEM|			\
208 			     GFS2_DIF_TOPDIR|			\
209 			     GFS2_DIF_INHERIT_JDATA)
210 
211 /**
212  * do_gfs2_set_flags - set flags on an inode
213  * @filp: file pointer
214  * @reqflags: The flags to set
215  * @mask: Indicates which flags are valid
216  *
217  */
218 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
219 {
220 	struct inode *inode = filp->f_path.dentry->d_inode;
221 	struct gfs2_inode *ip = GFS2_I(inode);
222 	struct gfs2_sbd *sdp = GFS2_SB(inode);
223 	struct buffer_head *bh;
224 	struct gfs2_holder gh;
225 	int error;
226 	u32 new_flags, flags;
227 
228 	error = mnt_want_write_file(filp);
229 	if (error)
230 		return error;
231 
232 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
233 	if (error)
234 		goto out_drop_write;
235 
236 	error = -EACCES;
237 	if (!inode_owner_or_capable(inode))
238 		goto out;
239 
240 	error = 0;
241 	flags = ip->i_diskflags;
242 	new_flags = (flags & ~mask) | (reqflags & mask);
243 	if ((new_flags ^ flags) == 0)
244 		goto out;
245 
246 	error = -EINVAL;
247 	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
248 		goto out;
249 
250 	error = -EPERM;
251 	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
252 		goto out;
253 	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
254 		goto out;
255 	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
256 	    !capable(CAP_LINUX_IMMUTABLE))
257 		goto out;
258 	if (!IS_IMMUTABLE(inode)) {
259 		error = gfs2_permission(inode, MAY_WRITE);
260 		if (error)
261 			goto out;
262 	}
263 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
264 		if (flags & GFS2_DIF_JDATA)
265 			gfs2_log_flush(sdp, ip->i_gl);
266 		error = filemap_fdatawrite(inode->i_mapping);
267 		if (error)
268 			goto out;
269 		error = filemap_fdatawait(inode->i_mapping);
270 		if (error)
271 			goto out;
272 	}
273 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
274 	if (error)
275 		goto out;
276 	error = gfs2_meta_inode_buffer(ip, &bh);
277 	if (error)
278 		goto out_trans_end;
279 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
280 	ip->i_diskflags = new_flags;
281 	gfs2_dinode_out(ip, bh->b_data);
282 	brelse(bh);
283 	gfs2_set_inode_flags(inode);
284 	gfs2_set_aops(inode);
285 out_trans_end:
286 	gfs2_trans_end(sdp);
287 out:
288 	gfs2_glock_dq_uninit(&gh);
289 out_drop_write:
290 	mnt_drop_write_file(filp);
291 	return error;
292 }
293 
294 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
295 {
296 	struct inode *inode = filp->f_path.dentry->d_inode;
297 	u32 fsflags, gfsflags;
298 
299 	if (get_user(fsflags, ptr))
300 		return -EFAULT;
301 
302 	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
303 	if (!S_ISDIR(inode->i_mode)) {
304 		gfsflags &= ~GFS2_DIF_TOPDIR;
305 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
306 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
307 		return do_gfs2_set_flags(filp, gfsflags, ~0);
308 	}
309 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
310 }
311 
312 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
313 {
314 	switch(cmd) {
315 	case FS_IOC_GETFLAGS:
316 		return gfs2_get_flags(filp, (u32 __user *)arg);
317 	case FS_IOC_SETFLAGS:
318 		return gfs2_set_flags(filp, (u32 __user *)arg);
319 	case FITRIM:
320 		return gfs2_fitrim(filp, (void __user *)arg);
321 	}
322 	return -ENOTTY;
323 }
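
/*
 * Userspace reaches the handlers above through the generic per-file
 * attribute ioctls; a minimal sketch (the mount path is illustrative only):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_RDONLY);
 *	int attr;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);	// gfs2_get_flags()
 *	attr |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);	// gfs2_set_flags()
 */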
324 
325 /**
326  * gfs2_allocate_page_backing - Use bmap to allocate blocks
327  * @page: The (locked) page to allocate backing for
328  *
329  * We try to allocate all the blocks required for the page in
330  * one go. This might fail for various reasons, so we keep
331  * trying until all the blocks to back this page are allocated.
332  * If some of the blocks are already allocated, that's OK too.
333  */
334 
335 static int gfs2_allocate_page_backing(struct page *page)
336 {
337 	struct inode *inode = page->mapping->host;
338 	struct buffer_head bh;
339 	unsigned long size = PAGE_CACHE_SIZE;
340 	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
341 
342 	do {
343 		bh.b_state = 0;
344 		bh.b_size = size;
345 		gfs2_block_map(inode, lblock, &bh, 1);
346 		if (!buffer_mapped(&bh))
347 			return -EIO;
348 		size -= bh.b_size;
349 		lblock += (bh.b_size >> inode->i_blkbits);
350 	} while(size > 0);
351 	return 0;
352 }
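
/*
 * For example, on a system with 4KiB pages and a 1KiB filesystem block size
 * the page needs four backing blocks starting at lblock = page->index * 4;
 * each pass through the loop above maps (and, because create == 1, allocates)
 * as much of the remaining range as gfs2_block_map() returns in bh.b_size,
 * until the whole page is covered.
 */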
353 
354 /**
355  * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
356  * @vma: The virtual memory area
357  * @page: The page which is about to become writable
358  *
359  * When the page becomes writable, we need to ensure that we have
360  * blocks allocated on disk to back that page.
361  */
362 
363 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
364 {
365 	struct page *page = vmf->page;
366 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
367 	struct gfs2_inode *ip = GFS2_I(inode);
368 	struct gfs2_sbd *sdp = GFS2_SB(inode);
369 	unsigned long last_index;
370 	u64 pos = page->index << PAGE_CACHE_SHIFT;
371 	unsigned int data_blocks, ind_blocks, rblocks;
372 	struct gfs2_holder gh;
373 	loff_t size;
374 	int ret;
375 
376 	sb_start_pagefault(inode->i_sb);
377 
378 	/* Update file times before taking page lock */
379 	file_update_time(vma->vm_file);
380 
381 	ret = gfs2_rs_alloc(ip);
382 	if (ret)
383 		return ret;
384 
385 	atomic_set(&ip->i_res->rs_sizehint,
386 		   PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift);
387 
388 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
389 	ret = gfs2_glock_nq(&gh);
390 	if (ret)
391 		goto out;
392 
393 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
394 	set_bit(GIF_SW_PAGED, &ip->i_flags);
395 
396 	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
397 		lock_page(page);
398 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
399 			ret = -EAGAIN;
400 			unlock_page(page);
401 		}
402 		goto out_unlock;
403 	}
404 
405 	ret = gfs2_rindex_update(sdp);
406 	if (ret)
407 		goto out_unlock;
408 
409 	ret = gfs2_quota_lock_check(ip);
410 	if (ret)
411 		goto out_unlock;
412 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
413 	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
414 	if (ret)
415 		goto out_quota_unlock;
416 
417 	rblocks = RES_DINODE + ind_blocks;
418 	if (gfs2_is_jdata(ip))
419 		rblocks += data_blocks ? data_blocks : 1;
420 	if (ind_blocks || data_blocks) {
421 		rblocks += RES_STATFS + RES_QUOTA;
422 		rblocks += gfs2_rg_blocks(ip);
423 	}
424 	ret = gfs2_trans_begin(sdp, rblocks, 0);
425 	if (ret)
426 		goto out_trans_fail;
427 
428 	lock_page(page);
429 	ret = -EINVAL;
430 	size = i_size_read(inode);
431 	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
432 	/* Check page index against inode size */
433 	if (size == 0 || (page->index > last_index))
434 		goto out_trans_end;
435 
436 	ret = -EAGAIN;
437 	/* If truncated, we must retry the operation, as we may have raced
438 	 * with the glock demotion code.
439 	 */
440 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
441 		goto out_trans_end;
442 
443 	/* Unstuff, if required, and allocate backing blocks for page */
444 	ret = 0;
445 	if (gfs2_is_stuffed(ip))
446 		ret = gfs2_unstuff_dinode(ip, page);
447 	if (ret == 0)
448 		ret = gfs2_allocate_page_backing(page);
449 
450 out_trans_end:
451 	if (ret)
452 		unlock_page(page);
453 	gfs2_trans_end(sdp);
454 out_trans_fail:
455 	gfs2_inplace_release(ip);
456 out_quota_unlock:
457 	gfs2_quota_unlock(ip);
458 out_unlock:
459 	gfs2_glock_dq(&gh);
460 out:
461 	gfs2_holder_uninit(&gh);
462 	if (ret == 0) {
463 		set_page_dirty(page);
464 		wait_on_page_writeback(page);
465 	}
466 	sb_end_pagefault(inode->i_sb);
467 	return block_page_mkwrite_return(ret);
468 }
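
/*
 * A write fault on a shared mapping is what drives the function above; a
 * minimal userspace sketch (the mount path is illustrative only):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	p[0] = 'x';	// the first store to the page calls gfs2_page_mkwrite(),
 *			// which, if the page is not yet backed, reserves quota
 *			// and rgrp space and allocates blocks before the store
 *			// is allowed to complete
 */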
469 
470 static const struct vm_operations_struct gfs2_vm_ops = {
471 	.fault = filemap_fault,
472 	.page_mkwrite = gfs2_page_mkwrite,
473 };
474 
475 /**
476  * gfs2_mmap - set up a memory mapping of a file
477  * @file: The file to map
478  * @vma: The VMA which describes the mapping
479  *
480  * There is no need to get a lock here unless we should be updating
481  * atime. We ignore any locking errors since the only consequence is
482  * a missed atime update (which will just be deferred until later).
483  *
484  * Returns: 0
485  */
486 
487 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
488 {
489 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
490 
491 	if (!(file->f_flags & O_NOATIME) &&
492 	    !IS_NOATIME(&ip->i_inode)) {
493 		struct gfs2_holder i_gh;
494 		int error;
495 
496 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
497 		error = gfs2_glock_nq(&i_gh);
498 		if (error == 0) {
499 			file_accessed(file);
500 			gfs2_glock_dq(&i_gh);
501 		}
502 		gfs2_holder_uninit(&i_gh);
503 		if (error)
504 			return error;
505 	}
506 	vma->vm_ops = &gfs2_vm_ops;
507 	vma->vm_flags |= VM_CAN_NONLINEAR;
508 
509 	return 0;
510 }
511 
512 /**
513  * gfs2_open - open a file
514  * @inode: the inode to open
515  * @file: the struct file for this opening
516  *
517  * Returns: errno
518  */
519 
520 static int gfs2_open(struct inode *inode, struct file *file)
521 {
522 	struct gfs2_inode *ip = GFS2_I(inode);
523 	struct gfs2_holder i_gh;
524 	struct gfs2_file *fp;
525 	int error;
526 
527 	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
528 	if (!fp)
529 		return -ENOMEM;
530 
531 	mutex_init(&fp->f_fl_mutex);
532 
533 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
534 	file->private_data = fp;
535 
536 	if (S_ISREG(ip->i_inode.i_mode)) {
537 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
538 					   &i_gh);
539 		if (error)
540 			goto fail;
541 
542 		if (!(file->f_flags & O_LARGEFILE) &&
543 		    i_size_read(inode) > MAX_NON_LFS) {
544 			error = -EOVERFLOW;
545 			goto fail_gunlock;
546 		}
547 
548 		gfs2_glock_dq_uninit(&i_gh);
549 	}
550 
551 	return 0;
552 
553 fail_gunlock:
554 	gfs2_glock_dq_uninit(&i_gh);
555 fail:
556 	file->private_data = NULL;
557 	kfree(fp);
558 	return error;
559 }
560 
561 /**
562  * gfs2_release - called to close a struct file
563  * @inode: the inode the struct file belongs to
564  * @file: the struct file being closed
565  *
566  * Returns: errno
567  */
568 
569 static int gfs2_release(struct inode *inode, struct file *file)
570 {
571 	struct gfs2_inode *ip = GFS2_I(inode);
572 
573 	kfree(file->private_data);
574 	file->private_data = NULL;
575 
576 	if ((file->f_mode & FMODE_WRITE) &&
577 	    (atomic_read(&inode->i_writecount) == 1))
578 		gfs2_rs_delete(ip);
579 
580 	return 0;
581 }
582 
583 /**
584  * gfs2_fsync - sync the dirty data for a file (across the cluster)
585  * @file: the file that points to the dentry
586  * @start: the start position in the file to sync
587  * @end: the end position in the file to sync
588  * @datasync: set if we can ignore timestamp changes
589  *
590  * We split the data flushing here so that we don't wait for the data
591  * until after we've also sent the metadata to disk. Note that for
592  * data=ordered, we will write & wait for the data at the log flush
593  * stage anyway, so this is unlikely to make much of a difference
594  * except in the data=writeback case.
595  *
596  * If the fdatawrite fails due to any reason except -EIO, we will
597  * continue the remainder of the fsync, although we'll still report
598  * the error at the end. This is to match filemap_write_and_wait_range()
599  * behaviour.
600  *
601  * Returns: errno
602  */
603 
604 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
605 		      int datasync)
606 {
607 	struct address_space *mapping = file->f_mapping;
608 	struct inode *inode = mapping->host;
609 	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
610 	struct gfs2_inode *ip = GFS2_I(inode);
611 	int ret = 0, ret1 = 0;
612 
613 	if (mapping->nrpages) {
614 		ret1 = filemap_fdatawrite_range(mapping, start, end);
615 		if (ret1 == -EIO)
616 			return ret1;
617 	}
618 
619 	if (datasync)
620 		sync_state &= ~I_DIRTY_SYNC;
621 
622 	if (sync_state) {
623 		ret = sync_inode_metadata(inode, 1);
624 		if (ret)
625 			return ret;
626 		if (gfs2_is_jdata(ip))
627 			filemap_write_and_wait(mapping);
628 		gfs2_ail_flush(ip->i_gl, 1);
629 	}
630 
631 	if (mapping->nrpages)
632 		ret = filemap_fdatawait_range(mapping, start, end);
633 
634 	return ret ? ret : ret1;
635 }
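
/*
 * From userspace the split above is reached through the usual pair of calls;
 * fdatasync() passes datasync == 1, so timestamp-only dirtiness (I_DIRTY_SYNC)
 * does not force a metadata sync.  A minimal sketch (path illustrative only):
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_WRONLY);
 *
 *	write(fd, "hello", 5);
 *	fdatasync(fd);	// data plus the metadata needed to find it (I_DIRTY_DATASYNC)
 *	fsync(fd);	// additionally flushes timestamp-only changes
 */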
636 
637 /**
638  * gfs2_file_aio_write - Perform a write to a file
639  * @iocb: The io context
640  * @iov: The data to write
641  * @nr_segs: Number of @iov segments
642  * @pos: The file position
643  *
644  * We have to do a lock/unlock here to refresh the inode size for
645  * O_APPEND writes, otherwise we can end up writing at the wrong
646  * offset. There is still a race, but provided the app is using its
647  * own file locking, this will make O_APPEND work as expected.
648  *
649  */
650 
651 static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
652 				   unsigned long nr_segs, loff_t pos)
653 {
654 	struct file *file = iocb->ki_filp;
655 	size_t writesize = iov_length(iov, nr_segs);
656 	struct dentry *dentry = file->f_dentry;
657 	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
658 	struct gfs2_sbd *sdp;
659 	int ret;
660 
661 	sdp = GFS2_SB(file->f_mapping->host);
662 	ret = gfs2_rs_alloc(ip);
663 	if (ret)
664 		return ret;
665 
666 	atomic_set(&ip->i_res->rs_sizehint, writesize >> sdp->sd_sb.sb_bsize_shift);
667 	if (file->f_flags & O_APPEND) {
668 		struct gfs2_holder gh;
669 
670 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
671 		if (ret)
672 			return ret;
673 		gfs2_glock_dq_uninit(&gh);
674 	}
675 
676 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
677 }
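
/*
 * The O_APPEND special case above matters when several nodes append to the
 * same file: taking and dropping the shared glock refreshes i_size before
 * generic_file_aio_write() positions the write.  A userspace sketch (the
 * mount path is illustrative only):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/gfs2/log", O_WRONLY | O_APPEND);
 *	write(fd, "entry\n", 6);	// lands at the current cluster-wide EOF,
 *					// provided appenders use their own locking
 */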
678 
679 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
680 			   int mode)
681 {
682 	struct gfs2_inode *ip = GFS2_I(inode);
683 	struct buffer_head *dibh;
684 	int error;
685 	loff_t size = len;
686 	unsigned int nr_blks;
687 	sector_t lblock = offset >> inode->i_blkbits;
688 
689 	error = gfs2_meta_inode_buffer(ip, &dibh);
690 	if (unlikely(error))
691 		return error;
692 
693 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
694 
695 	if (gfs2_is_stuffed(ip)) {
696 		error = gfs2_unstuff_dinode(ip, NULL);
697 		if (unlikely(error))
698 			goto out;
699 	}
700 
701 	while (len) {
702 		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
703 		bh_map.b_size = len;
704 		set_buffer_zeronew(&bh_map);
705 
706 		error = gfs2_block_map(inode, lblock, &bh_map, 1);
707 		if (unlikely(error))
708 			goto out;
709 		len -= bh_map.b_size;
710 		nr_blks = bh_map.b_size >> inode->i_blkbits;
711 		lblock += nr_blks;
712 		if (!buffer_new(&bh_map))
713 			continue;
714 		if (unlikely(!buffer_zeronew(&bh_map))) {
715 			error = -EIO;
716 			goto out;
717 		}
718 	}
719 	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
720 		i_size_write(inode, offset + size);
721 
722 	mark_inode_dirty(inode);
723 
724 out:
725 	brelse(dibh);
726 	return error;
727 }
728 
729 static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
730 			    unsigned int *data_blocks, unsigned int *ind_blocks)
731 {
732 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
733 	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
734 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
735 
736 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
737 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
738 		max_data -= tmp;
739 	}
740 	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv(),
741 	   so it might end up with fewer data blocks */
742 	if (max_data <= *data_blocks)
743 		return;
744 	*data_blocks = max_data;
745 	*ind_blocks = max_blocks - max_data;
746 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
747 	if (*len > max) {
748 		*len = max;
749 		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
750 	}
751 }
752 
753 static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
754 			   loff_t len)
755 {
756 	struct inode *inode = file->f_path.dentry->d_inode;
757 	struct gfs2_sbd *sdp = GFS2_SB(inode);
758 	struct gfs2_inode *ip = GFS2_I(inode);
759 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
760 	loff_t bytes, max_bytes;
761 	int error;
762 	const loff_t pos = offset;
763 	const loff_t count = len;
764 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
765 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
766 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
767 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
768 
769 	/* We only support the FALLOC_FL_KEEP_SIZE mode */
770 	if (mode & ~FALLOC_FL_KEEP_SIZE)
771 		return -EOPNOTSUPP;
772 
773 	offset &= bsize_mask;
774 
775 	len = next - offset;
776 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
777 	if (!bytes)
778 		bytes = UINT_MAX;
779 	bytes &= bsize_mask;
780 	if (bytes == 0)
781 		bytes = sdp->sd_sb.sb_bsize;
782 
783 	error = gfs2_rs_alloc(ip);
784 	if (error)
785 		return error;
786 
787 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
788 	error = gfs2_glock_nq(&ip->i_gh);
789 	if (unlikely(error))
790 		goto out_uninit;
791 
792 	atomic_set(&ip->i_res->rs_sizehint, len >> sdp->sd_sb.sb_bsize_shift);
793 
794 	while (len > 0) {
795 		if (len < bytes)
796 			bytes = len;
797 		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
798 			len -= bytes;
799 			offset += bytes;
800 			continue;
801 		}
802 		error = gfs2_quota_lock_check(ip);
803 		if (error)
804 			goto out_unlock;
805 
806 retry:
807 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
808 
809 		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
810 		if (error) {
811 			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
812 				bytes >>= 1;
813 				bytes &= bsize_mask;
814 				if (bytes == 0)
815 					bytes = sdp->sd_sb.sb_bsize;
816 				goto retry;
817 			}
818 			goto out_qunlock;
819 		}
820 		max_bytes = bytes;
821 		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
822 				&max_bytes, &data_blocks, &ind_blocks);
823 
824 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
825 			  RES_RG_HDR + gfs2_rg_blocks(ip);
826 		if (gfs2_is_jdata(ip))
827 			rblocks += data_blocks ? data_blocks : 1;
828 
829 		error = gfs2_trans_begin(sdp, rblocks,
830 					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
831 		if (error)
832 			goto out_trans_fail;
833 
834 		error = fallocate_chunk(inode, offset, max_bytes, mode);
835 		gfs2_trans_end(sdp);
836 
837 		if (error)
838 			goto out_trans_fail;
839 
840 		len -= max_bytes;
841 		offset += max_bytes;
842 		gfs2_inplace_release(ip);
843 		gfs2_quota_unlock(ip);
844 	}
845 
846 	if (error == 0)
847 		error = generic_write_sync(file, pos, count);
848 	goto out_unlock;
849 
850 out_trans_fail:
851 	gfs2_inplace_release(ip);
852 out_qunlock:
853 	gfs2_quota_unlock(ip);
854 out_unlock:
855 	gfs2_glock_dq(&ip->i_gh);
856 out_uninit:
857 	gfs2_holder_uninit(&ip->i_gh);
858 	return error;
859 }
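
/*
 * Only mode 0 or FALLOC_FL_KEEP_SIZE is accepted above; a minimal userspace
 * sketch that preallocates 16MiB without changing i_size (the mount path is
 * illustrative only):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_WRONLY);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 */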
860 
861 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
862 
863 /**
864  * gfs2_setlease - acquire/release a file lease
865  * @file: the file pointer
866  * @arg: lease type
867  * @fl: file lock
868  *
869  * We don't currently have a way to enforce a lease across the whole
870  * cluster; until we do, disable leases (by just returning -EINVAL),
871  * unless the administrator has requested purely local locking.
872  *
873  * Locking: called under lock_flocks
874  *
875  * Returns: errno
876  */
877 
878 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
879 {
880 	return -EINVAL;
881 }
882 
883 /**
884  * gfs2_lock - acquire/release a posix lock on a file
885  * @file: the file pointer
886  * @cmd: either modify or retrieve lock state, possibly wait
887  * @fl: type and range of lock
888  *
889  * Returns: errno
890  */
891 
892 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
893 {
894 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
895 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
896 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
897 
898 	if (!(fl->fl_flags & FL_POSIX))
899 		return -ENOLCK;
900 	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
901 		return -ENOLCK;
902 
903 	if (cmd == F_CANCELLK) {
904 		/* Hack: */
905 		cmd = F_SETLK;
906 		fl->fl_type = F_UNLCK;
907 	}
908 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
909 		return -EIO;
910 	if (IS_GETLK(cmd))
911 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
912 	else if (fl->fl_type == F_UNLCK)
913 		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
914 	else
915 		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
916 }
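
/*
 * POSIX (fcntl) locks taken on a GFS2 file are forwarded to the DLM by the
 * function above, so they are seen cluster-wide.  A userspace sketch of the
 * F_SETLKW/F_SETLK paths (the mount path is illustrative only; fields not
 * set are zeroed by the initializer):
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_RDWR);
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// whole file
 *	};
 *
 *	fcntl(fd, F_SETLKW, &fl);	// ends up in dlm_posix_lock()
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);	// dlm_posix_unlock()
 */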
917 
918 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
919 {
920 	struct gfs2_file *fp = file->private_data;
921 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
922 	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
923 	struct gfs2_glock *gl;
924 	unsigned int state;
925 	int flags;
926 	int error = 0;
927 
928 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
929 	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
930 
931 	mutex_lock(&fp->f_fl_mutex);
932 
933 	gl = fl_gh->gh_gl;
934 	if (gl) {
935 		if (fl_gh->gh_state == state)
936 			goto out;
937 		flock_lock_file_wait(file,
938 				     &(struct file_lock){.fl_type = F_UNLCK});
939 		gfs2_glock_dq_wait(fl_gh);
940 		gfs2_holder_reinit(state, flags, fl_gh);
941 	} else {
942 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
943 				       &gfs2_flock_glops, CREATE, &gl);
944 		if (error)
945 			goto out;
946 		gfs2_holder_init(gl, state, flags, fl_gh);
947 		gfs2_glock_put(gl);
948 	}
949 	error = gfs2_glock_nq(fl_gh);
950 	if (error) {
951 		gfs2_holder_uninit(fl_gh);
952 		if (error == GLR_TRYFAILED)
953 			error = -EAGAIN;
954 	} else {
955 		error = flock_lock_file_wait(file, fl);
956 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
957 	}
958 
959 out:
960 	mutex_unlock(&fp->f_fl_mutex);
961 	return error;
962 }
963 
964 static void do_unflock(struct file *file, struct file_lock *fl)
965 {
966 	struct gfs2_file *fp = file->private_data;
967 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
968 
969 	mutex_lock(&fp->f_fl_mutex);
970 	flock_lock_file_wait(file, fl);
971 	if (fl_gh->gh_gl) {
972 		gfs2_glock_dq_wait(fl_gh);
973 		gfs2_holder_uninit(fl_gh);
974 	}
975 	mutex_unlock(&fp->f_fl_mutex);
976 }
977 
978 /**
979  * gfs2_flock - acquire/release a flock lock on a file
980  * @file: the file pointer
981  * @cmd: either modify or retrieve lock state, possibly wait
982  * @fl: type and range of lock
983  *
984  * Returns: errno
985  */
986 
987 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
988 {
989 	if (!(fl->fl_flags & FL_FLOCK))
990 		return -ENOLCK;
991 	if (fl->fl_type & LOCK_MAND)
992 		return -EOPNOTSUPP;
993 
994 	if (fl->fl_type == F_UNLCK) {
995 		do_unflock(file, fl);
996 		return 0;
997 	} else {
998 		return do_flock(file, cmd, fl);
999 	}
1000 }
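
/*
 * flock() locks map onto a per-inode flock glock (see do_flock() above), so
 * they too are cluster-wide.  A userspace sketch (path illustrative only):
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/somefile", O_RDWR);
 *
 *	flock(fd, LOCK_EX);	// do_flock() takes the glock in LM_ST_EXCLUSIVE
 *	flock(fd, LOCK_UN);	// do_unflock() drops it
 */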
1001 
1002 const struct file_operations gfs2_file_fops = {
1003 	.llseek		= gfs2_llseek,
1004 	.read		= do_sync_read,
1005 	.aio_read	= generic_file_aio_read,
1006 	.write		= do_sync_write,
1007 	.aio_write	= gfs2_file_aio_write,
1008 	.unlocked_ioctl	= gfs2_ioctl,
1009 	.mmap		= gfs2_mmap,
1010 	.open		= gfs2_open,
1011 	.release	= gfs2_release,
1012 	.fsync		= gfs2_fsync,
1013 	.lock		= gfs2_lock,
1014 	.flock		= gfs2_flock,
1015 	.splice_read	= generic_file_splice_read,
1016 	.splice_write	= generic_file_splice_write,
1017 	.setlease	= gfs2_setlease,
1018 	.fallocate	= gfs2_fallocate,
1019 };
1020 
1021 const struct file_operations gfs2_dir_fops = {
1022 	.readdir	= gfs2_readdir,
1023 	.unlocked_ioctl	= gfs2_ioctl,
1024 	.open		= gfs2_open,
1025 	.release	= gfs2_release,
1026 	.fsync		= gfs2_fsync,
1027 	.lock		= gfs2_lock,
1028 	.flock		= gfs2_flock,
1029 	.llseek		= default_llseek,
1030 };
1031 
1032 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1033 
1034 const struct file_operations gfs2_file_fops_nolock = {
1035 	.llseek		= gfs2_llseek,
1036 	.read		= do_sync_read,
1037 	.aio_read	= generic_file_aio_read,
1038 	.write		= do_sync_write,
1039 	.aio_write	= gfs2_file_aio_write,
1040 	.unlocked_ioctl	= gfs2_ioctl,
1041 	.mmap		= gfs2_mmap,
1042 	.open		= gfs2_open,
1043 	.release	= gfs2_release,
1044 	.fsync		= gfs2_fsync,
1045 	.splice_read	= generic_file_splice_read,
1046 	.splice_write	= generic_file_splice_write,
1047 	.setlease	= generic_setlease,
1048 	.fallocate	= gfs2_fallocate,
1049 };
1050 
1051 const struct file_operations gfs2_dir_fops_nolock = {
1052 	.readdir	= gfs2_readdir,
1053 	.unlocked_ioctl	= gfs2_ioctl,
1054 	.open		= gfs2_open,
1055 	.release	= gfs2_release,
1056 	.fsync		= gfs2_fsync,
1057 	.llseek		= default_llseek,
1058 };
1059 
1060