// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"

/*
 * Remove the dirty flags from a span of pages.
 */
static void cifs_undirty_folios(struct inode *inode, loff_t start, unsigned int len)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) {
		if (xas_retry(&xas, folio))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();
		folio_lock(folio);
		folio_clear_dirty_for_io(folio);
		folio_unlock(folio);
		rcu_read_lock();
	}

	rcu_read_unlock();
}

/*
 * Completion of write to server.
 */
void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	if (!len)
		return;

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (xas_retry(&xas, folio))
			continue;
		if (!folio_test_writeback(folio)) {
			WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
				  len, start, folio->index, end);
			continue;
		}

		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();
}

/*
 * Failure of write to server.
 */
void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	if (!len)
		return;

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (xas_retry(&xas, folio))
			continue;
		if (!folio_test_writeback(folio)) {
			WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
				  len, start, folio->index, end);
			continue;
		}

		folio_set_error(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();
}

/*
 * Redirty pages after a temporary failure.
 */
void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	if (!len)
		return;

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
				  len, start, folio->index, end);
			continue;
		}

		filemap_dirty_folio(folio->mapping, folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();
}

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

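/*
 * Map VFS open flags to the SMB desired-access bits requested at open time.
 * When rdwr_for_fscache is set, a write-only open is widened to read+write so
 * that the local cache can be filled in around partial writes.
 */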
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can cause
		   unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
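/* Map VFS open flags to the SMB_O_* flags used by the legacy POSIX open call. */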
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

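/* Map O_CREAT/O_EXCL/O_TRUNC combinations to an SMB create disposition. */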
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
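/*
 * Open a file using the legacy CIFS POSIX extensions.  On success, optionally
 * instantiate or update the inode from the FILE_UNIX_BASIC_INFO returned by
 * the server.
 */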
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

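/*
 * Open a file with the standard SMB create call: work out the desired access,
 * disposition and create options from the VFS open flags, issue the open, and
 * then refresh the inode from the server.  If the access was widened to
 * read+write for fscache and the server refuses it, retry with the original
 * access and invalidate the locally cached data for this inode.
 */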
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

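/* Return true if any open fid on this inode holds byte-range locks. */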
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

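/* Take @sem for write, polling with trylock plus a short sleep between attempts. */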
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

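/*
 * Allocate and initialize a cifsFileInfo for a newly opened file, attach it to
 * the inode and tcon open-file lists, and apply the oplock/lease granted by
 * the server (downgrading to no oplock if mandatory brlocks are held).
 */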
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

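/*
 * Tear down a cifsFileInfo once the last reference is gone: discard any
 * remaining byte-range lock records and drop the tlink, dentry and superblock
 * references.
 */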
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

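/*
 * Worker that retries a server-side close which initially failed with -EBUSY
 * or -EAGAIN (up to MAX_RETRIES attempts), then releases the cifsFileInfo.
 */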
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on the
		 * last close because it may cause an error when we open this
		 * file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

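/*
 * ->open() for cifs files: reuse a cached handle whose close was deferred if
 * the open flags match, otherwise open the file on the server (preferring the
 * legacy POSIX open where available) and attach a new cifsFileInfo.
 */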
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

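/*
 * Reopen a file whose handle has been invalidated (for example after a
 * reconnect).  If @can_flush is set, write back dirty data and revalidate the
 * inode; then restore the oplock state and any byte-range locks.
 */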
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
109638c8a9a5SSteve French cfile->invalidHandle = false;
109738c8a9a5SSteve French mutex_unlock(&cfile->fh_mutex);
109838c8a9a5SSteve French cinode = CIFS_I(inode);
109938c8a9a5SSteve French
110038c8a9a5SSteve French if (can_flush) {
110138c8a9a5SSteve French rc = filemap_write_and_wait(inode->i_mapping);
110238c8a9a5SSteve French if (!is_interrupt_error(rc))
110338c8a9a5SSteve French mapping_set_error(inode->i_mapping, rc);
110438c8a9a5SSteve French
110502bcf865SSteve French if (tcon->posix_extensions) {
110602bcf865SSteve French rc = smb311_posix_get_inode_info(&inode, full_path,
110702bcf865SSteve French NULL, inode->i_sb, xid);
110802bcf865SSteve French } else if (tcon->unix_ext) {
110938c8a9a5SSteve French rc = cifs_get_inode_info_unix(&inode, full_path,
111038c8a9a5SSteve French inode->i_sb, xid);
111102bcf865SSteve French } else {
111238c8a9a5SSteve French rc = cifs_get_inode_info(&inode, full_path, NULL,
111338c8a9a5SSteve French inode->i_sb, xid, NULL);
111438c8a9a5SSteve French }
111502bcf865SSteve French }
111638c8a9a5SSteve French /*
111738c8a9a5SSteve French * Otherwise we are already writing data out to the server and could
111838c8a9a5SSteve French * deadlock if we tried to flush it; and since we do not know whether
111938c8a9a5SSteve French * we have data that would invalidate the current end of file on the
112038c8a9a5SSteve French * server, we cannot go to the server for the new inode info.
112138c8a9a5SSteve French */
112238c8a9a5SSteve French
112338c8a9a5SSteve French /*
112438c8a9a5SSteve French * If the server returned a read oplock and we have mandatory brlocks,
112538c8a9a5SSteve French * set oplock level to None.
112638c8a9a5SSteve French */
112738c8a9a5SSteve French if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
112838c8a9a5SSteve French cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
112938c8a9a5SSteve French oplock = 0;
113038c8a9a5SSteve French }
113138c8a9a5SSteve French
113238c8a9a5SSteve French server->ops->set_fid(cfile, &cfile->fid, oplock);
113338c8a9a5SSteve French if (oparms.reconnect)
113438c8a9a5SSteve French cifs_relock_file(cfile);
113538c8a9a5SSteve French
113638c8a9a5SSteve French reopen_error_exit:
113738c8a9a5SSteve French free_dentry_path(page);
113838c8a9a5SSteve French free_xid(xid);
113938c8a9a5SSteve French return rc;
114038c8a9a5SSteve French }
114138c8a9a5SSteve French
114238c8a9a5SSteve French void smb2_deferred_work_close(struct work_struct *work)
114338c8a9a5SSteve French {
114438c8a9a5SSteve French struct cifsFileInfo *cfile = container_of(work,
114538c8a9a5SSteve French struct cifsFileInfo, deferred.work);
114638c8a9a5SSteve French
114738c8a9a5SSteve French spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
114838c8a9a5SSteve French cifs_del_deferred_close(cfile);
114938c8a9a5SSteve French cfile->deferred_close_scheduled = false;
115038c8a9a5SSteve French spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
115138c8a9a5SSteve French _cifsFileInfo_put(cfile, true, false);
115238c8a9a5SSteve French }
115338c8a9a5SSteve French
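/*
 * A close may be deferred only when a close timeout is configured, a
 * lease is granted and caches read+handle (RH) or read+write+handle
 * (RHW), a cifs_deferred_close structure was successfully allocated,
 * and the inode is not flagged for close-on-lock.
 */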
115491cdeb0dSBharath SM static bool
115591cdeb0dSBharath SM smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
115691cdeb0dSBharath SM {
115791cdeb0dSBharath SM struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
115891cdeb0dSBharath SM struct cifsInodeInfo *cinode = CIFS_I(inode);
115991cdeb0dSBharath SM
116091cdeb0dSBharath SM return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
116191cdeb0dSBharath SM (cinode->oplock == CIFS_CACHE_RHW_FLG ||
116291cdeb0dSBharath SM cinode->oplock == CIFS_CACHE_RH_FLG) &&
116391cdeb0dSBharath SM !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
116591cdeb0dSBharath SM }
116691cdeb0dSBharath SM
116738c8a9a5SSteve French int cifs_close(struct inode *inode, struct file *file)
116838c8a9a5SSteve French {
116938c8a9a5SSteve French struct cifsFileInfo *cfile;
117038c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(inode);
117138c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
117238c8a9a5SSteve French struct cifs_deferred_close *dclose;
117338c8a9a5SSteve French
117438c8a9a5SSteve French cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
117538c8a9a5SSteve French
117638c8a9a5SSteve French if (file->private_data != NULL) {
117738c8a9a5SSteve French cfile = file->private_data;
117838c8a9a5SSteve French file->private_data = NULL;
117938c8a9a5SSteve French dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
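/*
 * dclose may be NULL here; smb2_can_defer_close() checks for that, so
 * an allocation failure simply falls through to the immediate close
 * path below (kfree(NULL) is a no-op).
 */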
118091cdeb0dSBharath SM if ((cfile->status_file_deleted == false) &&
118191cdeb0dSBharath SM (smb2_can_defer_close(inode, dclose))) {
118238c8a9a5SSteve French if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
118323171df5SJeff Layton inode_set_mtime_to_ts(inode,
118423171df5SJeff Layton inode_set_ctime_current(inode));
118538c8a9a5SSteve French }
118638c8a9a5SSteve French spin_lock(&cinode->deferred_lock);
118738c8a9a5SSteve French cifs_add_deferred_close(cfile, dclose);
118838c8a9a5SSteve French if (cfile->deferred_close_scheduled &&
118938c8a9a5SSteve French delayed_work_pending(&cfile->deferred)) {
119038c8a9a5SSteve French /*
119138c8a9a5SSteve French * If no work was actually pending, mod_delayed_work queues new work,
119238c8a9a5SSteve French * so increase the ref count to avoid a use-after-free.
119338c8a9a5SSteve French */
119438c8a9a5SSteve French if (!mod_delayed_work(deferredclose_wq,
119538c8a9a5SSteve French &cfile->deferred, cifs_sb->ctx->closetimeo))
119638c8a9a5SSteve French cifsFileInfo_get(cfile);
119738c8a9a5SSteve French } else {
119838c8a9a5SSteve French /* Deferred close for files */
119938c8a9a5SSteve French queue_delayed_work(deferredclose_wq,
120038c8a9a5SSteve French &cfile->deferred, cifs_sb->ctx->closetimeo);
120138c8a9a5SSteve French cfile->deferred_close_scheduled = true;
120238c8a9a5SSteve French spin_unlock(&cinode->deferred_lock);
120338c8a9a5SSteve French return 0;
120438c8a9a5SSteve French }
120538c8a9a5SSteve French spin_unlock(&cinode->deferred_lock);
120638c8a9a5SSteve French _cifsFileInfo_put(cfile, true, false);
120738c8a9a5SSteve French } else {
120838c8a9a5SSteve French _cifsFileInfo_put(cfile, true, false);
120938c8a9a5SSteve French kfree(dclose);
121038c8a9a5SSteve French }
121138c8a9a5SSteve French }
121238c8a9a5SSteve French
121338c8a9a5SSteve French /* return code from the ->release op is always ignored */
121438c8a9a5SSteve French return 0;
121538c8a9a5SSteve French }
121638c8a9a5SSteve French
121738c8a9a5SSteve French void
121838c8a9a5SSteve French cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
121938c8a9a5SSteve French {
122038c8a9a5SSteve French struct cifsFileInfo *open_file, *tmp;
122138c8a9a5SSteve French struct list_head tmp_list;
122238c8a9a5SSteve French
122338c8a9a5SSteve French if (!tcon->use_persistent || !tcon->need_reopen_files)
122438c8a9a5SSteve French return;
122538c8a9a5SSteve French
122638c8a9a5SSteve French tcon->need_reopen_files = false;
122738c8a9a5SSteve French
122838c8a9a5SSteve French cifs_dbg(FYI, "Reopen persistent handles\n");
122938c8a9a5SSteve French INIT_LIST_HEAD(&tmp_list);
123038c8a9a5SSteve French
123138c8a9a5SSteve French /* list all files open on tree connection, reopen resilient handles */
123238c8a9a5SSteve French spin_lock(&tcon->open_file_lock);
123338c8a9a5SSteve French list_for_each_entry(open_file, &tcon->openFileList, tlist) {
123438c8a9a5SSteve French if (!open_file->invalidHandle)
123538c8a9a5SSteve French continue;
123638c8a9a5SSteve French cifsFileInfo_get(open_file);
123738c8a9a5SSteve French list_add_tail(&open_file->rlist, &tmp_list);
123838c8a9a5SSteve French }
123938c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
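/*
 * Reopen outside open_file_lock so a spinlock is not held across
 * network I/O; each entry on tmp_list holds the extra reference taken
 * above, so the cifsFileInfo cannot go away while the reopen is in
 * flight.
 */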
124038c8a9a5SSteve French
124138c8a9a5SSteve French list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
124238c8a9a5SSteve French if (cifs_reopen_file(open_file, false /* do not flush */))
124338c8a9a5SSteve French tcon->need_reopen_files = true;
124438c8a9a5SSteve French list_del_init(&open_file->rlist);
124538c8a9a5SSteve French cifsFileInfo_put(open_file);
124638c8a9a5SSteve French }
124738c8a9a5SSteve French }
124838c8a9a5SSteve French
124938c8a9a5SSteve French int cifs_closedir(struct inode *inode, struct file *file)
125038c8a9a5SSteve French {
125138c8a9a5SSteve French int rc = 0;
125238c8a9a5SSteve French unsigned int xid;
125338c8a9a5SSteve French struct cifsFileInfo *cfile = file->private_data;
125438c8a9a5SSteve French struct cifs_tcon *tcon;
125538c8a9a5SSteve French struct TCP_Server_Info *server;
125638c8a9a5SSteve French char *buf;
125738c8a9a5SSteve French
125838c8a9a5SSteve French cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
125938c8a9a5SSteve French
126038c8a9a5SSteve French if (cfile == NULL)
126138c8a9a5SSteve French return rc;
126238c8a9a5SSteve French
126338c8a9a5SSteve French xid = get_xid();
126438c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink);
126538c8a9a5SSteve French server = tcon->ses->server;
126638c8a9a5SSteve French
126738c8a9a5SSteve French cifs_dbg(FYI, "Freeing private data in close dir\n");
126838c8a9a5SSteve French spin_lock(&cfile->file_info_lock);
126938c8a9a5SSteve French if (server->ops->dir_needs_close(cfile)) {
127038c8a9a5SSteve French cfile->invalidHandle = true;
127138c8a9a5SSteve French spin_unlock(&cfile->file_info_lock);
127238c8a9a5SSteve French if (server->ops->close_dir)
127338c8a9a5SSteve French rc = server->ops->close_dir(xid, tcon, &cfile->fid);
127438c8a9a5SSteve French else
127538c8a9a5SSteve French rc = -ENOSYS;
127638c8a9a5SSteve French cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
127738c8a9a5SSteve French /* not much we can do if it fails anyway, ignore rc */
127838c8a9a5SSteve French rc = 0;
127938c8a9a5SSteve French } else
128038c8a9a5SSteve French spin_unlock(&cfile->file_info_lock);
128138c8a9a5SSteve French
128238c8a9a5SSteve French buf = cfile->srch_inf.ntwrk_buf_start;
128338c8a9a5SSteve French if (buf) {
128438c8a9a5SSteve French cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
128538c8a9a5SSteve French cfile->srch_inf.ntwrk_buf_start = NULL;
128638c8a9a5SSteve French if (cfile->srch_inf.smallBuf)
128738c8a9a5SSteve French cifs_small_buf_release(buf);
128838c8a9a5SSteve French else
128938c8a9a5SSteve French cifs_buf_release(buf);
129038c8a9a5SSteve French }
129138c8a9a5SSteve French
129238c8a9a5SSteve French cifs_put_tlink(cfile->tlink);
129338c8a9a5SSteve French kfree(file->private_data);
129438c8a9a5SSteve French file->private_data = NULL;
129538c8a9a5SSteve French /* BB can we lock the filestruct while this is going on? */
129638c8a9a5SSteve French free_xid(xid);
129738c8a9a5SSteve French return rc;
129838c8a9a5SSteve French }
129938c8a9a5SSteve French
130038c8a9a5SSteve French static struct cifsLockInfo *
130138c8a9a5SSteve French cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
130238c8a9a5SSteve French {
130338c8a9a5SSteve French struct cifsLockInfo *lock =
130438c8a9a5SSteve French kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
130538c8a9a5SSteve French if (!lock)
130638c8a9a5SSteve French return lock;
130738c8a9a5SSteve French lock->offset = offset;
130838c8a9a5SSteve French lock->length = length;
130938c8a9a5SSteve French lock->type = type;
131038c8a9a5SSteve French lock->pid = current->tgid;
131138c8a9a5SSteve French lock->flags = flags;
131238c8a9a5SSteve French INIT_LIST_HEAD(&lock->blist);
131338c8a9a5SSteve French init_waitqueue_head(&lock->block_q);
131438c8a9a5SSteve French return lock;
131538c8a9a5SSteve French }
131638c8a9a5SSteve French
131738c8a9a5SSteve French void
131838c8a9a5SSteve French cifs_del_lock_waiters(struct cifsLockInfo *lock)
131938c8a9a5SSteve French {
132038c8a9a5SSteve French struct cifsLockInfo *li, *tmp;
132138c8a9a5SSteve French list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
132238c8a9a5SSteve French list_del_init(&li->blist);
132338c8a9a5SSteve French wake_up(&li->block_q);
132438c8a9a5SSteve French }
132538c8a9a5SSteve French }
132638c8a9a5SSteve French
132738c8a9a5SSteve French #define CIFS_LOCK_OP 0
132838c8a9a5SSteve French #define CIFS_READ_OP 1
132938c8a9a5SSteve French #define CIFS_WRITE_OP 2
133038c8a9a5SSteve French
133138c8a9a5SSteve French /* @rw_check : 0 - lock op, 1 - read op, 2 - write op */
133238c8a9a5SSteve French static bool
133338c8a9a5SSteve French cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
133438c8a9a5SSteve French __u64 length, __u8 type, __u16 flags,
133538c8a9a5SSteve French struct cifsFileInfo *cfile,
133638c8a9a5SSteve French struct cifsLockInfo **conf_lock, int rw_check)
133738c8a9a5SSteve French {
133838c8a9a5SSteve French struct cifsLockInfo *li;
133938c8a9a5SSteve French struct cifsFileInfo *cur_cfile = fdlocks->cfile;
134038c8a9a5SSteve French struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
134138c8a9a5SSteve French
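/*
 * Conflict rules, as implemented below: ranges that do not overlap
 * never conflict; a lock held by the same process through the same fid
 * conflicts only when a write is checked against a shared (read) lock;
 * a shared request does not conflict with a lock of the same type, nor
 * with any lock held by the same process through this fid; and for
 * lock-setting checks, two OFD locks on the same fid do not conflict.
 */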
134238c8a9a5SSteve French list_for_each_entry(li, &fdlocks->locks, llist) {
134338c8a9a5SSteve French if (offset + length <= li->offset ||
134438c8a9a5SSteve French offset >= li->offset + li->length)
134538c8a9a5SSteve French continue;
134638c8a9a5SSteve French if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
134738c8a9a5SSteve French server->ops->compare_fids(cfile, cur_cfile)) {
134838c8a9a5SSteve French /* shared lock prevents write op through the same fid */
134938c8a9a5SSteve French if (!(li->type & server->vals->shared_lock_type) ||
135038c8a9a5SSteve French rw_check != CIFS_WRITE_OP)
135138c8a9a5SSteve French continue;
135238c8a9a5SSteve French }
135338c8a9a5SSteve French if ((type & server->vals->shared_lock_type) &&
135438c8a9a5SSteve French ((server->ops->compare_fids(cfile, cur_cfile) &&
135538c8a9a5SSteve French current->tgid == li->pid) || type == li->type))
135638c8a9a5SSteve French continue;
135738c8a9a5SSteve French if (rw_check == CIFS_LOCK_OP &&
135838c8a9a5SSteve French (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
135938c8a9a5SSteve French server->ops->compare_fids(cfile, cur_cfile))
136038c8a9a5SSteve French continue;
136138c8a9a5SSteve French if (conf_lock)
136238c8a9a5SSteve French *conf_lock = li;
136338c8a9a5SSteve French return true;
136438c8a9a5SSteve French }
136538c8a9a5SSteve French return false;
136638c8a9a5SSteve French }
136738c8a9a5SSteve French
136838c8a9a5SSteve French bool
136938c8a9a5SSteve French cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
137038c8a9a5SSteve French __u8 type, __u16 flags,
137138c8a9a5SSteve French struct cifsLockInfo **conf_lock, int rw_check)
137238c8a9a5SSteve French {
137338c8a9a5SSteve French bool rc = false;
137438c8a9a5SSteve French struct cifs_fid_locks *cur;
137538c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
137638c8a9a5SSteve French
137738c8a9a5SSteve French list_for_each_entry(cur, &cinode->llist, llist) {
137838c8a9a5SSteve French rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
137938c8a9a5SSteve French flags, cfile, conf_lock,
138038c8a9a5SSteve French rw_check);
138138c8a9a5SSteve French if (rc)
138238c8a9a5SSteve French break;
138338c8a9a5SSteve French }
138438c8a9a5SSteve French
138538c8a9a5SSteve French return rc;
138638c8a9a5SSteve French }
138738c8a9a5SSteve French
138838c8a9a5SSteve French /*
138938c8a9a5SSteve French * Check if there is another lock that prevents us from setting the lock
139038c8a9a5SSteve French * (mandatory style). If such a lock exists, update the flock structure
139138c8a9a5SSteve French * with its properties. Otherwise, set the flock type to F_UNLCK if we can
139238c8a9a5SSteve French * cache brlocks, or leave it unchanged if we can't. Returns 0 if we don't
139338c8a9a5SSteve French * need to contact the server, or 1 otherwise.
139438c8a9a5SSteve French */
139538c8a9a5SSteve French static int
139638c8a9a5SSteve French cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
139738c8a9a5SSteve French __u8 type, struct file_lock *flock)
139838c8a9a5SSteve French {
139938c8a9a5SSteve French int rc = 0;
140038c8a9a5SSteve French struct cifsLockInfo *conf_lock;
140138c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
140238c8a9a5SSteve French struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
140338c8a9a5SSteve French bool exist;
140438c8a9a5SSteve French
140538c8a9a5SSteve French down_read(&cinode->lock_sem);
140638c8a9a5SSteve French
140738c8a9a5SSteve French exist = cifs_find_lock_conflict(cfile, offset, length, type,
140838c8a9a5SSteve French flock->fl_flags, &conf_lock,
140938c8a9a5SSteve French CIFS_LOCK_OP);
141038c8a9a5SSteve French if (exist) {
141138c8a9a5SSteve French flock->fl_start = conf_lock->offset;
141238c8a9a5SSteve French flock->fl_end = conf_lock->offset + conf_lock->length - 1;
141338c8a9a5SSteve French flock->fl_pid = conf_lock->pid;
141438c8a9a5SSteve French if (conf_lock->type & server->vals->shared_lock_type)
141538c8a9a5SSteve French flock->fl_type = F_RDLCK;
141638c8a9a5SSteve French else
141738c8a9a5SSteve French flock->fl_type = F_WRLCK;
141838c8a9a5SSteve French } else if (!cinode->can_cache_brlcks)
141938c8a9a5SSteve French rc = 1;
142038c8a9a5SSteve French else
142138c8a9a5SSteve French flock->fl_type = F_UNLCK;
142238c8a9a5SSteve French
142338c8a9a5SSteve French up_read(&cinode->lock_sem);
142438c8a9a5SSteve French return rc;
142538c8a9a5SSteve French }
142638c8a9a5SSteve French
142738c8a9a5SSteve French static void
142838c8a9a5SSteve French cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
142938c8a9a5SSteve French {
143038c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
143138c8a9a5SSteve French cifs_down_write(&cinode->lock_sem);
143238c8a9a5SSteve French list_add_tail(&lock->llist, &cfile->llist->locks);
143338c8a9a5SSteve French up_write(&cinode->lock_sem);
143438c8a9a5SSteve French }
143538c8a9a5SSteve French
143638c8a9a5SSteve French /*
143738c8a9a5SSteve French * Set the byte-range lock (mandatory style). Returns:
143838c8a9a5SSteve French * 1) 0, if we set the lock and don't need to send a request to the server;
143938c8a9a5SSteve French * 2) 1, if no locks prevent us but we need to send the request to the server;
144038c8a9a5SSteve French * 3) -EACCES, if there is a lock that prevents us and wait is false.
144138c8a9a5SSteve French */
144238c8a9a5SSteve French static int
144338c8a9a5SSteve French cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
144438c8a9a5SSteve French bool wait)
144538c8a9a5SSteve French {
144638c8a9a5SSteve French struct cifsLockInfo *conf_lock;
144738c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
144838c8a9a5SSteve French bool exist;
144938c8a9a5SSteve French int rc = 0;
145038c8a9a5SSteve French
145138c8a9a5SSteve French try_again:
145238c8a9a5SSteve French exist = false;
145338c8a9a5SSteve French cifs_down_write(&cinode->lock_sem);
145438c8a9a5SSteve French
145538c8a9a5SSteve French exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
145638c8a9a5SSteve French lock->type, lock->flags, &conf_lock,
145738c8a9a5SSteve French CIFS_LOCK_OP);
145838c8a9a5SSteve French if (!exist && cinode->can_cache_brlcks) {
145938c8a9a5SSteve French list_add_tail(&lock->llist, &cfile->llist->locks);
146038c8a9a5SSteve French up_write(&cinode->lock_sem);
146138c8a9a5SSteve French return rc;
146238c8a9a5SSteve French }
146338c8a9a5SSteve French
146438c8a9a5SSteve French if (!exist)
146538c8a9a5SSteve French rc = 1;
146638c8a9a5SSteve French else if (!wait)
146738c8a9a5SSteve French rc = -EACCES;
146838c8a9a5SSteve French else {
146938c8a9a5SSteve French list_add_tail(&lock->blist, &conf_lock->blist);
147038c8a9a5SSteve French up_write(&cinode->lock_sem);
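/*
 * Wait on the conflicting lock's block queue; cifs_del_lock_waiters()
 * removes us from blist and wakes block_q when that lock goes away,
 * after which the whole conflict check is retried.
 */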
147138c8a9a5SSteve French rc = wait_event_interruptible(lock->block_q,
147238c8a9a5SSteve French (lock->blist.prev == &lock->blist) &&
147338c8a9a5SSteve French (lock->blist.next == &lock->blist));
147438c8a9a5SSteve French if (!rc)
147538c8a9a5SSteve French goto try_again;
147638c8a9a5SSteve French cifs_down_write(&cinode->lock_sem);
147738c8a9a5SSteve French list_del_init(&lock->blist);
147838c8a9a5SSteve French }
147938c8a9a5SSteve French
148038c8a9a5SSteve French up_write(&cinode->lock_sem);
148138c8a9a5SSteve French return rc;
148238c8a9a5SSteve French }
148338c8a9a5SSteve French
148438c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
148538c8a9a5SSteve French /*
148638c8a9a5SSteve French * Check if there is another lock that prevents us from setting the lock
148738c8a9a5SSteve French * (posix style). If such a lock exists, update the flock structure with
148838c8a9a5SSteve French * its properties. Otherwise, set the flock type to F_UNLCK if we can
148938c8a9a5SSteve French * cache brlocks, or leave it unchanged if we can't. Returns 0 if we don't
149038c8a9a5SSteve French * need to contact the server, or 1 otherwise.
149138c8a9a5SSteve French */
149238c8a9a5SSteve French static int
149338c8a9a5SSteve French cifs_posix_lock_test(struct file *file, struct file_lock *flock)
149438c8a9a5SSteve French {
149538c8a9a5SSteve French int rc = 0;
149638c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
149738c8a9a5SSteve French unsigned char saved_type = flock->fl_type;
149838c8a9a5SSteve French
149938c8a9a5SSteve French if ((flock->fl_flags & FL_POSIX) == 0)
150038c8a9a5SSteve French return 1;
150138c8a9a5SSteve French
150238c8a9a5SSteve French down_read(&cinode->lock_sem);
150338c8a9a5SSteve French posix_test_lock(file, flock);
150438c8a9a5SSteve French
150538c8a9a5SSteve French if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
150638c8a9a5SSteve French flock->fl_type = saved_type;
150738c8a9a5SSteve French rc = 1;
150838c8a9a5SSteve French }
150938c8a9a5SSteve French
151038c8a9a5SSteve French up_read(&cinode->lock_sem);
151138c8a9a5SSteve French return rc;
151238c8a9a5SSteve French }
151338c8a9a5SSteve French
151438c8a9a5SSteve French /*
151538c8a9a5SSteve French * Set the byte-range lock (posix style). Returns:
151638c8a9a5SSteve French * 1) <0, if an error occurs while setting the lock;
151738c8a9a5SSteve French * 2) 0, if we set the lock and don't need to send a request to the server;
151838c8a9a5SSteve French * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
151938c8a9a5SSteve French * 4) FILE_LOCK_DEFERRED + 1, if we need to send the request to the server.
152038c8a9a5SSteve French */
152138c8a9a5SSteve French static int
152238c8a9a5SSteve French cifs_posix_lock_set(struct file *file, struct file_lock *flock)
152338c8a9a5SSteve French {
152438c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
152538c8a9a5SSteve French int rc = FILE_LOCK_DEFERRED + 1;
152638c8a9a5SSteve French
152738c8a9a5SSteve French if ((flock->fl_flags & FL_POSIX) == 0)
152838c8a9a5SSteve French return rc;
152938c8a9a5SSteve French
153038c8a9a5SSteve French cifs_down_write(&cinode->lock_sem);
153138c8a9a5SSteve French if (!cinode->can_cache_brlcks) {
153238c8a9a5SSteve French up_write(&cinode->lock_sem);
153338c8a9a5SSteve French return rc;
153438c8a9a5SSteve French }
153538c8a9a5SSteve French
153638c8a9a5SSteve French rc = posix_lock_file(file, flock, NULL);
153738c8a9a5SSteve French up_write(&cinode->lock_sem);
153838c8a9a5SSteve French return rc;
153938c8a9a5SSteve French }
154038c8a9a5SSteve French
154138c8a9a5SSteve French int
154238c8a9a5SSteve French cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
154338c8a9a5SSteve French {
154438c8a9a5SSteve French unsigned int xid;
154538c8a9a5SSteve French int rc = 0, stored_rc;
154638c8a9a5SSteve French struct cifsLockInfo *li, *tmp;
154738c8a9a5SSteve French struct cifs_tcon *tcon;
154838c8a9a5SSteve French unsigned int num, max_num, max_buf;
154938c8a9a5SSteve French LOCKING_ANDX_RANGE *buf, *cur;
155038c8a9a5SSteve French static const int types[] = {
155138c8a9a5SSteve French LOCKING_ANDX_LARGE_FILES,
155238c8a9a5SSteve French LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
155338c8a9a5SSteve French };
155438c8a9a5SSteve French int i;
155538c8a9a5SSteve French
155638c8a9a5SSteve French xid = get_xid();
155738c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink);
155838c8a9a5SSteve French
155938c8a9a5SSteve French /*
156038c8a9a5SSteve French * Accessing maxBuf is racy with cifs_reconnect - need to store value
156138c8a9a5SSteve French * and check it before using.
156238c8a9a5SSteve French */
156338c8a9a5SSteve French max_buf = tcon->ses->server->maxBuf;
156438c8a9a5SSteve French if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
156538c8a9a5SSteve French free_xid(xid);
156638c8a9a5SSteve French return -EINVAL;
156738c8a9a5SSteve French }
156838c8a9a5SSteve French
156938c8a9a5SSteve French BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
157038c8a9a5SSteve French PAGE_SIZE);
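/*
 * Work out how many LOCKING_ANDX_RANGE entries fit in one request:
 * cap the buffer at a page, subtract the SMB header, and send the
 * cached locks to the server in batches of at most max_num.
 */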
157138c8a9a5SSteve French max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
157238c8a9a5SSteve French PAGE_SIZE);
157338c8a9a5SSteve French max_num = (max_buf - sizeof(struct smb_hdr)) /
157438c8a9a5SSteve French sizeof(LOCKING_ANDX_RANGE);
157538c8a9a5SSteve French buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
157638c8a9a5SSteve French if (!buf) {
157738c8a9a5SSteve French free_xid(xid);
157838c8a9a5SSteve French return -ENOMEM;
157938c8a9a5SSteve French }
158038c8a9a5SSteve French
158138c8a9a5SSteve French for (i = 0; i < 2; i++) {
158238c8a9a5SSteve French cur = buf;
158338c8a9a5SSteve French num = 0;
158438c8a9a5SSteve French list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
158538c8a9a5SSteve French if (li->type != types[i])
158638c8a9a5SSteve French continue;
158738c8a9a5SSteve French cur->Pid = cpu_to_le16(li->pid);
158838c8a9a5SSteve French cur->LengthLow = cpu_to_le32((u32)li->length);
158938c8a9a5SSteve French cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
159038c8a9a5SSteve French cur->OffsetLow = cpu_to_le32((u32)li->offset);
159138c8a9a5SSteve French cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
159238c8a9a5SSteve French if (++num == max_num) {
159338c8a9a5SSteve French stored_rc = cifs_lockv(xid, tcon,
159438c8a9a5SSteve French cfile->fid.netfid,
159538c8a9a5SSteve French (__u8)li->type, 0, num,
159638c8a9a5SSteve French buf);
159738c8a9a5SSteve French if (stored_rc)
159838c8a9a5SSteve French rc = stored_rc;
159938c8a9a5SSteve French cur = buf;
160038c8a9a5SSteve French num = 0;
160138c8a9a5SSteve French } else
160238c8a9a5SSteve French cur++;
160338c8a9a5SSteve French }
160438c8a9a5SSteve French
160538c8a9a5SSteve French if (num) {
160638c8a9a5SSteve French stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
160738c8a9a5SSteve French (__u8)types[i], 0, num, buf);
160838c8a9a5SSteve French if (stored_rc)
160938c8a9a5SSteve French rc = stored_rc;
161038c8a9a5SSteve French }
161138c8a9a5SSteve French }
161238c8a9a5SSteve French
161338c8a9a5SSteve French kfree(buf);
161438c8a9a5SSteve French free_xid(xid);
161538c8a9a5SSteve French return rc;
161638c8a9a5SSteve French }
161738c8a9a5SSteve French
161838c8a9a5SSteve French static __u32
161938c8a9a5SSteve French hash_lockowner(fl_owner_t owner)
162038c8a9a5SSteve French {
162138c8a9a5SSteve French return cifs_lock_secret ^ hash32_ptr((const void *)owner);
162238c8a9a5SSteve French }
162338c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
162438c8a9a5SSteve French
162538c8a9a5SSteve French struct lock_to_push {
162638c8a9a5SSteve French struct list_head llist;
162738c8a9a5SSteve French __u64 offset;
162838c8a9a5SSteve French __u64 length;
162938c8a9a5SSteve French __u32 pid;
163038c8a9a5SSteve French __u16 netfid;
163138c8a9a5SSteve French __u8 type;
163238c8a9a5SSteve French };
163338c8a9a5SSteve French
163438c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
163538c8a9a5SSteve French static int
163638c8a9a5SSteve French cifs_push_posix_locks(struct cifsFileInfo *cfile)
163738c8a9a5SSteve French {
163838c8a9a5SSteve French struct inode *inode = d_inode(cfile->dentry);
163938c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
164038c8a9a5SSteve French struct file_lock *flock;
164138c8a9a5SSteve French struct file_lock_context *flctx = locks_inode_context(inode);
164238c8a9a5SSteve French unsigned int count = 0, i;
164338c8a9a5SSteve French int rc = 0, xid, type;
164438c8a9a5SSteve French struct list_head locks_to_send, *el;
164538c8a9a5SSteve French struct lock_to_push *lck, *tmp;
164638c8a9a5SSteve French __u64 length;
164738c8a9a5SSteve French
164838c8a9a5SSteve French xid = get_xid();
164938c8a9a5SSteve French
165038c8a9a5SSteve French if (!flctx)
165138c8a9a5SSteve French goto out;
165238c8a9a5SSteve French
165338c8a9a5SSteve French spin_lock(&flctx->flc_lock);
165438c8a9a5SSteve French list_for_each(el, &flctx->flc_posix) {
165538c8a9a5SSteve French count++;
165638c8a9a5SSteve French }
165738c8a9a5SSteve French spin_unlock(&flctx->flc_lock);
165838c8a9a5SSteve French
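/*
 * The lock_to_push entries are allocated here, outside flc_lock,
 * because that is a spinlock and GFP_KERNEL allocations may sleep;
 * the count taken above cannot grow in the meantime (see below).
 */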
165938c8a9a5SSteve French INIT_LIST_HEAD(&locks_to_send);
166038c8a9a5SSteve French
166138c8a9a5SSteve French /*
166238c8a9a5SSteve French * Allocating count locks is enough because no FL_POSIX locks can be
166338c8a9a5SSteve French * added to the list while we hold cinode->lock_sem, which protects
166438c8a9a5SSteve French * the locking operations of this inode.
166538c8a9a5SSteve French */
166638c8a9a5SSteve French for (i = 0; i < count; i++) {
166738c8a9a5SSteve French lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
166838c8a9a5SSteve French if (!lck) {
166938c8a9a5SSteve French rc = -ENOMEM;
167038c8a9a5SSteve French goto err_out;
167138c8a9a5SSteve French }
167238c8a9a5SSteve French list_add_tail(&lck->llist, &locks_to_send);
167338c8a9a5SSteve French }
167438c8a9a5SSteve French
167538c8a9a5SSteve French el = locks_to_send.next;
167638c8a9a5SSteve French spin_lock(&flctx->flc_lock);
167738c8a9a5SSteve French list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
167838c8a9a5SSteve French if (el == &locks_to_send) {
167938c8a9a5SSteve French /*
168038c8a9a5SSteve French * The list ended. We don't have enough allocated
168138c8a9a5SSteve French * structures - something is really wrong.
168238c8a9a5SSteve French */
168338c8a9a5SSteve French cifs_dbg(VFS, "Can't push all brlocks!\n");
168438c8a9a5SSteve French break;
168538c8a9a5SSteve French }
168638c8a9a5SSteve French length = cifs_flock_len(flock);
168738c8a9a5SSteve French if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
168838c8a9a5SSteve French type = CIFS_RDLCK;
168938c8a9a5SSteve French else
169038c8a9a5SSteve French type = CIFS_WRLCK;
169138c8a9a5SSteve French lck = list_entry(el, struct lock_to_push, llist);
169238c8a9a5SSteve French lck->pid = hash_lockowner(flock->fl_owner);
169338c8a9a5SSteve French lck->netfid = cfile->fid.netfid;
169438c8a9a5SSteve French lck->length = length;
169538c8a9a5SSteve French lck->type = type;
169638c8a9a5SSteve French lck->offset = flock->fl_start;
169738c8a9a5SSteve French }
169838c8a9a5SSteve French spin_unlock(&flctx->flc_lock);
169938c8a9a5SSteve French
170038c8a9a5SSteve French list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
170138c8a9a5SSteve French int stored_rc;
170238c8a9a5SSteve French
170338c8a9a5SSteve French stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
170438c8a9a5SSteve French lck->offset, lck->length, NULL,
170538c8a9a5SSteve French lck->type, 0);
170638c8a9a5SSteve French if (stored_rc)
170738c8a9a5SSteve French rc = stored_rc;
170838c8a9a5SSteve French list_del(&lck->llist);
170938c8a9a5SSteve French kfree(lck);
171038c8a9a5SSteve French }
171138c8a9a5SSteve French
171238c8a9a5SSteve French out:
171338c8a9a5SSteve French free_xid(xid);
171438c8a9a5SSteve French return rc;
171538c8a9a5SSteve French err_out:
171638c8a9a5SSteve French list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
171738c8a9a5SSteve French list_del(&lck->llist);
171838c8a9a5SSteve French kfree(lck);
171938c8a9a5SSteve French }
172038c8a9a5SSteve French goto out;
172138c8a9a5SSteve French }
172238c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
172338c8a9a5SSteve French
172438c8a9a5SSteve French static int
172538c8a9a5SSteve French cifs_push_locks(struct cifsFileInfo *cfile)
172638c8a9a5SSteve French {
172738c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
172838c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
172938c8a9a5SSteve French int rc = 0;
173038c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
173138c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
173238c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
173338c8a9a5SSteve French
173438c8a9a5SSteve French /* we are going to update can_cache_brlcks here - need write access */
173538c8a9a5SSteve French cifs_down_write(&cinode->lock_sem);
173638c8a9a5SSteve French if (!cinode->can_cache_brlcks) {
173738c8a9a5SSteve French up_write(&cinode->lock_sem);
173838c8a9a5SSteve French return rc;
173938c8a9a5SSteve French }
174038c8a9a5SSteve French
174138c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
174238c8a9a5SSteve French if (cap_unix(tcon->ses) &&
174338c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
174438c8a9a5SSteve French ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
174538c8a9a5SSteve French rc = cifs_push_posix_locks(cfile);
174638c8a9a5SSteve French else
174738c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
174838c8a9a5SSteve French rc = tcon->ses->server->ops->push_mand_locks(cfile);
174938c8a9a5SSteve French
175038c8a9a5SSteve French cinode->can_cache_brlcks = false;
175138c8a9a5SSteve French up_write(&cinode->lock_sem);
175238c8a9a5SSteve French return rc;
175338c8a9a5SSteve French }
175438c8a9a5SSteve French
175538c8a9a5SSteve French static void
175638c8a9a5SSteve French cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
175738c8a9a5SSteve French bool *wait_flag, struct TCP_Server_Info *server)
175838c8a9a5SSteve French {
175938c8a9a5SSteve French if (flock->fl_flags & FL_POSIX)
176038c8a9a5SSteve French cifs_dbg(FYI, "Posix\n");
176138c8a9a5SSteve French if (flock->fl_flags & FL_FLOCK)
176238c8a9a5SSteve French cifs_dbg(FYI, "Flock\n");
176338c8a9a5SSteve French if (flock->fl_flags & FL_SLEEP) {
176438c8a9a5SSteve French cifs_dbg(FYI, "Blocking lock\n");
176538c8a9a5SSteve French *wait_flag = true;
176638c8a9a5SSteve French }
176738c8a9a5SSteve French if (flock->fl_flags & FL_ACCESS)
176838c8a9a5SSteve French cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
176938c8a9a5SSteve French if (flock->fl_flags & FL_LEASE)
177038c8a9a5SSteve French cifs_dbg(FYI, "Lease on file - not implemented yet\n");
177138c8a9a5SSteve French if (flock->fl_flags &
177238c8a9a5SSteve French (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
177338c8a9a5SSteve French FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
177438c8a9a5SSteve French cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
177538c8a9a5SSteve French
177638c8a9a5SSteve French *type = server->vals->large_lock_type;
177738c8a9a5SSteve French if (flock->fl_type == F_WRLCK) {
177838c8a9a5SSteve French cifs_dbg(FYI, "F_WRLCK\n");
177938c8a9a5SSteve French *type |= server->vals->exclusive_lock_type;
178038c8a9a5SSteve French *lock = 1;
178138c8a9a5SSteve French } else if (flock->fl_type == F_UNLCK) {
178238c8a9a5SSteve French cifs_dbg(FYI, "F_UNLCK\n");
178338c8a9a5SSteve French *type |= server->vals->unlock_lock_type;
178438c8a9a5SSteve French *unlock = 1;
178538c8a9a5SSteve French /* Check if unlock includes more than one lock range */
178638c8a9a5SSteve French } else if (flock->fl_type == F_RDLCK) {
178738c8a9a5SSteve French cifs_dbg(FYI, "F_RDLCK\n");
178838c8a9a5SSteve French *type |= server->vals->shared_lock_type;
178938c8a9a5SSteve French *lock = 1;
179038c8a9a5SSteve French } else if (flock->fl_type == F_EXLCK) {
179138c8a9a5SSteve French cifs_dbg(FYI, "F_EXLCK\n");
179238c8a9a5SSteve French *type |= server->vals->exclusive_lock_type;
179338c8a9a5SSteve French *lock = 1;
179438c8a9a5SSteve French } else if (flock->fl_type == F_SHLCK) {
179538c8a9a5SSteve French cifs_dbg(FYI, "F_SHLCK\n");
179638c8a9a5SSteve French *type |= server->vals->shared_lock_type;
179738c8a9a5SSteve French *lock = 1;
179838c8a9a5SSteve French } else
179938c8a9a5SSteve French cifs_dbg(FYI, "Unknown type of lock\n");
180038c8a9a5SSteve French }
180138c8a9a5SSteve French
180238c8a9a5SSteve French static int
180338c8a9a5SSteve French cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
180438c8a9a5SSteve French bool wait_flag, bool posix_lck, unsigned int xid)
180538c8a9a5SSteve French {
180638c8a9a5SSteve French int rc = 0;
180738c8a9a5SSteve French __u64 length = cifs_flock_len(flock);
180838c8a9a5SSteve French struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
180938c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
181038c8a9a5SSteve French struct TCP_Server_Info *server = tcon->ses->server;
181138c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
181238c8a9a5SSteve French __u16 netfid = cfile->fid.netfid;
181338c8a9a5SSteve French
181438c8a9a5SSteve French if (posix_lck) {
181538c8a9a5SSteve French int posix_lock_type;
181638c8a9a5SSteve French
181738c8a9a5SSteve French rc = cifs_posix_lock_test(file, flock);
181838c8a9a5SSteve French if (!rc)
181938c8a9a5SSteve French return rc;
182038c8a9a5SSteve French
182138c8a9a5SSteve French if (type & server->vals->shared_lock_type)
182238c8a9a5SSteve French posix_lock_type = CIFS_RDLCK;
182338c8a9a5SSteve French else
182438c8a9a5SSteve French posix_lock_type = CIFS_WRLCK;
182538c8a9a5SSteve French rc = CIFSSMBPosixLock(xid, tcon, netfid,
182638c8a9a5SSteve French hash_lockowner(flock->fl_owner),
182738c8a9a5SSteve French flock->fl_start, length, flock,
182838c8a9a5SSteve French posix_lock_type, wait_flag);
182938c8a9a5SSteve French return rc;
183038c8a9a5SSteve French }
183138c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
183238c8a9a5SSteve French
183338c8a9a5SSteve French rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
183438c8a9a5SSteve French if (!rc)
183538c8a9a5SSteve French return rc;
183638c8a9a5SSteve French
183738c8a9a5SSteve French /* BB we could chain these into one lock request BB */
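/*
 * Probe the range by trying to take the lock: if the server grants it,
 * nothing conflicts, so release it again and report F_UNLCK. If it is
 * denied, the probes below work out whether the holder has a read or a
 * write lock.
 */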
183838c8a9a5SSteve French rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
183938c8a9a5SSteve French 1, 0, false);
184038c8a9a5SSteve French if (rc == 0) {
184138c8a9a5SSteve French rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
184238c8a9a5SSteve French type, 0, 1, false);
184338c8a9a5SSteve French flock->fl_type = F_UNLCK;
184438c8a9a5SSteve French if (rc != 0)
184538c8a9a5SSteve French cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
184638c8a9a5SSteve French rc);
184738c8a9a5SSteve French return 0;
184838c8a9a5SSteve French }
184938c8a9a5SSteve French
185038c8a9a5SSteve French if (type & server->vals->shared_lock_type) {
185138c8a9a5SSteve French flock->fl_type = F_WRLCK;
185238c8a9a5SSteve French return 0;
185338c8a9a5SSteve French }
185438c8a9a5SSteve French
185538c8a9a5SSteve French type &= ~server->vals->exclusive_lock_type;
185638c8a9a5SSteve French
185738c8a9a5SSteve French rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
185838c8a9a5SSteve French type | server->vals->shared_lock_type,
185938c8a9a5SSteve French 1, 0, false);
186038c8a9a5SSteve French if (rc == 0) {
186138c8a9a5SSteve French rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
186238c8a9a5SSteve French type | server->vals->shared_lock_type, 0, 1, false);
186338c8a9a5SSteve French flock->fl_type = F_RDLCK;
186438c8a9a5SSteve French if (rc != 0)
186538c8a9a5SSteve French cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
186638c8a9a5SSteve French rc);
186738c8a9a5SSteve French } else
186838c8a9a5SSteve French flock->fl_type = F_WRLCK;
186938c8a9a5SSteve French
187038c8a9a5SSteve French return 0;
187138c8a9a5SSteve French }
187238c8a9a5SSteve French
187338c8a9a5SSteve French void
187438c8a9a5SSteve French cifs_move_llist(struct list_head *source, struct list_head *dest)
187538c8a9a5SSteve French {
187638c8a9a5SSteve French struct list_head *li, *tmp;
187738c8a9a5SSteve French list_for_each_safe(li, tmp, source)
187838c8a9a5SSteve French list_move(li, dest);
187938c8a9a5SSteve French }
188038c8a9a5SSteve French
188138c8a9a5SSteve French void
188238c8a9a5SSteve French cifs_free_llist(struct list_head *llist)
188338c8a9a5SSteve French {
188438c8a9a5SSteve French struct cifsLockInfo *li, *tmp;
188538c8a9a5SSteve French list_for_each_entry_safe(li, tmp, llist, llist) {
188638c8a9a5SSteve French cifs_del_lock_waiters(li);
188738c8a9a5SSteve French list_del(&li->llist);
188838c8a9a5SSteve French kfree(li);
188938c8a9a5SSteve French }
189038c8a9a5SSteve French }
189138c8a9a5SSteve French
189238c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
189338c8a9a5SSteve French int
189438c8a9a5SSteve French cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
189538c8a9a5SSteve French unsigned int xid)
189638c8a9a5SSteve French {
189738c8a9a5SSteve French int rc = 0, stored_rc;
189838c8a9a5SSteve French static const int types[] = {
189938c8a9a5SSteve French LOCKING_ANDX_LARGE_FILES,
190038c8a9a5SSteve French LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
190138c8a9a5SSteve French };
190238c8a9a5SSteve French unsigned int i;
190338c8a9a5SSteve French unsigned int max_num, num, max_buf;
190438c8a9a5SSteve French LOCKING_ANDX_RANGE *buf, *cur;
190538c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
190638c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
190738c8a9a5SSteve French struct cifsLockInfo *li, *tmp;
190838c8a9a5SSteve French __u64 length = cifs_flock_len(flock);
190938c8a9a5SSteve French struct list_head tmp_llist;
191038c8a9a5SSteve French
191138c8a9a5SSteve French INIT_LIST_HEAD(&tmp_llist);
191238c8a9a5SSteve French
191338c8a9a5SSteve French /*
191438c8a9a5SSteve French * Accessing maxBuf is racy with cifs_reconnect - need to store value
191538c8a9a5SSteve French * and check it before using.
191638c8a9a5SSteve French */
191738c8a9a5SSteve French max_buf = tcon->ses->server->maxBuf;
191838c8a9a5SSteve French if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
191938c8a9a5SSteve French return -EINVAL;
192038c8a9a5SSteve French
192138c8a9a5SSteve French BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
192238c8a9a5SSteve French PAGE_SIZE);
192338c8a9a5SSteve French max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
192438c8a9a5SSteve French PAGE_SIZE);
192538c8a9a5SSteve French max_num = (max_buf - sizeof(struct smb_hdr)) /
192638c8a9a5SSteve French sizeof(LOCKING_ANDX_RANGE);
192738c8a9a5SSteve French buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
192838c8a9a5SSteve French if (!buf)
192938c8a9a5SSteve French return -ENOMEM;
193038c8a9a5SSteve French
193138c8a9a5SSteve French cifs_down_write(&cinode->lock_sem);
193238c8a9a5SSteve French for (i = 0; i < 2; i++) {
193338c8a9a5SSteve French cur = buf;
193438c8a9a5SSteve French num = 0;
193538c8a9a5SSteve French list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
193638c8a9a5SSteve French if (flock->fl_start > li->offset ||
193738c8a9a5SSteve French (flock->fl_start + length) <
193838c8a9a5SSteve French (li->offset + li->length))
193938c8a9a5SSteve French continue;
194038c8a9a5SSteve French if (current->tgid != li->pid)
194138c8a9a5SSteve French continue;
194238c8a9a5SSteve French if (types[i] != li->type)
194338c8a9a5SSteve French continue;
194438c8a9a5SSteve French if (cinode->can_cache_brlcks) {
194538c8a9a5SSteve French /*
194638c8a9a5SSteve French * We can cache brlock requests - simply remove
194738c8a9a5SSteve French * a lock from the file's list.
194838c8a9a5SSteve French */
194938c8a9a5SSteve French list_del(&li->llist);
195038c8a9a5SSteve French cifs_del_lock_waiters(li);
195138c8a9a5SSteve French kfree(li);
195238c8a9a5SSteve French continue;
195338c8a9a5SSteve French }
195438c8a9a5SSteve French cur->Pid = cpu_to_le16(li->pid);
195538c8a9a5SSteve French cur->LengthLow = cpu_to_le32((u32)li->length);
195638c8a9a5SSteve French cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
195738c8a9a5SSteve French cur->OffsetLow = cpu_to_le32((u32)li->offset);
195838c8a9a5SSteve French cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
195938c8a9a5SSteve French /*
196038c8a9a5SSteve French * Save the lock here so that we can add it back to the
196138c8a9a5SSteve French * file's list if the unlock range request fails on the
196238c8a9a5SSteve French * server.
196338c8a9a5SSteve French */
196438c8a9a5SSteve French list_move(&li->llist, &tmp_llist);
196538c8a9a5SSteve French if (++num == max_num) {
196638c8a9a5SSteve French stored_rc = cifs_lockv(xid, tcon,
196738c8a9a5SSteve French cfile->fid.netfid,
196838c8a9a5SSteve French li->type, num, 0, buf);
196938c8a9a5SSteve French if (stored_rc) {
197038c8a9a5SSteve French /*
197138c8a9a5SSteve French * We failed on the unlock range
197238c8a9a5SSteve French * request - add all locks from the tmp
197338c8a9a5SSteve French * list to the head of the file's list.
197438c8a9a5SSteve French */
197538c8a9a5SSteve French cifs_move_llist(&tmp_llist,
197638c8a9a5SSteve French &cfile->llist->locks);
197738c8a9a5SSteve French rc = stored_rc;
197838c8a9a5SSteve French } else
197938c8a9a5SSteve French /*
198038c8a9a5SSteve French * The unlock range request succeeded -
198138c8a9a5SSteve French * free the tmp list.
198238c8a9a5SSteve French */
198338c8a9a5SSteve French cifs_free_llist(&tmp_llist);
198438c8a9a5SSteve French cur = buf;
198538c8a9a5SSteve French num = 0;
198638c8a9a5SSteve French } else
198738c8a9a5SSteve French cur++;
198838c8a9a5SSteve French }
198938c8a9a5SSteve French if (num) {
199038c8a9a5SSteve French stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
199138c8a9a5SSteve French types[i], num, 0, buf);
199238c8a9a5SSteve French if (stored_rc) {
199338c8a9a5SSteve French cifs_move_llist(&tmp_llist,
199438c8a9a5SSteve French &cfile->llist->locks);
199538c8a9a5SSteve French rc = stored_rc;
199638c8a9a5SSteve French } else
199738c8a9a5SSteve French cifs_free_llist(&tmp_llist);
199838c8a9a5SSteve French }
199938c8a9a5SSteve French }
200038c8a9a5SSteve French
200138c8a9a5SSteve French up_write(&cinode->lock_sem);
200238c8a9a5SSteve French kfree(buf);
200338c8a9a5SSteve French return rc;
200438c8a9a5SSteve French }
200538c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
200638c8a9a5SSteve French
200738c8a9a5SSteve French static int
200838c8a9a5SSteve French cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
200938c8a9a5SSteve French bool wait_flag, bool posix_lck, int lock, int unlock,
201038c8a9a5SSteve French unsigned int xid)
201138c8a9a5SSteve French {
201238c8a9a5SSteve French int rc = 0;
201338c8a9a5SSteve French __u64 length = cifs_flock_len(flock);
201438c8a9a5SSteve French struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
201538c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
201638c8a9a5SSteve French struct TCP_Server_Info *server = tcon->ses->server;
201738c8a9a5SSteve French struct inode *inode = d_inode(cfile->dentry);
201838c8a9a5SSteve French
201938c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
202038c8a9a5SSteve French if (posix_lck) {
202138c8a9a5SSteve French int posix_lock_type;
202238c8a9a5SSteve French
202338c8a9a5SSteve French rc = cifs_posix_lock_set(file, flock);
202438c8a9a5SSteve French if (rc <= FILE_LOCK_DEFERRED)
202538c8a9a5SSteve French return rc;
202638c8a9a5SSteve French
202738c8a9a5SSteve French if (type & server->vals->shared_lock_type)
202838c8a9a5SSteve French posix_lock_type = CIFS_RDLCK;
202938c8a9a5SSteve French else
203038c8a9a5SSteve French posix_lock_type = CIFS_WRLCK;
203138c8a9a5SSteve French
203238c8a9a5SSteve French if (unlock == 1)
203338c8a9a5SSteve French posix_lock_type = CIFS_UNLCK;
203438c8a9a5SSteve French
203538c8a9a5SSteve French rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
203638c8a9a5SSteve French hash_lockowner(flock->fl_owner),
203738c8a9a5SSteve French flock->fl_start, length,
203838c8a9a5SSteve French NULL, posix_lock_type, wait_flag);
203938c8a9a5SSteve French goto out;
204038c8a9a5SSteve French }
204138c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
204238c8a9a5SSteve French if (lock) {
204338c8a9a5SSteve French struct cifsLockInfo *lock;
204438c8a9a5SSteve French
204538c8a9a5SSteve French lock = cifs_lock_init(flock->fl_start, length, type,
204638c8a9a5SSteve French flock->fl_flags);
204738c8a9a5SSteve French if (!lock)
204838c8a9a5SSteve French return -ENOMEM;
204938c8a9a5SSteve French
205038c8a9a5SSteve French rc = cifs_lock_add_if(cfile, lock, wait_flag);
205138c8a9a5SSteve French if (rc < 0) {
205238c8a9a5SSteve French kfree(lock);
205338c8a9a5SSteve French return rc;
205438c8a9a5SSteve French }
205538c8a9a5SSteve French if (!rc)
205638c8a9a5SSteve French goto out;
205738c8a9a5SSteve French
205838c8a9a5SSteve French /*
205938c8a9a5SSteve French * A Windows 7 server can delay breaking a lease from read to None
206038c8a9a5SSteve French * when we set a byte-range lock on a file - break it explicitly
206138c8a9a5SSteve French * before sending the lock to the server, to be sure the next
206238c8a9a5SSteve French * read won't conflict with non-overlapping locks due to
206338c8a9a5SSteve French * page reading.
206438c8a9a5SSteve French */
206538c8a9a5SSteve French if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
206638c8a9a5SSteve French CIFS_CACHE_READ(CIFS_I(inode))) {
206738c8a9a5SSteve French cifs_zap_mapping(inode);
206838c8a9a5SSteve French cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
206938c8a9a5SSteve French inode);
207038c8a9a5SSteve French CIFS_I(inode)->oplock = 0;
207138c8a9a5SSteve French }
207238c8a9a5SSteve French
207338c8a9a5SSteve French rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
207438c8a9a5SSteve French type, 1, 0, wait_flag);
207538c8a9a5SSteve French if (rc) {
207638c8a9a5SSteve French kfree(lock);
207738c8a9a5SSteve French return rc;
207838c8a9a5SSteve French }
207938c8a9a5SSteve French
208038c8a9a5SSteve French cifs_lock_add(cfile, lock);
208138c8a9a5SSteve French } else if (unlock)
208238c8a9a5SSteve French rc = server->ops->mand_unlock_range(cfile, flock, xid);
208338c8a9a5SSteve French
208438c8a9a5SSteve French out:
208538c8a9a5SSteve French if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
208638c8a9a5SSteve French /*
208738c8a9a5SSteve French * If this is a request to remove all locks because we
208838c8a9a5SSteve French * are closing the file, it doesn't matter if the
208938c8a9a5SSteve French * unlocking failed as both cifs.ko and the SMB server
209038c8a9a5SSteve French * remove the lock on file close
209138c8a9a5SSteve French */
209238c8a9a5SSteve French if (rc) {
209338c8a9a5SSteve French cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
209438c8a9a5SSteve French if (!(flock->fl_flags & FL_CLOSE))
209538c8a9a5SSteve French return rc;
209638c8a9a5SSteve French }
209738c8a9a5SSteve French rc = locks_lock_file_wait(file, flock);
209838c8a9a5SSteve French }
209938c8a9a5SSteve French return rc;
210038c8a9a5SSteve French }
210138c8a9a5SSteve French
210238c8a9a5SSteve French int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
210338c8a9a5SSteve French {
210438c8a9a5SSteve French int rc, xid;
210538c8a9a5SSteve French int lock = 0, unlock = 0;
210638c8a9a5SSteve French bool wait_flag = false;
210738c8a9a5SSteve French bool posix_lck = false;
210838c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
210938c8a9a5SSteve French struct cifs_tcon *tcon;
211038c8a9a5SSteve French struct cifsFileInfo *cfile;
211138c8a9a5SSteve French __u32 type;
211238c8a9a5SSteve French
211338c8a9a5SSteve French xid = get_xid();
211438c8a9a5SSteve French
211538c8a9a5SSteve French if (!(fl->fl_flags & FL_FLOCK)) {
211638c8a9a5SSteve French rc = -ENOLCK;
211738c8a9a5SSteve French free_xid(xid);
211838c8a9a5SSteve French return rc;
211938c8a9a5SSteve French }
212038c8a9a5SSteve French
212138c8a9a5SSteve French cfile = (struct cifsFileInfo *)file->private_data;
212238c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink);
212338c8a9a5SSteve French
212438c8a9a5SSteve French cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
212538c8a9a5SSteve French tcon->ses->server);
212638c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file);
212738c8a9a5SSteve French
212838c8a9a5SSteve French if (cap_unix(tcon->ses) &&
212938c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
213038c8a9a5SSteve French ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
213138c8a9a5SSteve French posix_lck = true;
213238c8a9a5SSteve French
213338c8a9a5SSteve French if (!lock && !unlock) {
213438c8a9a5SSteve French /*
213538c8a9a5SSteve French * If this is neither a lock nor an unlock request then there is
213638c8a9a5SSteve French * nothing to do, since we do not know what it is.
213738c8a9a5SSteve French */
213838c8a9a5SSteve French rc = -EOPNOTSUPP;
213938c8a9a5SSteve French free_xid(xid);
214038c8a9a5SSteve French return rc;
214138c8a9a5SSteve French }
214238c8a9a5SSteve French
214338c8a9a5SSteve French rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
214438c8a9a5SSteve French xid);
214538c8a9a5SSteve French free_xid(xid);
214638c8a9a5SSteve French return rc;
214938c8a9a5SSteve French }
215038c8a9a5SSteve French
215138c8a9a5SSteve French int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
215238c8a9a5SSteve French {
215338c8a9a5SSteve French int rc, xid;
215438c8a9a5SSteve French int lock = 0, unlock = 0;
215538c8a9a5SSteve French bool wait_flag = false;
215638c8a9a5SSteve French bool posix_lck = false;
215738c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
215838c8a9a5SSteve French struct cifs_tcon *tcon;
215938c8a9a5SSteve French struct cifsFileInfo *cfile;
216038c8a9a5SSteve French __u32 type;
216138c8a9a5SSteve French
216238c8a9a5SSteve French rc = -EACCES;
216338c8a9a5SSteve French xid = get_xid();
216438c8a9a5SSteve French
216538c8a9a5SSteve French cifs_dbg(FYI, "%s: %pD2 cmd=0x%x flags=0x%x type=0x%x r=%lld:%lld\n", __func__, file, cmd,
216638c8a9a5SSteve French flock->fl_flags, flock->fl_type, (long long)flock->fl_start,
216738c8a9a5SSteve French (long long)flock->fl_end);
216838c8a9a5SSteve French
216938c8a9a5SSteve French cfile = (struct cifsFileInfo *)file->private_data;
217038c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink);
217138c8a9a5SSteve French
217238c8a9a5SSteve French cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
217338c8a9a5SSteve French tcon->ses->server);
217438c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file);
217538c8a9a5SSteve French set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
217638c8a9a5SSteve French
217738c8a9a5SSteve French if (cap_unix(tcon->ses) &&
217838c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
217938c8a9a5SSteve French ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
218038c8a9a5SSteve French posix_lck = true;
218138c8a9a5SSteve French /*
218238c8a9a5SSteve French * BB add code here to normalize offset and length to account for
218338c8a9a5SSteve French * negative length which we can not accept over the wire.
218438c8a9a5SSteve French */
218538c8a9a5SSteve French if (IS_GETLK(cmd)) {
218638c8a9a5SSteve French rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
218738c8a9a5SSteve French free_xid(xid);
218838c8a9a5SSteve French return rc;
218938c8a9a5SSteve French }
219038c8a9a5SSteve French
219138c8a9a5SSteve French if (!lock && !unlock) {
219238c8a9a5SSteve French /*
219338c8a9a5SSteve French * if the request is neither a lock nor an unlock then there is
219438c8a9a5SSteve French * nothing to do since we cannot tell what operation was intended
219538c8a9a5SSteve French */
219638c8a9a5SSteve French free_xid(xid);
219738c8a9a5SSteve French return -EOPNOTSUPP;
219838c8a9a5SSteve French }
219938c8a9a5SSteve French
220038c8a9a5SSteve French rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
220138c8a9a5SSteve French xid);
220238c8a9a5SSteve French free_xid(xid);
220338c8a9a5SSteve French return rc;
220438c8a9a5SSteve French }
220538c8a9a5SSteve French
220638c8a9a5SSteve French /*
220738c8a9a5SSteve French * update the file size (if needed) after a write. Should be called with
220838c8a9a5SSteve French * the inode->i_lock held
220938c8a9a5SSteve French */
221038c8a9a5SSteve French void
221138c8a9a5SSteve French cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
221238c8a9a5SSteve French unsigned int bytes_written)
221338c8a9a5SSteve French {
221438c8a9a5SSteve French loff_t end_of_write = offset + bytes_written;
221538c8a9a5SSteve French
221638c8a9a5SSteve French if (end_of_write > cifsi->server_eof)
221738c8a9a5SSteve French cifsi->server_eof = end_of_write;
221838c8a9a5SSteve French }
221938c8a9a5SSteve French
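/*
 * Synchronously write a buffer to the server via the sync_write op,
 * reopening an invalidated handle and retrying on -EAGAIN as needed.
 * Updates the cached EOF/i_size and returns the number of bytes written.
 */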
222038c8a9a5SSteve French static ssize_t
222138c8a9a5SSteve French cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
222238c8a9a5SSteve French size_t write_size, loff_t *offset)
222338c8a9a5SSteve French {
222438c8a9a5SSteve French int rc = 0;
222538c8a9a5SSteve French unsigned int bytes_written = 0;
222638c8a9a5SSteve French unsigned int total_written;
222738c8a9a5SSteve French struct cifs_tcon *tcon;
222838c8a9a5SSteve French struct TCP_Server_Info *server;
222938c8a9a5SSteve French unsigned int xid;
223038c8a9a5SSteve French struct dentry *dentry = open_file->dentry;
223138c8a9a5SSteve French struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
223238c8a9a5SSteve French struct cifs_io_parms io_parms = {0};
223338c8a9a5SSteve French
223438c8a9a5SSteve French cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
223538c8a9a5SSteve French write_size, *offset, dentry);
223638c8a9a5SSteve French
223738c8a9a5SSteve French tcon = tlink_tcon(open_file->tlink);
223838c8a9a5SSteve French server = tcon->ses->server;
223938c8a9a5SSteve French
224038c8a9a5SSteve French if (!server->ops->sync_write)
224138c8a9a5SSteve French return -ENOSYS;
224238c8a9a5SSteve French
224338c8a9a5SSteve French xid = get_xid();
224438c8a9a5SSteve French
224538c8a9a5SSteve French for (total_written = 0; write_size > total_written;
224638c8a9a5SSteve French total_written += bytes_written) {
224738c8a9a5SSteve French rc = -EAGAIN;
224838c8a9a5SSteve French while (rc == -EAGAIN) {
224938c8a9a5SSteve French struct kvec iov[2];
225038c8a9a5SSteve French unsigned int len;
225138c8a9a5SSteve French
225238c8a9a5SSteve French if (open_file->invalidHandle) {
225338c8a9a5SSteve French /* we could deadlock if we called
225438c8a9a5SSteve French filemap_fdatawait from here so tell
225538c8a9a5SSteve French reopen_file not to flush data to
225638c8a9a5SSteve French server now */
225738c8a9a5SSteve French rc = cifs_reopen_file(open_file, false);
225838c8a9a5SSteve French if (rc != 0)
225938c8a9a5SSteve French break;
226038c8a9a5SSteve French }
226138c8a9a5SSteve French
226238c8a9a5SSteve French len = min(server->ops->wp_retry_size(d_inode(dentry)),
226338c8a9a5SSteve French (unsigned int)write_size - total_written);
226438c8a9a5SSteve French /* iov[0] is reserved for smb header */
226538c8a9a5SSteve French iov[1].iov_base = (char *)write_data + total_written;
226638c8a9a5SSteve French iov[1].iov_len = len;
226738c8a9a5SSteve French io_parms.pid = pid;
226838c8a9a5SSteve French io_parms.tcon = tcon;
226938c8a9a5SSteve French io_parms.offset = *offset;
227038c8a9a5SSteve French io_parms.length = len;
227138c8a9a5SSteve French rc = server->ops->sync_write(xid, &open_file->fid,
227238c8a9a5SSteve French &io_parms, &bytes_written, iov, 1);
227338c8a9a5SSteve French }
227438c8a9a5SSteve French if (rc || (bytes_written == 0)) {
227538c8a9a5SSteve French if (total_written)
227638c8a9a5SSteve French break;
227738c8a9a5SSteve French else {
227838c8a9a5SSteve French free_xid(xid);
227938c8a9a5SSteve French return rc;
228038c8a9a5SSteve French }
228138c8a9a5SSteve French } else {
228238c8a9a5SSteve French spin_lock(&d_inode(dentry)->i_lock);
228338c8a9a5SSteve French cifs_update_eof(cifsi, *offset, bytes_written);
228438c8a9a5SSteve French spin_unlock(&d_inode(dentry)->i_lock);
228538c8a9a5SSteve French *offset += bytes_written;
228638c8a9a5SSteve French }
228738c8a9a5SSteve French }
228838c8a9a5SSteve French
228938c8a9a5SSteve French cifs_stats_bytes_written(tcon, total_written);
229038c8a9a5SSteve French
229138c8a9a5SSteve French if (total_written > 0) {
229238c8a9a5SSteve French spin_lock(&d_inode(dentry)->i_lock);
229338c8a9a5SSteve French if (*offset > d_inode(dentry)->i_size) {
229438c8a9a5SSteve French i_size_write(d_inode(dentry), *offset);
229538c8a9a5SSteve French d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
229638c8a9a5SSteve French }
229738c8a9a5SSteve French spin_unlock(&d_inode(dentry)->i_lock);
229838c8a9a5SSteve French }
229938c8a9a5SSteve French mark_inode_dirty_sync(d_inode(dentry));
230038c8a9a5SSteve French free_xid(xid);
230138c8a9a5SSteve French return total_written;
230238c8a9a5SSteve French }
230338c8a9a5SSteve French
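/*
 * Find an open handle on the inode that is usable for reading and take a
 * reference on it.  On multiuser mounts the search may be limited to handles
 * owned by the current fsuid.  Returns NULL if no suitable handle exists.
 */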
230438c8a9a5SSteve French struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
230538c8a9a5SSteve French bool fsuid_only)
230638c8a9a5SSteve French {
230738c8a9a5SSteve French struct cifsFileInfo *open_file = NULL;
230838c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
230938c8a9a5SSteve French
231038c8a9a5SSteve French /* only filter by fsuid on multiuser mounts */
231138c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
231238c8a9a5SSteve French fsuid_only = false;
231338c8a9a5SSteve French
231438c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock);
231538c8a9a5SSteve French /* we could simply get the first_list_entry since write-only entries
231638c8a9a5SSteve French are always at the end of the list but since the first entry might
231738c8a9a5SSteve French have a close pending, we go through the whole list */
231838c8a9a5SSteve French list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
231938c8a9a5SSteve French if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
232038c8a9a5SSteve French continue;
232138c8a9a5SSteve French if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
232238c8a9a5SSteve French if (!open_file->invalidHandle) {
232338c8a9a5SSteve French /* found a good file */
232438c8a9a5SSteve French /* lock it so it will not be closed on us */
232538c8a9a5SSteve French cifsFileInfo_get(open_file);
232638c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
232738c8a9a5SSteve French return open_file;
232838c8a9a5SSteve French } /* else might as well continue, and look for
232938c8a9a5SSteve French another, or simply have the caller reopen it
233038c8a9a5SSteve French again rather than trying to fix this handle */
233138c8a9a5SSteve French } else /* write only file */
233238c8a9a5SSteve French break; /* write only files are last so must be done */
233338c8a9a5SSteve French }
233438c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
233538c8a9a5SSteve French return NULL;
233638c8a9a5SSteve French }
233738c8a9a5SSteve French
233838c8a9a5SSteve French /* Return -EBADF if no handle is found and general rc otherwise */
233938c8a9a5SSteve French int
234038c8a9a5SSteve French cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
234138c8a9a5SSteve French struct cifsFileInfo **ret_file)
234238c8a9a5SSteve French {
234338c8a9a5SSteve French struct cifsFileInfo *open_file, *inv_file = NULL;
234438c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
234538c8a9a5SSteve French bool any_available = false;
234638c8a9a5SSteve French int rc = -EBADF;
234738c8a9a5SSteve French unsigned int refind = 0;
234838c8a9a5SSteve French bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
234938c8a9a5SSteve French bool with_delete = flags & FIND_WR_WITH_DELETE;
235038c8a9a5SSteve French *ret_file = NULL;
235138c8a9a5SSteve French
235238c8a9a5SSteve French /*
235338c8a9a5SSteve French * Having a null inode here (because mapping->host was set to zero by
235438c8a9a5SSteve French * the VFS or MM) should not happen but we had reports of an oops (due
235538c8a9a5SSteve French * to it being zero) during stress test cases so we need to check for it
235638c8a9a5SSteve French */
235738c8a9a5SSteve French
235838c8a9a5SSteve French if (cifs_inode == NULL) {
235938c8a9a5SSteve French cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
236038c8a9a5SSteve French dump_stack();
236138c8a9a5SSteve French return rc;
236238c8a9a5SSteve French }
236338c8a9a5SSteve French
236438c8a9a5SSteve French cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
236538c8a9a5SSteve French
236638c8a9a5SSteve French /* only filter by fsuid on multiuser mounts */
236738c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
236838c8a9a5SSteve French fsuid_only = false;
236938c8a9a5SSteve French
237038c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock);
237138c8a9a5SSteve French refind_writable:
237238c8a9a5SSteve French if (refind > MAX_REOPEN_ATT) {
237338c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
237438c8a9a5SSteve French return rc;
237538c8a9a5SSteve French }
237638c8a9a5SSteve French list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
237738c8a9a5SSteve French if (!any_available && open_file->pid != current->tgid)
237838c8a9a5SSteve French continue;
237938c8a9a5SSteve French if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
238038c8a9a5SSteve French continue;
238138c8a9a5SSteve French if (with_delete && !(open_file->fid.access & DELETE))
238238c8a9a5SSteve French continue;
238338c8a9a5SSteve French if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
238438c8a9a5SSteve French if (!open_file->invalidHandle) {
238538c8a9a5SSteve French /* found a good writable file */
238638c8a9a5SSteve French cifsFileInfo_get(open_file);
238738c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
238838c8a9a5SSteve French *ret_file = open_file;
238938c8a9a5SSteve French return 0;
239038c8a9a5SSteve French } else {
239138c8a9a5SSteve French if (!inv_file)
239238c8a9a5SSteve French inv_file = open_file;
239338c8a9a5SSteve French }
239438c8a9a5SSteve French }
239538c8a9a5SSteve French }
239638c8a9a5SSteve French /* couldn't find usable FH with same pid, try any available */
239738c8a9a5SSteve French if (!any_available) {
239838c8a9a5SSteve French any_available = true;
239938c8a9a5SSteve French goto refind_writable;
240038c8a9a5SSteve French }
240138c8a9a5SSteve French
240238c8a9a5SSteve French if (inv_file) {
240338c8a9a5SSteve French any_available = false;
240438c8a9a5SSteve French cifsFileInfo_get(inv_file);
240538c8a9a5SSteve French }
240638c8a9a5SSteve French
240738c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
240838c8a9a5SSteve French
240938c8a9a5SSteve French if (inv_file) {
241038c8a9a5SSteve French rc = cifs_reopen_file(inv_file, false);
241138c8a9a5SSteve French if (!rc) {
241238c8a9a5SSteve French *ret_file = inv_file;
241338c8a9a5SSteve French return 0;
241438c8a9a5SSteve French }
241538c8a9a5SSteve French
241638c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock);
241738c8a9a5SSteve French list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
241838c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
241938c8a9a5SSteve French cifsFileInfo_put(inv_file);
242038c8a9a5SSteve French ++refind;
242138c8a9a5SSteve French inv_file = NULL;
242238c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock);
242338c8a9a5SSteve French goto refind_writable;
242438c8a9a5SSteve French }
242538c8a9a5SSteve French
242638c8a9a5SSteve French return rc;
242738c8a9a5SSteve French }
242838c8a9a5SSteve French
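/*
 * Convenience wrapper around cifs_get_writable_file() that returns the
 * handle directly (or NULL) and logs a debug message on failure.
 */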
242938c8a9a5SSteve French struct cifsFileInfo *
243038c8a9a5SSteve French find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
243138c8a9a5SSteve French {
243238c8a9a5SSteve French struct cifsFileInfo *cfile;
243338c8a9a5SSteve French int rc;
243438c8a9a5SSteve French
243538c8a9a5SSteve French rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
243638c8a9a5SSteve French if (rc)
243738c8a9a5SSteve French cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
243838c8a9a5SSteve French
243938c8a9a5SSteve French return cfile;
244038c8a9a5SSteve French }
244138c8a9a5SSteve French
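/*
 * Look up an open, writable handle on this tcon by pathname and return it
 * via ret_file with a reference held.  Returns -ENOENT if no open file
 * matches the name.
 */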
244238c8a9a5SSteve French int
244338c8a9a5SSteve French cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
244438c8a9a5SSteve French int flags,
244538c8a9a5SSteve French struct cifsFileInfo **ret_file)
244638c8a9a5SSteve French {
244738c8a9a5SSteve French struct cifsFileInfo *cfile;
244838c8a9a5SSteve French void *page = alloc_dentry_path();
244938c8a9a5SSteve French
245038c8a9a5SSteve French *ret_file = NULL;
245138c8a9a5SSteve French
245238c8a9a5SSteve French spin_lock(&tcon->open_file_lock);
245338c8a9a5SSteve French list_for_each_entry(cfile, &tcon->openFileList, tlist) {
245438c8a9a5SSteve French struct cifsInodeInfo *cinode;
245538c8a9a5SSteve French const char *full_path = build_path_from_dentry(cfile->dentry, page);
245638c8a9a5SSteve French if (IS_ERR(full_path)) {
245738c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
245838c8a9a5SSteve French free_dentry_path(page);
245938c8a9a5SSteve French return PTR_ERR(full_path);
246038c8a9a5SSteve French }
246138c8a9a5SSteve French if (strcmp(full_path, name))
246238c8a9a5SSteve French continue;
246338c8a9a5SSteve French
246438c8a9a5SSteve French cinode = CIFS_I(d_inode(cfile->dentry));
246538c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
246638c8a9a5SSteve French free_dentry_path(page);
246738c8a9a5SSteve French return cifs_get_writable_file(cinode, flags, ret_file);
246838c8a9a5SSteve French }
246938c8a9a5SSteve French
247038c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
247138c8a9a5SSteve French free_dentry_path(page);
247238c8a9a5SSteve French return -ENOENT;
247338c8a9a5SSteve French }
247438c8a9a5SSteve French
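/*
 * Look up an open handle on this tcon by pathname that is usable for
 * reading and return it via ret_file with a reference held.  Returns
 * -ENOENT if no open file matches the name.
 */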
247538c8a9a5SSteve French int
247638c8a9a5SSteve French cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
247738c8a9a5SSteve French struct cifsFileInfo **ret_file)
247838c8a9a5SSteve French {
247938c8a9a5SSteve French struct cifsFileInfo *cfile;
248038c8a9a5SSteve French void *page = alloc_dentry_path();
248138c8a9a5SSteve French
248238c8a9a5SSteve French *ret_file = NULL;
248338c8a9a5SSteve French
248438c8a9a5SSteve French spin_lock(&tcon->open_file_lock);
248538c8a9a5SSteve French list_for_each_entry(cfile, &tcon->openFileList, tlist) {
248638c8a9a5SSteve French struct cifsInodeInfo *cinode;
248738c8a9a5SSteve French const char *full_path = build_path_from_dentry(cfile->dentry, page);
248838c8a9a5SSteve French if (IS_ERR(full_path)) {
248938c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
249038c8a9a5SSteve French free_dentry_path(page);
249138c8a9a5SSteve French return PTR_ERR(full_path);
249238c8a9a5SSteve French }
249338c8a9a5SSteve French if (strcmp(full_path, name))
249438c8a9a5SSteve French continue;
249538c8a9a5SSteve French
249638c8a9a5SSteve French cinode = CIFS_I(d_inode(cfile->dentry));
249738c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
249838c8a9a5SSteve French free_dentry_path(page);
249938c8a9a5SSteve French *ret_file = find_readable_file(cinode, 0);
250038c8a9a5SSteve French return *ret_file ? 0 : -ENOENT;
250138c8a9a5SSteve French }
250238c8a9a5SSteve French
250338c8a9a5SSteve French spin_unlock(&tcon->open_file_lock);
250438c8a9a5SSteve French free_dentry_path(page);
250538c8a9a5SSteve French return -ENOENT;
250638c8a9a5SSteve French }
250738c8a9a5SSteve French
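/*
 * kref release callback for a cifs_writedata: deregister the smbdirect MR
 * (if any), drop the file handle reference and free the structure.
 */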
250838c8a9a5SSteve French void
250938c8a9a5SSteve French cifs_writedata_release(struct kref *refcount)
251038c8a9a5SSteve French {
251138c8a9a5SSteve French struct cifs_writedata *wdata = container_of(refcount,
251238c8a9a5SSteve French struct cifs_writedata, refcount);
251338c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
251438c8a9a5SSteve French if (wdata->mr) {
251538c8a9a5SSteve French smbd_deregister_mr(wdata->mr);
251638c8a9a5SSteve French wdata->mr = NULL;
251738c8a9a5SSteve French }
251838c8a9a5SSteve French #endif
251938c8a9a5SSteve French
252038c8a9a5SSteve French if (wdata->cfile)
252138c8a9a5SSteve French cifsFileInfo_put(wdata->cfile);
252238c8a9a5SSteve French
252338c8a9a5SSteve French kfree(wdata);
252438c8a9a5SSteve French }
252538c8a9a5SSteve French
252638c8a9a5SSteve French /*
252738c8a9a5SSteve French * Write failed with a retryable error. Resend the write request. It's also
252838c8a9a5SSteve French * possible that the page was redirtied so re-clean the page.
252938c8a9a5SSteve French */
253038c8a9a5SSteve French static void
253138c8a9a5SSteve French cifs_writev_requeue(struct cifs_writedata *wdata)
253238c8a9a5SSteve French {
253338c8a9a5SSteve French int rc = 0;
253438c8a9a5SSteve French struct inode *inode = d_inode(wdata->cfile->dentry);
253538c8a9a5SSteve French struct TCP_Server_Info *server;
253638c8a9a5SSteve French unsigned int rest_len = wdata->bytes;
253738c8a9a5SSteve French loff_t fpos = wdata->offset;
253838c8a9a5SSteve French
253938c8a9a5SSteve French server = tlink_tcon(wdata->cfile->tlink)->ses->server;
254038c8a9a5SSteve French do {
254138c8a9a5SSteve French struct cifs_writedata *wdata2;
254238c8a9a5SSteve French unsigned int wsize, cur_len;
254338c8a9a5SSteve French
254438c8a9a5SSteve French wsize = server->ops->wp_retry_size(inode);
254538c8a9a5SSteve French if (wsize < rest_len) {
254638c8a9a5SSteve French if (wsize < PAGE_SIZE) {
254738c8a9a5SSteve French rc = -EOPNOTSUPP;
254838c8a9a5SSteve French break;
254938c8a9a5SSteve French }
255038c8a9a5SSteve French cur_len = min(round_down(wsize, PAGE_SIZE), rest_len);
255138c8a9a5SSteve French } else {
255238c8a9a5SSteve French cur_len = rest_len;
255338c8a9a5SSteve French }
255438c8a9a5SSteve French
255538c8a9a5SSteve French wdata2 = cifs_writedata_alloc(cifs_writev_complete);
255638c8a9a5SSteve French if (!wdata2) {
255738c8a9a5SSteve French rc = -ENOMEM;
255838c8a9a5SSteve French break;
255938c8a9a5SSteve French }
256038c8a9a5SSteve French
256138c8a9a5SSteve French wdata2->sync_mode = wdata->sync_mode;
256238c8a9a5SSteve French wdata2->offset = fpos;
256338c8a9a5SSteve French wdata2->bytes = cur_len;
256438c8a9a5SSteve French wdata2->iter = wdata->iter;
256538c8a9a5SSteve French
256638c8a9a5SSteve French iov_iter_advance(&wdata2->iter, fpos - wdata->offset);
256738c8a9a5SSteve French iov_iter_truncate(&wdata2->iter, wdata2->bytes);
256838c8a9a5SSteve French
256938c8a9a5SSteve French if (iov_iter_is_xarray(&wdata2->iter))
257038c8a9a5SSteve French /* Check for pages having been redirtied and clean
257138c8a9a5SSteve French * them. We can do this by walking the xarray. If
257238c8a9a5SSteve French * it's not an xarray, then it's a DIO and we shouldn't
257338c8a9a5SSteve French * be mucking around with the page bits.
257438c8a9a5SSteve French */
257538c8a9a5SSteve French cifs_undirty_folios(inode, fpos, cur_len);
257638c8a9a5SSteve French
257738c8a9a5SSteve French rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
257838c8a9a5SSteve French &wdata2->cfile);
257938c8a9a5SSteve French if (!wdata2->cfile) {
258038c8a9a5SSteve French cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
258138c8a9a5SSteve French rc);
258238c8a9a5SSteve French if (!is_retryable_error(rc))
258338c8a9a5SSteve French rc = -EBADF;
258438c8a9a5SSteve French } else {
258538c8a9a5SSteve French wdata2->pid = wdata2->cfile->pid;
258638c8a9a5SSteve French rc = server->ops->async_writev(wdata2,
258738c8a9a5SSteve French cifs_writedata_release);
258838c8a9a5SSteve French }
258938c8a9a5SSteve French
259038c8a9a5SSteve French kref_put(&wdata2->refcount, cifs_writedata_release);
259138c8a9a5SSteve French if (rc) {
259238c8a9a5SSteve French if (is_retryable_error(rc))
259338c8a9a5SSteve French continue;
259438c8a9a5SSteve French fpos += cur_len;
259538c8a9a5SSteve French rest_len -= cur_len;
259638c8a9a5SSteve French break;
259738c8a9a5SSteve French }
259838c8a9a5SSteve French
259938c8a9a5SSteve French fpos += cur_len;
260038c8a9a5SSteve French rest_len -= cur_len;
260138c8a9a5SSteve French } while (rest_len > 0);
260238c8a9a5SSteve French
260338c8a9a5SSteve French /* Clean up remaining pages from the original wdata */
260438c8a9a5SSteve French if (iov_iter_is_xarray(&wdata->iter))
260538c8a9a5SSteve French cifs_pages_write_failed(inode, fpos, rest_len);
260638c8a9a5SSteve French
260738c8a9a5SSteve French if (rc != 0 && !is_retryable_error(rc))
260838c8a9a5SSteve French mapping_set_error(inode->i_mapping, rc);
260938c8a9a5SSteve French kref_put(&wdata->refcount, cifs_writedata_release);
261038c8a9a5SSteve French }
261138c8a9a5SSteve French
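/*
 * Work item run when an asynchronous write completes: on success update the
 * cached EOF and byte counters; on a retryable -EAGAIN under WB_SYNC_ALL
 * requeue the write; otherwise mark the pages written back, redirtied or
 * failed as appropriate.
 */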
261238c8a9a5SSteve French void
261338c8a9a5SSteve French cifs_writev_complete(struct work_struct *work)
261438c8a9a5SSteve French {
261538c8a9a5SSteve French struct cifs_writedata *wdata = container_of(work,
261638c8a9a5SSteve French struct cifs_writedata, work);
261738c8a9a5SSteve French struct inode *inode = d_inode(wdata->cfile->dentry);
261838c8a9a5SSteve French
261938c8a9a5SSteve French if (wdata->result == 0) {
262038c8a9a5SSteve French spin_lock(&inode->i_lock);
262138c8a9a5SSteve French cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
262238c8a9a5SSteve French spin_unlock(&inode->i_lock);
262338c8a9a5SSteve French cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
262438c8a9a5SSteve French wdata->bytes);
262538c8a9a5SSteve French } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
262638c8a9a5SSteve French return cifs_writev_requeue(wdata);
262738c8a9a5SSteve French
262838c8a9a5SSteve French if (wdata->result == -EAGAIN)
262938c8a9a5SSteve French cifs_pages_write_redirty(inode, wdata->offset, wdata->bytes);
263038c8a9a5SSteve French else if (wdata->result < 0)
263138c8a9a5SSteve French cifs_pages_write_failed(inode, wdata->offset, wdata->bytes);
263238c8a9a5SSteve French else
263338c8a9a5SSteve French cifs_pages_written_back(inode, wdata->offset, wdata->bytes);
263438c8a9a5SSteve French
263538c8a9a5SSteve French if (wdata->result != -EAGAIN)
263638c8a9a5SSteve French mapping_set_error(inode->i_mapping, wdata->result);
263738c8a9a5SSteve French kref_put(&wdata->refcount, cifs_writedata_release);
263838c8a9a5SSteve French }
263938c8a9a5SSteve French
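/*
 * Allocate and initialise a cifs_writedata, wiring up the caller-supplied
 * completion work function.
 */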
264038c8a9a5SSteve French struct cifs_writedata *cifs_writedata_alloc(work_func_t complete)
264138c8a9a5SSteve French {
264238c8a9a5SSteve French struct cifs_writedata *wdata;
264338c8a9a5SSteve French
264438c8a9a5SSteve French wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
264538c8a9a5SSteve French if (wdata != NULL) {
264638c8a9a5SSteve French kref_init(&wdata->refcount);
264738c8a9a5SSteve French INIT_LIST_HEAD(&wdata->list);
264838c8a9a5SSteve French init_completion(&wdata->done);
264938c8a9a5SSteve French INIT_WORK(&wdata->work, complete);
265038c8a9a5SSteve French }
265138c8a9a5SSteve French return wdata;
265238c8a9a5SSteve French }
265338c8a9a5SSteve French
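/*
 * Synchronously write the byte range [from, to) of a page back to the
 * server using any available writable handle, trimming the range so that
 * the write does not extend the file.
 */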
265438c8a9a5SSteve French static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
265538c8a9a5SSteve French {
265638c8a9a5SSteve French struct address_space *mapping = page->mapping;
265738c8a9a5SSteve French loff_t offset = (loff_t)page->index << PAGE_SHIFT;
265838c8a9a5SSteve French char *write_data;
265938c8a9a5SSteve French int rc = -EFAULT;
266038c8a9a5SSteve French int bytes_written = 0;
266138c8a9a5SSteve French struct inode *inode;
266238c8a9a5SSteve French struct cifsFileInfo *open_file;
266338c8a9a5SSteve French
266438c8a9a5SSteve French if (!mapping || !mapping->host)
266538c8a9a5SSteve French return -EFAULT;
266638c8a9a5SSteve French
266738c8a9a5SSteve French inode = page->mapping->host;
266838c8a9a5SSteve French
266938c8a9a5SSteve French offset += (loff_t)from;
267038c8a9a5SSteve French write_data = kmap(page);
267138c8a9a5SSteve French write_data += from;
267238c8a9a5SSteve French
267338c8a9a5SSteve French if ((to > PAGE_SIZE) || (from > to)) {
267438c8a9a5SSteve French kunmap(page);
267538c8a9a5SSteve French return -EIO;
267638c8a9a5SSteve French }
267738c8a9a5SSteve French
267838c8a9a5SSteve French /* racing with truncate? */
267938c8a9a5SSteve French if (offset > mapping->host->i_size) {
268038c8a9a5SSteve French kunmap(page);
268138c8a9a5SSteve French return 0; /* don't care */
268238c8a9a5SSteve French }
268338c8a9a5SSteve French
268438c8a9a5SSteve French /* check to make sure that we are not extending the file */
268538c8a9a5SSteve French if (mapping->host->i_size - offset < (loff_t)to)
268638c8a9a5SSteve French to = (unsigned)(mapping->host->i_size - offset);
268738c8a9a5SSteve French
268838c8a9a5SSteve French rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
268938c8a9a5SSteve French &open_file);
269038c8a9a5SSteve French if (!rc) {
269138c8a9a5SSteve French bytes_written = cifs_write(open_file, open_file->pid,
269238c8a9a5SSteve French write_data, to - from, &offset);
269338c8a9a5SSteve French cifsFileInfo_put(open_file);
269438c8a9a5SSteve French /* Does mm or vfs already set times? */
269523171df5SJeff Layton simple_inode_init_ts(inode);
269638c8a9a5SSteve French if ((bytes_written > 0) && (offset))
269738c8a9a5SSteve French rc = 0;
269838c8a9a5SSteve French else if (bytes_written < 0)
269938c8a9a5SSteve French rc = bytes_written;
270038c8a9a5SSteve French else
270138c8a9a5SSteve French rc = -EFAULT;
270238c8a9a5SSteve French } else {
270338c8a9a5SSteve French cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
270438c8a9a5SSteve French if (!is_retryable_error(rc))
270538c8a9a5SSteve French rc = -EIO;
270638c8a9a5SSteve French }
270738c8a9a5SSteve French
270838c8a9a5SSteve French kunmap(page);
270938c8a9a5SSteve French return rc;
271038c8a9a5SSteve French }
271138c8a9a5SSteve French
271238c8a9a5SSteve French /*
271338c8a9a5SSteve French * Extend the region to be written back to include subsequent contiguously
271438c8a9a5SSteve French * dirty pages if possible, but don't sleep while doing so.
271538c8a9a5SSteve French */
271638c8a9a5SSteve French static void cifs_extend_writeback(struct address_space *mapping,
2717e45deec3SDavid Howells struct xa_state *xas,
271838c8a9a5SSteve French long *_count,
271938c8a9a5SSteve French loff_t start,
272038c8a9a5SSteve French int max_pages,
2721e45deec3SDavid Howells loff_t max_len,
2722e45deec3SDavid Howells size_t *_len)
272338c8a9a5SSteve French {
272438c8a9a5SSteve French struct folio_batch batch;
272538c8a9a5SSteve French struct folio *folio;
2726e45deec3SDavid Howells unsigned int nr_pages;
2727e45deec3SDavid Howells pgoff_t index = (start + *_len) / PAGE_SIZE;
2728e45deec3SDavid Howells size_t len;
272938c8a9a5SSteve French bool stop = true;
273038c8a9a5SSteve French unsigned int i;
273138c8a9a5SSteve French
273238c8a9a5SSteve French folio_batch_init(&batch);
273338c8a9a5SSteve French
273438c8a9a5SSteve French do {
273538c8a9a5SSteve French /* Firstly, we gather up a batch of contiguous dirty pages
273638c8a9a5SSteve French * under the RCU read lock - but we can't clear the dirty flags
273738c8a9a5SSteve French * there if any of those pages are mapped.
273838c8a9a5SSteve French */
273938c8a9a5SSteve French rcu_read_lock();
274038c8a9a5SSteve French
2741e45deec3SDavid Howells xas_for_each(xas, folio, ULONG_MAX) {
274238c8a9a5SSteve French stop = true;
2743e45deec3SDavid Howells if (xas_retry(xas, folio))
274438c8a9a5SSteve French continue;
274538c8a9a5SSteve French if (xa_is_value(folio))
274638c8a9a5SSteve French break;
2747e45deec3SDavid Howells if (folio->index != index) {
2748e45deec3SDavid Howells xas_reset(xas);
274938c8a9a5SSteve French break;
2750e45deec3SDavid Howells }
2751e45deec3SDavid Howells
2752*16380f52SYang Shi if (!folio_try_get(folio)) {
2753e45deec3SDavid Howells xas_reset(xas);
275438c8a9a5SSteve French continue;
275538c8a9a5SSteve French }
275638c8a9a5SSteve French nr_pages = folio_nr_pages(folio);
2757e45deec3SDavid Howells if (nr_pages > max_pages) {
2758e45deec3SDavid Howells xas_reset(xas);
275938c8a9a5SSteve French break;
2760e45deec3SDavid Howells }
276138c8a9a5SSteve French
276238c8a9a5SSteve French /* Has the page moved or been split? */
2763e45deec3SDavid Howells if (unlikely(folio != xas_reload(xas))) {
276438c8a9a5SSteve French folio_put(folio);
2765e45deec3SDavid Howells xas_reset(xas);
276638c8a9a5SSteve French break;
276738c8a9a5SSteve French }
276838c8a9a5SSteve French
276938c8a9a5SSteve French if (!folio_trylock(folio)) {
277038c8a9a5SSteve French folio_put(folio);
2771e45deec3SDavid Howells xas_reset(xas);
277238c8a9a5SSteve French break;
277338c8a9a5SSteve French }
2774e45deec3SDavid Howells if (!folio_test_dirty(folio) ||
2775e45deec3SDavid Howells folio_test_writeback(folio)) {
277638c8a9a5SSteve French folio_unlock(folio);
277738c8a9a5SSteve French folio_put(folio);
2778e45deec3SDavid Howells xas_reset(xas);
277938c8a9a5SSteve French break;
278038c8a9a5SSteve French }
278138c8a9a5SSteve French
278238c8a9a5SSteve French max_pages -= nr_pages;
2783e45deec3SDavid Howells len = folio_size(folio);
278438c8a9a5SSteve French stop = false;
278538c8a9a5SSteve French
278638c8a9a5SSteve French index += nr_pages;
2787e45deec3SDavid Howells *_count -= nr_pages;
2788e45deec3SDavid Howells *_len += len;
2789e45deec3SDavid Howells if (max_pages <= 0 || *_len >= max_len || *_count <= 0)
2790e45deec3SDavid Howells stop = true;
2791e45deec3SDavid Howells
279238c8a9a5SSteve French if (!folio_batch_add(&batch, folio))
279338c8a9a5SSteve French break;
279438c8a9a5SSteve French if (stop)
279538c8a9a5SSteve French break;
279638c8a9a5SSteve French }
279738c8a9a5SSteve French
2798e45deec3SDavid Howells xas_pause(xas);
279938c8a9a5SSteve French rcu_read_unlock();
280038c8a9a5SSteve French
280138c8a9a5SSteve French /* Now, if we obtained any pages, we can shift them to being
280238c8a9a5SSteve French * writable and mark them for caching.
280338c8a9a5SSteve French */
280438c8a9a5SSteve French if (!folio_batch_count(&batch))
280538c8a9a5SSteve French break;
280638c8a9a5SSteve French
280738c8a9a5SSteve French for (i = 0; i < folio_batch_count(&batch); i++) {
280838c8a9a5SSteve French folio = batch.folios[i];
280938c8a9a5SSteve French /* The folio should be locked, dirty and not undergoing
281038c8a9a5SSteve French * writeback from the loop above.
281138c8a9a5SSteve French */
281238c8a9a5SSteve French if (!folio_clear_dirty_for_io(folio))
281338c8a9a5SSteve French WARN_ON(1);
28142e411c57SMatthew Wilcox (Oracle) folio_start_writeback(folio);
281538c8a9a5SSteve French folio_unlock(folio);
281638c8a9a5SSteve French }
281738c8a9a5SSteve French
281838c8a9a5SSteve French folio_batch_release(&batch);
281938c8a9a5SSteve French cond_resched();
282038c8a9a5SSteve French } while (!stop);
282138c8a9a5SSteve French }
282238c8a9a5SSteve French
282338c8a9a5SSteve French /*
282438c8a9a5SSteve French * Write back the locked page and any subsequent non-locked dirty pages.
282538c8a9a5SSteve French */
282638c8a9a5SSteve French static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
282738c8a9a5SSteve French struct writeback_control *wbc,
2828e45deec3SDavid Howells struct xa_state *xas,
282938c8a9a5SSteve French struct folio *folio,
2830e45deec3SDavid Howells unsigned long long start,
2831e45deec3SDavid Howells unsigned long long end)
283238c8a9a5SSteve French {
283338c8a9a5SSteve French struct inode *inode = mapping->host;
283438c8a9a5SSteve French struct TCP_Server_Info *server;
283538c8a9a5SSteve French struct cifs_writedata *wdata;
283638c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
283738c8a9a5SSteve French struct cifs_credits credits_on_stack;
283838c8a9a5SSteve French struct cifs_credits *credits = &credits_on_stack;
283938c8a9a5SSteve French struct cifsFileInfo *cfile = NULL;
2840e45deec3SDavid Howells unsigned long long i_size = i_size_read(inode), max_len;
2841e45deec3SDavid Howells unsigned int xid, wsize;
2842e45deec3SDavid Howells size_t len = folio_size(folio);
284338c8a9a5SSteve French long count = wbc->nr_to_write;
284438c8a9a5SSteve French int rc;
284538c8a9a5SSteve French
284638c8a9a5SSteve French /* The folio should be locked, dirty and not undergoing writeback. */
2847e45deec3SDavid Howells if (!folio_clear_dirty_for_io(folio))
2848e45deec3SDavid Howells WARN_ON_ONCE(1);
28492e411c57SMatthew Wilcox (Oracle) folio_start_writeback(folio);
285038c8a9a5SSteve French
285138c8a9a5SSteve French count -= folio_nr_pages(folio);
285238c8a9a5SSteve French
285338c8a9a5SSteve French xid = get_xid();
285438c8a9a5SSteve French server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
285538c8a9a5SSteve French
285638c8a9a5SSteve French rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
285738c8a9a5SSteve French if (rc) {
285838c8a9a5SSteve French cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc);
285938c8a9a5SSteve French goto err_xid;
286038c8a9a5SSteve French }
286138c8a9a5SSteve French
286238c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
286338c8a9a5SSteve French &wsize, credits);
286438c8a9a5SSteve French if (rc != 0)
286538c8a9a5SSteve French goto err_close;
286638c8a9a5SSteve French
286738c8a9a5SSteve French wdata = cifs_writedata_alloc(cifs_writev_complete);
286838c8a9a5SSteve French if (!wdata) {
286938c8a9a5SSteve French rc = -ENOMEM;
287038c8a9a5SSteve French goto err_uncredit;
287138c8a9a5SSteve French }
287238c8a9a5SSteve French
287338c8a9a5SSteve French wdata->sync_mode = wbc->sync_mode;
287438c8a9a5SSteve French wdata->offset = folio_pos(folio);
287538c8a9a5SSteve French wdata->pid = cfile->pid;
287638c8a9a5SSteve French wdata->credits = credits_on_stack;
287738c8a9a5SSteve French wdata->cfile = cfile;
287838c8a9a5SSteve French wdata->server = server;
287938c8a9a5SSteve French cfile = NULL;
288038c8a9a5SSteve French
2881e45deec3SDavid Howells /* Find all consecutive lockable dirty pages that have contiguous
2882e45deec3SDavid Howells * written regions, stopping when we find a page that is not
2883e45deec3SDavid Howells * immediately lockable, is not dirty or is missing, or we reach the
2884e45deec3SDavid Howells * end of the range.
288538c8a9a5SSteve French */
288638c8a9a5SSteve French if (start < i_size) {
288738c8a9a5SSteve French /* Trim the write to the EOF; the extra data is ignored. Also
288838c8a9a5SSteve French * put an upper limit on the size of a single storedata op.
288938c8a9a5SSteve French */
289038c8a9a5SSteve French max_len = wsize;
289138c8a9a5SSteve French max_len = min_t(unsigned long long, max_len, end - start + 1);
289238c8a9a5SSteve French max_len = min_t(unsigned long long, max_len, i_size - start);
289338c8a9a5SSteve French
289438c8a9a5SSteve French if (len < max_len) {
289538c8a9a5SSteve French int max_pages = INT_MAX;
289638c8a9a5SSteve French
289738c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
289838c8a9a5SSteve French if (server->smbd_conn)
289938c8a9a5SSteve French max_pages = server->smbd_conn->max_frmr_depth;
290038c8a9a5SSteve French #endif
290138c8a9a5SSteve French max_pages -= folio_nr_pages(folio);
290238c8a9a5SSteve French
290338c8a9a5SSteve French if (max_pages > 0)
2904e45deec3SDavid Howells cifs_extend_writeback(mapping, xas, &count, start,
290538c8a9a5SSteve French max_pages, max_len, &len);
290638c8a9a5SSteve French }
290738c8a9a5SSteve French }
2908e45deec3SDavid Howells len = min_t(unsigned long long, len, i_size - start);
290938c8a9a5SSteve French
291038c8a9a5SSteve French /* We now have a contiguous set of dirty pages, each with writeback
291138c8a9a5SSteve French * set; the first page is still locked at this point, but all the rest
291238c8a9a5SSteve French * have been unlocked.
291338c8a9a5SSteve French */
291438c8a9a5SSteve French folio_unlock(folio);
2915e45deec3SDavid Howells wdata->bytes = len;
291638c8a9a5SSteve French
291738c8a9a5SSteve French if (start < i_size) {
291838c8a9a5SSteve French iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages,
291938c8a9a5SSteve French start, len);
292038c8a9a5SSteve French
292138c8a9a5SSteve French rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
292238c8a9a5SSteve French if (rc)
292338c8a9a5SSteve French goto err_wdata;
292438c8a9a5SSteve French
292538c8a9a5SSteve French if (wdata->cfile->invalidHandle)
292638c8a9a5SSteve French rc = -EAGAIN;
292738c8a9a5SSteve French else
292838c8a9a5SSteve French rc = wdata->server->ops->async_writev(wdata,
292938c8a9a5SSteve French cifs_writedata_release);
293038c8a9a5SSteve French if (rc >= 0) {
293138c8a9a5SSteve French kref_put(&wdata->refcount, cifs_writedata_release);
293238c8a9a5SSteve French goto err_close;
293338c8a9a5SSteve French }
293438c8a9a5SSteve French } else {
293538c8a9a5SSteve French /* The dirty region was entirely beyond the EOF. */
293638c8a9a5SSteve French cifs_pages_written_back(inode, start, len);
293738c8a9a5SSteve French rc = 0;
293838c8a9a5SSteve French }
293938c8a9a5SSteve French
294038c8a9a5SSteve French err_wdata:
294138c8a9a5SSteve French kref_put(&wdata->refcount, cifs_writedata_release);
294238c8a9a5SSteve French err_uncredit:
294338c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
294438c8a9a5SSteve French err_close:
294538c8a9a5SSteve French if (cfile)
294638c8a9a5SSteve French cifsFileInfo_put(cfile);
294738c8a9a5SSteve French err_xid:
294838c8a9a5SSteve French free_xid(xid);
294938c8a9a5SSteve French if (rc == 0) {
295038c8a9a5SSteve French wbc->nr_to_write = count;
295138c8a9a5SSteve French rc = len;
295238c8a9a5SSteve French } else if (is_retryable_error(rc)) {
295338c8a9a5SSteve French cifs_pages_write_redirty(inode, start, len);
295438c8a9a5SSteve French } else {
295538c8a9a5SSteve French cifs_pages_write_failed(inode, start, len);
295638c8a9a5SSteve French mapping_set_error(mapping, rc);
295738c8a9a5SSteve French }
295838c8a9a5SSteve French /* Indication to update ctime and mtime as close is deferred */
295938c8a9a5SSteve French set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
296038c8a9a5SSteve French return rc;
296138c8a9a5SSteve French }
296238c8a9a5SSteve French
296338c8a9a5SSteve French /*
296438c8a9a5SSteve French * write a region of pages back to the server
296538c8a9a5SSteve French */
2966e45deec3SDavid Howells static ssize_t cifs_writepages_begin(struct address_space *mapping,
296738c8a9a5SSteve French struct writeback_control *wbc,
2968e45deec3SDavid Howells struct xa_state *xas,
2969e45deec3SDavid Howells unsigned long long *_start,
2970e45deec3SDavid Howells unsigned long long end)
297138c8a9a5SSteve French {
2972e45deec3SDavid Howells struct folio *folio;
2973e45deec3SDavid Howells unsigned long long start = *_start;
2974e45deec3SDavid Howells ssize_t ret;
297538c8a9a5SSteve French int skips = 0;
297638c8a9a5SSteve French
2977e45deec3SDavid Howells search_again:
2978e45deec3SDavid Howells /* Find the first dirty page. */
2979e45deec3SDavid Howells rcu_read_lock();
298038c8a9a5SSteve French
2981e45deec3SDavid Howells for (;;) {
2982e45deec3SDavid Howells folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
2983e45deec3SDavid Howells if (xas_retry(xas, folio) || xa_is_value(folio))
2984e45deec3SDavid Howells continue;
2985e45deec3SDavid Howells if (!folio)
298638c8a9a5SSteve French break;
298738c8a9a5SSteve French
2988*16380f52SYang Shi if (!folio_try_get(folio)) {
2989e45deec3SDavid Howells xas_reset(xas);
2990e45deec3SDavid Howells continue;
2991e45deec3SDavid Howells }
299238c8a9a5SSteve French
2993e45deec3SDavid Howells if (unlikely(folio != xas_reload(xas))) {
2994e45deec3SDavid Howells folio_put(folio);
2995e45deec3SDavid Howells xas_reset(xas);
2996e45deec3SDavid Howells continue;
2997e45deec3SDavid Howells }
2998e45deec3SDavid Howells
2999e45deec3SDavid Howells xas_pause(xas);
3000e45deec3SDavid Howells break;
3001e45deec3SDavid Howells }
3002e45deec3SDavid Howells rcu_read_unlock();
3003e45deec3SDavid Howells if (!folio)
3004e45deec3SDavid Howells return 0;
3005e45deec3SDavid Howells
300638c8a9a5SSteve French start = folio_pos(folio); /* May regress with THPs */
300738c8a9a5SSteve French
3008e45deec3SDavid Howells /* At this point we hold neither the i_pages lock nor the page lock:
3009e45deec3SDavid Howells * the page may be truncated or invalidated (changing page->mapping to
3010e45deec3SDavid Howells * NULL), or even swizzled back from swapper_space to tmpfs file
3011e45deec3SDavid Howells * mapping
301238c8a9a5SSteve French */
3013e45deec3SDavid Howells lock_again:
301438c8a9a5SSteve French if (wbc->sync_mode != WB_SYNC_NONE) {
301538c8a9a5SSteve French ret = folio_lock_killable(folio);
301638c8a9a5SSteve French if (ret < 0)
3017e45deec3SDavid Howells return ret;
301838c8a9a5SSteve French } else {
301938c8a9a5SSteve French if (!folio_trylock(folio))
3020e45deec3SDavid Howells goto search_again;
302138c8a9a5SSteve French }
302238c8a9a5SSteve French
3023d3c79235SDavid Howells if (folio->mapping != mapping ||
302438c8a9a5SSteve French !folio_test_dirty(folio)) {
302538c8a9a5SSteve French start += folio_size(folio);
302638c8a9a5SSteve French folio_unlock(folio);
3027e45deec3SDavid Howells goto search_again;
302838c8a9a5SSteve French }
302938c8a9a5SSteve French
303038c8a9a5SSteve French if (folio_test_writeback(folio) ||
303138c8a9a5SSteve French folio_test_fscache(folio)) {
303238c8a9a5SSteve French folio_unlock(folio);
3033e45deec3SDavid Howells if (wbc->sync_mode != WB_SYNC_NONE) {
303438c8a9a5SSteve French folio_wait_writeback(folio);
303538c8a9a5SSteve French #ifdef CONFIG_CIFS_FSCACHE
303638c8a9a5SSteve French folio_wait_fscache(folio);
303738c8a9a5SSteve French #endif
3038e45deec3SDavid Howells goto lock_again;
303938c8a9a5SSteve French }
304038c8a9a5SSteve French
3041e45deec3SDavid Howells start += folio_size(folio);
3042e45deec3SDavid Howells if (wbc->sync_mode == WB_SYNC_NONE) {
304338c8a9a5SSteve French if (skips >= 5 || need_resched()) {
304438c8a9a5SSteve French ret = 0;
3045e45deec3SDavid Howells goto out;
304638c8a9a5SSteve French }
304738c8a9a5SSteve French skips++;
3048e45deec3SDavid Howells }
3049e45deec3SDavid Howells goto search_again;
305038c8a9a5SSteve French }
305138c8a9a5SSteve French
3052e45deec3SDavid Howells ret = cifs_write_back_from_locked_folio(mapping, wbc, xas, folio, start, end);
3053e45deec3SDavid Howells out:
3054e45deec3SDavid Howells if (ret > 0)
3055e45deec3SDavid Howells *_start = start + ret;
3056e45deec3SDavid Howells return ret;
3057e45deec3SDavid Howells }
305838c8a9a5SSteve French
3059e45deec3SDavid Howells /*
3060e45deec3SDavid Howells * Write a region of pages back to the server
3061e45deec3SDavid Howells */
3062e45deec3SDavid Howells static int cifs_writepages_region(struct address_space *mapping,
3063e45deec3SDavid Howells struct writeback_control *wbc,
3064e45deec3SDavid Howells unsigned long long *_start,
3065e45deec3SDavid Howells unsigned long long end)
3066e45deec3SDavid Howells {
3067e45deec3SDavid Howells ssize_t ret;
3068e45deec3SDavid Howells
3069e45deec3SDavid Howells XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
3070e45deec3SDavid Howells
3071e45deec3SDavid Howells do {
3072e45deec3SDavid Howells ret = cifs_writepages_begin(mapping, wbc, &xas, _start, end);
3073e45deec3SDavid Howells if (ret > 0 && wbc->nr_to_write > 0)
3074e45deec3SDavid Howells cond_resched();
3075e45deec3SDavid Howells } while (ret > 0 && wbc->nr_to_write > 0);
3076e45deec3SDavid Howells
3077e45deec3SDavid Howells return ret > 0 ? 0 : ret;
307838c8a9a5SSteve French }
307938c8a9a5SSteve French
308038c8a9a5SSteve French /*
308138c8a9a5SSteve French * Write some of the pending data back to the server
308238c8a9a5SSteve French */
308338c8a9a5SSteve French static int cifs_writepages(struct address_space *mapping,
308438c8a9a5SSteve French struct writeback_control *wbc)
308538c8a9a5SSteve French {
3086e45deec3SDavid Howells loff_t start, end;
308738c8a9a5SSteve French int ret;
308838c8a9a5SSteve French
308938c8a9a5SSteve French /* We have to be careful as we can end up racing with setattr()
309038c8a9a5SSteve French * truncating the pagecache since the caller doesn't take a lock here
309138c8a9a5SSteve French * to prevent it.
309238c8a9a5SSteve French */
309338c8a9a5SSteve French
3094e45deec3SDavid Howells if (wbc->range_cyclic && mapping->writeback_index) {
309538c8a9a5SSteve French start = mapping->writeback_index * PAGE_SIZE;
3096e45deec3SDavid Howells ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
3097e45deec3SDavid Howells if (ret < 0)
3098e45deec3SDavid Howells goto out;
3099e45deec3SDavid Howells
3100e45deec3SDavid Howells if (wbc->nr_to_write <= 0) {
3101e45deec3SDavid Howells mapping->writeback_index = start / PAGE_SIZE;
3102e45deec3SDavid Howells goto out;
310338c8a9a5SSteve French }
310438c8a9a5SSteve French
3105e45deec3SDavid Howells start = 0;
3106e45deec3SDavid Howells end = mapping->writeback_index * PAGE_SIZE;
3107e45deec3SDavid Howells mapping->writeback_index = 0;
3108e45deec3SDavid Howells ret = cifs_writepages_region(mapping, wbc, &start, end);
3109e45deec3SDavid Howells if (ret == 0)
3110e45deec3SDavid Howells mapping->writeback_index = start / PAGE_SIZE;
3111e45deec3SDavid Howells } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
3112e45deec3SDavid Howells start = 0;
3113e45deec3SDavid Howells ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
3114e45deec3SDavid Howells if (wbc->nr_to_write > 0 && ret == 0)
3115e45deec3SDavid Howells mapping->writeback_index = start / PAGE_SIZE;
3116e45deec3SDavid Howells } else {
3117e45deec3SDavid Howells start = wbc->range_start;
3118e45deec3SDavid Howells ret = cifs_writepages_region(mapping, wbc, &start, wbc->range_end);
3119e45deec3SDavid Howells }
3120e45deec3SDavid Howells
3121e45deec3SDavid Howells out:
312238c8a9a5SSteve French return ret;
312338c8a9a5SSteve French }
312438c8a9a5SSteve French
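/*
 * Write a single locked page back to the server, retrying -EAGAIN for
 * data-integrity (WB_SYNC_ALL) writeback and redirtying the page on other
 * retryable errors.
 */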
312538c8a9a5SSteve French static int
312638c8a9a5SSteve French cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
312738c8a9a5SSteve French {
312838c8a9a5SSteve French int rc;
312938c8a9a5SSteve French unsigned int xid;
313038c8a9a5SSteve French
313138c8a9a5SSteve French xid = get_xid();
313238c8a9a5SSteve French /* BB add check for wbc flags */
313338c8a9a5SSteve French get_page(page);
313438c8a9a5SSteve French if (!PageUptodate(page))
313538c8a9a5SSteve French cifs_dbg(FYI, "ppw - page not up to date\n");
313638c8a9a5SSteve French
313738c8a9a5SSteve French /*
313838c8a9a5SSteve French * Set the "writeback" flag, and clear "dirty" in the radix tree.
313938c8a9a5SSteve French *
314038c8a9a5SSteve French * A writepage() implementation always needs to do either this,
314138c8a9a5SSteve French * or re-dirty the page with "redirty_page_for_writepage()" in
314238c8a9a5SSteve French * the case of a failure.
314338c8a9a5SSteve French *
314438c8a9a5SSteve French * Just unlocking the page will cause the radix tree tag-bits
314538c8a9a5SSteve French * to fail to update with the state of the page correctly.
314638c8a9a5SSteve French */
314738c8a9a5SSteve French set_page_writeback(page);
314838c8a9a5SSteve French retry_write:
314938c8a9a5SSteve French rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
315038c8a9a5SSteve French if (is_retryable_error(rc)) {
315138c8a9a5SSteve French if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
315238c8a9a5SSteve French goto retry_write;
315338c8a9a5SSteve French redirty_page_for_writepage(wbc, page);
315438c8a9a5SSteve French } else if (rc != 0) {
315538c8a9a5SSteve French SetPageError(page);
315638c8a9a5SSteve French mapping_set_error(page->mapping, rc);
315738c8a9a5SSteve French } else {
315838c8a9a5SSteve French SetPageUptodate(page);
315938c8a9a5SSteve French }
316038c8a9a5SSteve French end_page_writeback(page);
316138c8a9a5SSteve French put_page(page);
316238c8a9a5SSteve French free_xid(xid);
316338c8a9a5SSteve French return rc;
316438c8a9a5SSteve French }
316538c8a9a5SSteve French
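/*
 * ->write_end() for the cifs address space: mark the page up to date and
 * dirty when the copy covered it, or push the copied bytes straight to the
 * server via cifs_write() when the page is not up to date, then update
 * i_size and the estimated block count.
 */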
316638c8a9a5SSteve French static int cifs_write_end(struct file *file, struct address_space *mapping,
316738c8a9a5SSteve French loff_t pos, unsigned len, unsigned copied,
316838c8a9a5SSteve French struct page *page, void *fsdata)
316938c8a9a5SSteve French {
317038c8a9a5SSteve French int rc;
317138c8a9a5SSteve French struct inode *inode = mapping->host;
317238c8a9a5SSteve French struct cifsFileInfo *cfile = file->private_data;
317338c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
317438c8a9a5SSteve French struct folio *folio = page_folio(page);
317538c8a9a5SSteve French __u32 pid;
317638c8a9a5SSteve French
317738c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
317838c8a9a5SSteve French pid = cfile->pid;
317938c8a9a5SSteve French else
318038c8a9a5SSteve French pid = current->tgid;
318138c8a9a5SSteve French
318238c8a9a5SSteve French cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
318338c8a9a5SSteve French page, pos, copied);
318438c8a9a5SSteve French
318538c8a9a5SSteve French if (folio_test_checked(folio)) {
318638c8a9a5SSteve French if (copied == len)
318738c8a9a5SSteve French folio_mark_uptodate(folio);
318838c8a9a5SSteve French folio_clear_checked(folio);
318938c8a9a5SSteve French } else if (!folio_test_uptodate(folio) && copied == PAGE_SIZE)
319038c8a9a5SSteve French folio_mark_uptodate(folio);
319138c8a9a5SSteve French
319238c8a9a5SSteve French if (!folio_test_uptodate(folio)) {
319338c8a9a5SSteve French char *page_data;
319438c8a9a5SSteve French unsigned offset = pos & (PAGE_SIZE - 1);
319538c8a9a5SSteve French unsigned int xid;
319638c8a9a5SSteve French
319738c8a9a5SSteve French xid = get_xid();
319838c8a9a5SSteve French /* this is probably better than directly calling
319938c8a9a5SSteve French partialpage_write since the file handle is already known
320038c8a9a5SSteve French here, which we might as well leverage */
320138c8a9a5SSteve French /* BB check if anything else missing out of ppw
320238c8a9a5SSteve French such as updating last write time */
320338c8a9a5SSteve French page_data = kmap(page);
320438c8a9a5SSteve French rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
320538c8a9a5SSteve French /* if (rc < 0) should we set writebehind rc? */
320638c8a9a5SSteve French kunmap(page);
320738c8a9a5SSteve French
320838c8a9a5SSteve French free_xid(xid);
320938c8a9a5SSteve French } else {
321038c8a9a5SSteve French rc = copied;
321138c8a9a5SSteve French pos += copied;
321238c8a9a5SSteve French set_page_dirty(page);
321338c8a9a5SSteve French }
321438c8a9a5SSteve French
321538c8a9a5SSteve French if (rc > 0) {
321638c8a9a5SSteve French spin_lock(&inode->i_lock);
321738c8a9a5SSteve French if (pos > inode->i_size) {
3218e4232010SSteve French loff_t additional_blocks = (512 - 1 + copied) >> 9;
3219e4232010SSteve French
322038c8a9a5SSteve French i_size_write(inode, pos);
3221e4232010SSteve French /*
3222e4232010SSteve French * Estimate new allocation size based on the amount written.
3223e4232010SSteve French * This will be updated from server on close (and on queryinfo)
3224e4232010SSteve French */
3225e4232010SSteve French inode->i_blocks = min_t(blkcnt_t, (512 - 1 + pos) >> 9,
3226e4232010SSteve French inode->i_blocks + additional_blocks);
322738c8a9a5SSteve French }
322838c8a9a5SSteve French spin_unlock(&inode->i_lock);
322938c8a9a5SSteve French }
323038c8a9a5SSteve French
323138c8a9a5SSteve French unlock_page(page);
323238c8a9a5SSteve French put_page(page);
323338c8a9a5SSteve French /* Indication to update ctime and mtime as close is deferred */
323438c8a9a5SSteve French set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
323538c8a9a5SSteve French
323638c8a9a5SSteve French return rc;
323738c8a9a5SSteve French }
323838c8a9a5SSteve French
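/*
 * Flush dirty pages, zap the pagecache if read caching is no longer granted
 * on the inode, and then ask the server to flush the file handle unless
 * CIFS_MOUNT_NOSSYNC is set.
 */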
323938c8a9a5SSteve French int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
324038c8a9a5SSteve French int datasync)
324138c8a9a5SSteve French {
324238c8a9a5SSteve French unsigned int xid;
324338c8a9a5SSteve French int rc = 0;
324438c8a9a5SSteve French struct cifs_tcon *tcon;
324538c8a9a5SSteve French struct TCP_Server_Info *server;
324638c8a9a5SSteve French struct cifsFileInfo *smbfile = file->private_data;
324738c8a9a5SSteve French struct inode *inode = file_inode(file);
324838c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
324938c8a9a5SSteve French
325038c8a9a5SSteve French rc = file_write_and_wait_range(file, start, end);
325138c8a9a5SSteve French if (rc) {
325238c8a9a5SSteve French trace_cifs_fsync_err(inode->i_ino, rc);
325338c8a9a5SSteve French return rc;
325438c8a9a5SSteve French }
325538c8a9a5SSteve French
325638c8a9a5SSteve French xid = get_xid();
325738c8a9a5SSteve French
325838c8a9a5SSteve French cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
325938c8a9a5SSteve French file, datasync);
326038c8a9a5SSteve French
326138c8a9a5SSteve French if (!CIFS_CACHE_READ(CIFS_I(inode))) {
326238c8a9a5SSteve French rc = cifs_zap_mapping(inode);
326338c8a9a5SSteve French if (rc) {
326438c8a9a5SSteve French cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
326538c8a9a5SSteve French rc = 0; /* don't care about it in fsync */
326638c8a9a5SSteve French }
326738c8a9a5SSteve French }
326838c8a9a5SSteve French
326938c8a9a5SSteve French tcon = tlink_tcon(smbfile->tlink);
327038c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
327138c8a9a5SSteve French server = tcon->ses->server;
327238c8a9a5SSteve French if (server->ops->flush == NULL) {
327338c8a9a5SSteve French rc = -ENOSYS;
327438c8a9a5SSteve French goto strict_fsync_exit;
327538c8a9a5SSteve French }
327638c8a9a5SSteve French
327738c8a9a5SSteve French if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
327838c8a9a5SSteve French smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
327938c8a9a5SSteve French if (smbfile) {
328038c8a9a5SSteve French rc = server->ops->flush(xid, tcon, &smbfile->fid);
328138c8a9a5SSteve French cifsFileInfo_put(smbfile);
328238c8a9a5SSteve French } else
328338c8a9a5SSteve French cifs_dbg(FYI, "ignore fsync for file not open for write\n");
328438c8a9a5SSteve French } else
328538c8a9a5SSteve French rc = server->ops->flush(xid, tcon, &smbfile->fid);
328638c8a9a5SSteve French }
328738c8a9a5SSteve French
328838c8a9a5SSteve French strict_fsync_exit:
328938c8a9a5SSteve French free_xid(xid);
329038c8a9a5SSteve French return rc;
329138c8a9a5SSteve French }
329238c8a9a5SSteve French
cifs_fsync(struct file * file,loff_t start,loff_t end,int datasync)329338c8a9a5SSteve French int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
329438c8a9a5SSteve French {
329538c8a9a5SSteve French unsigned int xid;
329638c8a9a5SSteve French int rc = 0;
329738c8a9a5SSteve French struct cifs_tcon *tcon;
329838c8a9a5SSteve French struct TCP_Server_Info *server;
329938c8a9a5SSteve French struct cifsFileInfo *smbfile = file->private_data;
330038c8a9a5SSteve French struct inode *inode = file_inode(file);
330138c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
330238c8a9a5SSteve French
330338c8a9a5SSteve French rc = file_write_and_wait_range(file, start, end);
330438c8a9a5SSteve French if (rc) {
330538c8a9a5SSteve French trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
330638c8a9a5SSteve French return rc;
330738c8a9a5SSteve French }
330838c8a9a5SSteve French
330938c8a9a5SSteve French xid = get_xid();
331038c8a9a5SSteve French
331138c8a9a5SSteve French cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
331238c8a9a5SSteve French file, datasync);
331338c8a9a5SSteve French
331438c8a9a5SSteve French tcon = tlink_tcon(smbfile->tlink);
331538c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
331638c8a9a5SSteve French server = tcon->ses->server;
331738c8a9a5SSteve French if (server->ops->flush == NULL) {
331838c8a9a5SSteve French rc = -ENOSYS;
331938c8a9a5SSteve French goto fsync_exit;
332038c8a9a5SSteve French }
332138c8a9a5SSteve French
332238c8a9a5SSteve French if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
332338c8a9a5SSteve French smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
332438c8a9a5SSteve French if (smbfile) {
332538c8a9a5SSteve French rc = server->ops->flush(xid, tcon, &smbfile->fid);
332638c8a9a5SSteve French cifsFileInfo_put(smbfile);
332738c8a9a5SSteve French } else
332838c8a9a5SSteve French cifs_dbg(FYI, "ignore fsync for file not open for write\n");
332938c8a9a5SSteve French } else
333038c8a9a5SSteve French rc = server->ops->flush(xid, tcon, &smbfile->fid);
333138c8a9a5SSteve French }
333238c8a9a5SSteve French
333338c8a9a5SSteve French fsync_exit:
333438c8a9a5SSteve French free_xid(xid);
333538c8a9a5SSteve French return rc;
333638c8a9a5SSteve French }
333738c8a9a5SSteve French
333838c8a9a5SSteve French /*
333938c8a9a5SSteve French * As the file closes, flush all cached write data for this inode,
334038c8a9a5SSteve French * checking for write-behind errors.
334138c8a9a5SSteve French */
cifs_flush(struct file * file,fl_owner_t id)334238c8a9a5SSteve French int cifs_flush(struct file *file, fl_owner_t id)
334338c8a9a5SSteve French {
334438c8a9a5SSteve French struct inode *inode = file_inode(file);
334538c8a9a5SSteve French int rc = 0;
334638c8a9a5SSteve French
334738c8a9a5SSteve French if (file->f_mode & FMODE_WRITE)
334838c8a9a5SSteve French rc = filemap_write_and_wait(inode->i_mapping);
334938c8a9a5SSteve French
335038c8a9a5SSteve French cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
335138c8a9a5SSteve French if (rc) {
335238c8a9a5SSteve French /* get more nuanced writeback errors */
335338c8a9a5SSteve French rc = filemap_check_wb_err(file->f_mapping, 0);
335438c8a9a5SSteve French trace_cifs_flush_err(inode->i_ino, rc);
335538c8a9a5SSteve French }
335638c8a9a5SSteve French return rc;
335738c8a9a5SSteve French }
335838c8a9a5SSteve French
335938c8a9a5SSteve French static void
cifs_uncached_writedata_release(struct kref * refcount)336038c8a9a5SSteve French cifs_uncached_writedata_release(struct kref *refcount)
336138c8a9a5SSteve French {
336238c8a9a5SSteve French struct cifs_writedata *wdata = container_of(refcount,
336338c8a9a5SSteve French struct cifs_writedata, refcount);
336438c8a9a5SSteve French
336538c8a9a5SSteve French kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
336638c8a9a5SSteve French cifs_writedata_release(refcount);
336738c8a9a5SSteve French }
336838c8a9a5SSteve French
336938c8a9a5SSteve French static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
337038c8a9a5SSteve French
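/*
 * Completion work for an uncached write: push the cached EOF out past
 * the bytes just written (under i_lock), signal completion and let the
 * collector gather this wdata's result.
 */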
337138c8a9a5SSteve French static void
cifs_uncached_writev_complete(struct work_struct * work)337238c8a9a5SSteve French cifs_uncached_writev_complete(struct work_struct *work)
337338c8a9a5SSteve French {
337438c8a9a5SSteve French struct cifs_writedata *wdata = container_of(work,
337538c8a9a5SSteve French struct cifs_writedata, work);
337638c8a9a5SSteve French struct inode *inode = d_inode(wdata->cfile->dentry);
337738c8a9a5SSteve French struct cifsInodeInfo *cifsi = CIFS_I(inode);
337838c8a9a5SSteve French
337938c8a9a5SSteve French spin_lock(&inode->i_lock);
338038c8a9a5SSteve French cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
338138c8a9a5SSteve French if (cifsi->server_eof > inode->i_size)
338238c8a9a5SSteve French i_size_write(inode, cifsi->server_eof);
338338c8a9a5SSteve French spin_unlock(&inode->i_lock);
338438c8a9a5SSteve French
338538c8a9a5SSteve French complete(&wdata->done);
338638c8a9a5SSteve French collect_uncached_write_data(wdata->ctx);
338738c8a9a5SSteve French /* the below call can possibly free the last ref to aio ctx */
338838c8a9a5SSteve French kref_put(&wdata->refcount, cifs_uncached_writedata_release);
338938c8a9a5SSteve French }
339038c8a9a5SSteve French
339138c8a9a5SSteve French static int
cifs_resend_wdata(struct cifs_writedata * wdata,struct list_head * wdata_list,struct cifs_aio_ctx * ctx)339238c8a9a5SSteve French cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
339338c8a9a5SSteve French struct cifs_aio_ctx *ctx)
339438c8a9a5SSteve French {
339538c8a9a5SSteve French unsigned int wsize;
339638c8a9a5SSteve French struct cifs_credits credits;
339738c8a9a5SSteve French int rc;
339838c8a9a5SSteve French struct TCP_Server_Info *server = wdata->server;
339938c8a9a5SSteve French
340038c8a9a5SSteve French do {
340138c8a9a5SSteve French if (wdata->cfile->invalidHandle) {
340238c8a9a5SSteve French rc = cifs_reopen_file(wdata->cfile, false);
340338c8a9a5SSteve French if (rc == -EAGAIN)
340438c8a9a5SSteve French continue;
340538c8a9a5SSteve French else if (rc)
340638c8a9a5SSteve French break;
340738c8a9a5SSteve French }
340838c8a9a5SSteve French
340938c8a9a5SSteve French
341038c8a9a5SSteve French /*
341138c8a9a5SSteve French * Wait for credits to resend this wdata.
341238c8a9a5SSteve French * Note: we attempt to resend the whole wdata rather than in
341338c8a9a5SSteve French * segments.
341438c8a9a5SSteve French */
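/*
 * If the grant is smaller than the wdata, hand the credits back and
 * retry roughly once a second until the server offers enough to cover
 * the whole request.
 */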
341538c8a9a5SSteve French do {
341638c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, wdata->bytes,
341738c8a9a5SSteve French &wsize, &credits);
341838c8a9a5SSteve French if (rc)
341938c8a9a5SSteve French goto fail;
342038c8a9a5SSteve French
342138c8a9a5SSteve French if (wsize < wdata->bytes) {
342238c8a9a5SSteve French add_credits_and_wake_if(server, &credits, 0);
342338c8a9a5SSteve French msleep(1000);
342438c8a9a5SSteve French }
342538c8a9a5SSteve French } while (wsize < wdata->bytes);
342638c8a9a5SSteve French wdata->credits = credits;
342738c8a9a5SSteve French
342838c8a9a5SSteve French rc = adjust_credits(server, &wdata->credits, wdata->bytes);
342938c8a9a5SSteve French
343038c8a9a5SSteve French if (!rc) {
343138c8a9a5SSteve French if (wdata->cfile->invalidHandle)
343238c8a9a5SSteve French rc = -EAGAIN;
343338c8a9a5SSteve French else {
3434cdd7870aSShyam Prasad N wdata->replay = true;
343538c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
343638c8a9a5SSteve French if (wdata->mr) {
343738c8a9a5SSteve French wdata->mr->need_invalidate = true;
343838c8a9a5SSteve French smbd_deregister_mr(wdata->mr);
343938c8a9a5SSteve French wdata->mr = NULL;
344038c8a9a5SSteve French }
344138c8a9a5SSteve French #endif
344238c8a9a5SSteve French rc = server->ops->async_writev(wdata,
344338c8a9a5SSteve French cifs_uncached_writedata_release);
344438c8a9a5SSteve French }
344538c8a9a5SSteve French }
344638c8a9a5SSteve French
344738c8a9a5SSteve French /* If the write was successfully sent, we are done */
344838c8a9a5SSteve French if (!rc) {
344938c8a9a5SSteve French list_add_tail(&wdata->list, wdata_list);
345038c8a9a5SSteve French return 0;
345138c8a9a5SSteve French }
345238c8a9a5SSteve French
345338c8a9a5SSteve French /* Roll back credits and retry if needed */
345438c8a9a5SSteve French add_credits_and_wake_if(server, &wdata->credits, 0);
345538c8a9a5SSteve French } while (rc == -EAGAIN);
345638c8a9a5SSteve French
345738c8a9a5SSteve French fail:
345838c8a9a5SSteve French kref_put(&wdata->refcount, cifs_uncached_writedata_release);
345938c8a9a5SSteve French return rc;
346038c8a9a5SSteve French }
346138c8a9a5SSteve French
346238c8a9a5SSteve French /*
346338c8a9a5SSteve French * Select span of a bvec iterator we're going to use. Limit it by both maximum
346438c8a9a5SSteve French * size and maximum number of segments.
346538c8a9a5SSteve French */
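/*
 * For example, with three 4096-byte bvec segments, no iov_offset,
 * max_size = 6144 and max_segs = 2:
 *
 *	span = cifs_limit_bvec_subset(iter, 6144, 2, &nsegs);
 *
 * returns 6144 (all of segment 0 plus 2048 bytes of segment 1) and
 * sets nsegs to 2; the callers then truncate their iterator copy to
 * that span before issuing the I/O.
 */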
cifs_limit_bvec_subset(const struct iov_iter * iter,size_t max_size,size_t max_segs,unsigned int * _nsegs)346638c8a9a5SSteve French static size_t cifs_limit_bvec_subset(const struct iov_iter *iter, size_t max_size,
346738c8a9a5SSteve French size_t max_segs, unsigned int *_nsegs)
346838c8a9a5SSteve French {
346938c8a9a5SSteve French const struct bio_vec *bvecs = iter->bvec;
347038c8a9a5SSteve French unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
347138c8a9a5SSteve French size_t len, span = 0, n = iter->count;
347238c8a9a5SSteve French size_t skip = iter->iov_offset;
347338c8a9a5SSteve French
347438c8a9a5SSteve French if (WARN_ON(!iov_iter_is_bvec(iter)) || n == 0)
347538c8a9a5SSteve French return 0;
347638c8a9a5SSteve French
347738c8a9a5SSteve French while (n && ix < nbv && skip) {
347838c8a9a5SSteve French len = bvecs[ix].bv_len;
347938c8a9a5SSteve French if (skip < len)
348038c8a9a5SSteve French break;
348138c8a9a5SSteve French skip -= len;
348238c8a9a5SSteve French n -= len;
348338c8a9a5SSteve French ix++;
348438c8a9a5SSteve French }
348538c8a9a5SSteve French
348638c8a9a5SSteve French while (n && ix < nbv) {
348738c8a9a5SSteve French len = min3(n, bvecs[ix].bv_len - skip, max_size);
348838c8a9a5SSteve French span += len;
348938c8a9a5SSteve French max_size -= len;
349038c8a9a5SSteve French nsegs++;
349138c8a9a5SSteve French ix++;
349238c8a9a5SSteve French if (max_size == 0 || nsegs >= max_segs)
349338c8a9a5SSteve French break;
349438c8a9a5SSteve French skip = 0;
349538c8a9a5SSteve French n -= len;
349638c8a9a5SSteve French }
349738c8a9a5SSteve French
349838c8a9a5SSteve French *_nsegs = nsegs;
349938c8a9a5SSteve French return span;
350038c8a9a5SSteve French }
350138c8a9a5SSteve French
350238c8a9a5SSteve French static int
cifs_write_from_iter(loff_t fpos,size_t len,struct iov_iter * from,struct cifsFileInfo * open_file,struct cifs_sb_info * cifs_sb,struct list_head * wdata_list,struct cifs_aio_ctx * ctx)350338c8a9a5SSteve French cifs_write_from_iter(loff_t fpos, size_t len, struct iov_iter *from,
350438c8a9a5SSteve French struct cifsFileInfo *open_file,
350538c8a9a5SSteve French struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
350638c8a9a5SSteve French struct cifs_aio_ctx *ctx)
350738c8a9a5SSteve French {
350838c8a9a5SSteve French int rc = 0;
350938c8a9a5SSteve French size_t cur_len, max_len;
351038c8a9a5SSteve French struct cifs_writedata *wdata;
351138c8a9a5SSteve French pid_t pid;
351238c8a9a5SSteve French struct TCP_Server_Info *server;
351338c8a9a5SSteve French unsigned int xid, max_segs = INT_MAX;
351438c8a9a5SSteve French
351538c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
351638c8a9a5SSteve French pid = open_file->pid;
351738c8a9a5SSteve French else
351838c8a9a5SSteve French pid = current->tgid;
351938c8a9a5SSteve French
352038c8a9a5SSteve French server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
352138c8a9a5SSteve French xid = get_xid();
352238c8a9a5SSteve French
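/*
 * With SMB Direct, each request is also limited to the number of pages
 * that fit into a single RDMA memory registration.
 */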
352338c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
352438c8a9a5SSteve French if (server->smbd_conn)
352538c8a9a5SSteve French max_segs = server->smbd_conn->max_frmr_depth;
352638c8a9a5SSteve French #endif
352738c8a9a5SSteve French
352838c8a9a5SSteve French do {
352938c8a9a5SSteve French struct cifs_credits credits_on_stack;
353038c8a9a5SSteve French struct cifs_credits *credits = &credits_on_stack;
353138c8a9a5SSteve French unsigned int wsize, nsegs = 0;
353238c8a9a5SSteve French
353338c8a9a5SSteve French if (signal_pending(current)) {
353438c8a9a5SSteve French rc = -EINTR;
353538c8a9a5SSteve French break;
353638c8a9a5SSteve French }
353738c8a9a5SSteve French
353838c8a9a5SSteve French if (open_file->invalidHandle) {
353938c8a9a5SSteve French rc = cifs_reopen_file(open_file, false);
354038c8a9a5SSteve French if (rc == -EAGAIN)
354138c8a9a5SSteve French continue;
354238c8a9a5SSteve French else if (rc)
354338c8a9a5SSteve French break;
354438c8a9a5SSteve French }
354538c8a9a5SSteve French
354638c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
354738c8a9a5SSteve French &wsize, credits);
354838c8a9a5SSteve French if (rc)
354938c8a9a5SSteve French break;
355038c8a9a5SSteve French
355138c8a9a5SSteve French max_len = min_t(const size_t, len, wsize);
355238c8a9a5SSteve French if (!max_len) {
355338c8a9a5SSteve French rc = -EAGAIN;
355438c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
355538c8a9a5SSteve French break;
355638c8a9a5SSteve French }
355738c8a9a5SSteve French
355838c8a9a5SSteve French cur_len = cifs_limit_bvec_subset(from, max_len, max_segs, &nsegs);
355938c8a9a5SSteve French cifs_dbg(FYI, "write_from_iter len=%zx/%zx nsegs=%u/%lu/%u\n",
356038c8a9a5SSteve French cur_len, max_len, nsegs, from->nr_segs, max_segs);
356138c8a9a5SSteve French if (cur_len == 0) {
356238c8a9a5SSteve French rc = -EIO;
356338c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
356438c8a9a5SSteve French break;
356538c8a9a5SSteve French }
356638c8a9a5SSteve French
356738c8a9a5SSteve French wdata = cifs_writedata_alloc(cifs_uncached_writev_complete);
356838c8a9a5SSteve French if (!wdata) {
356938c8a9a5SSteve French rc = -ENOMEM;
357038c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
357138c8a9a5SSteve French break;
357238c8a9a5SSteve French }
357338c8a9a5SSteve French
357438c8a9a5SSteve French wdata->sync_mode = WB_SYNC_ALL;
357538c8a9a5SSteve French wdata->offset = (__u64)fpos;
357638c8a9a5SSteve French wdata->cfile = cifsFileInfo_get(open_file);
357738c8a9a5SSteve French wdata->server = server;
357838c8a9a5SSteve French wdata->pid = pid;
357938c8a9a5SSteve French wdata->bytes = cur_len;
358038c8a9a5SSteve French wdata->credits = credits_on_stack;
358138c8a9a5SSteve French wdata->iter = *from;
358238c8a9a5SSteve French wdata->ctx = ctx;
358338c8a9a5SSteve French kref_get(&ctx->refcount);
358438c8a9a5SSteve French
358538c8a9a5SSteve French iov_iter_truncate(&wdata->iter, cur_len);
358638c8a9a5SSteve French
358738c8a9a5SSteve French rc = adjust_credits(server, &wdata->credits, wdata->bytes);
358838c8a9a5SSteve French
358938c8a9a5SSteve French if (!rc) {
359038c8a9a5SSteve French if (wdata->cfile->invalidHandle)
359138c8a9a5SSteve French rc = -EAGAIN;
359238c8a9a5SSteve French else
359338c8a9a5SSteve French rc = server->ops->async_writev(wdata,
359438c8a9a5SSteve French cifs_uncached_writedata_release);
359538c8a9a5SSteve French }
359638c8a9a5SSteve French
359738c8a9a5SSteve French if (rc) {
359838c8a9a5SSteve French add_credits_and_wake_if(server, &wdata->credits, 0);
359938c8a9a5SSteve French kref_put(&wdata->refcount,
360038c8a9a5SSteve French cifs_uncached_writedata_release);
360138c8a9a5SSteve French if (rc == -EAGAIN)
360238c8a9a5SSteve French continue;
360338c8a9a5SSteve French break;
360438c8a9a5SSteve French }
360538c8a9a5SSteve French
360638c8a9a5SSteve French list_add_tail(&wdata->list, wdata_list);
360738c8a9a5SSteve French iov_iter_advance(from, cur_len);
360838c8a9a5SSteve French fpos += cur_len;
360938c8a9a5SSteve French len -= cur_len;
361038c8a9a5SSteve French } while (len > 0);
361138c8a9a5SSteve French
361238c8a9a5SSteve French free_xid(xid);
361338c8a9a5SSteve French return rc;
361438c8a9a5SSteve French }
361538c8a9a5SSteve French
collect_uncached_write_data(struct cifs_aio_ctx * ctx)361638c8a9a5SSteve French static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
361738c8a9a5SSteve French {
361838c8a9a5SSteve French struct cifs_writedata *wdata, *tmp;
361938c8a9a5SSteve French struct cifs_tcon *tcon;
362038c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
362138c8a9a5SSteve French struct dentry *dentry = ctx->cfile->dentry;
362238c8a9a5SSteve French ssize_t rc;
362338c8a9a5SSteve French
362438c8a9a5SSteve French tcon = tlink_tcon(ctx->cfile->tlink);
362538c8a9a5SSteve French cifs_sb = CIFS_SB(dentry->d_sb);
362638c8a9a5SSteve French
362738c8a9a5SSteve French mutex_lock(&ctx->aio_mutex);
362838c8a9a5SSteve French
362938c8a9a5SSteve French if (list_empty(&ctx->list)) {
363038c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
363138c8a9a5SSteve French return;
363238c8a9a5SSteve French }
363338c8a9a5SSteve French
363438c8a9a5SSteve French rc = ctx->rc;
363538c8a9a5SSteve French /*
363638c8a9a5SSteve French * Wait for and collect replies for any successful sends in order of
363738c8a9a5SSteve French * increasing offset. Once an error is hit, return without waiting
363838c8a9a5SSteve French * for any more replies.
363938c8a9a5SSteve French */
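/*
 * A wdata that failed with -EAGAIN is resent before anything else is
 * collected: whole for direct I/O, or re-split from the saved iterator
 * otherwise; the fresh entries are spliced back in and the scan
 * restarts from the top of the list.
 */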
364038c8a9a5SSteve French restart_loop:
364138c8a9a5SSteve French list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
364238c8a9a5SSteve French if (!rc) {
364338c8a9a5SSteve French if (!try_wait_for_completion(&wdata->done)) {
364438c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
364538c8a9a5SSteve French return;
364638c8a9a5SSteve French }
364738c8a9a5SSteve French
364838c8a9a5SSteve French if (wdata->result)
364938c8a9a5SSteve French rc = wdata->result;
365038c8a9a5SSteve French else
365138c8a9a5SSteve French ctx->total_len += wdata->bytes;
365238c8a9a5SSteve French
365338c8a9a5SSteve French /* resend call if it's a retryable error */
365438c8a9a5SSteve French if (rc == -EAGAIN) {
365538c8a9a5SSteve French struct list_head tmp_list;
365638c8a9a5SSteve French struct iov_iter tmp_from = ctx->iter;
365738c8a9a5SSteve French
365838c8a9a5SSteve French INIT_LIST_HEAD(&tmp_list);
365938c8a9a5SSteve French list_del_init(&wdata->list);
366038c8a9a5SSteve French
366138c8a9a5SSteve French if (ctx->direct_io)
366238c8a9a5SSteve French rc = cifs_resend_wdata(
366338c8a9a5SSteve French wdata, &tmp_list, ctx);
366438c8a9a5SSteve French else {
366538c8a9a5SSteve French iov_iter_advance(&tmp_from,
366638c8a9a5SSteve French wdata->offset - ctx->pos);
366738c8a9a5SSteve French
366838c8a9a5SSteve French rc = cifs_write_from_iter(wdata->offset,
366938c8a9a5SSteve French wdata->bytes, &tmp_from,
367038c8a9a5SSteve French ctx->cfile, cifs_sb, &tmp_list,
367138c8a9a5SSteve French ctx);
367238c8a9a5SSteve French
367338c8a9a5SSteve French kref_put(&wdata->refcount,
367438c8a9a5SSteve French cifs_uncached_writedata_release);
367538c8a9a5SSteve French }
367638c8a9a5SSteve French
367738c8a9a5SSteve French list_splice(&tmp_list, &ctx->list);
367838c8a9a5SSteve French goto restart_loop;
367938c8a9a5SSteve French }
368038c8a9a5SSteve French }
368138c8a9a5SSteve French list_del_init(&wdata->list);
368238c8a9a5SSteve French kref_put(&wdata->refcount, cifs_uncached_writedata_release);
368338c8a9a5SSteve French }
368438c8a9a5SSteve French
368538c8a9a5SSteve French cifs_stats_bytes_written(tcon, ctx->total_len);
368638c8a9a5SSteve French set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
368738c8a9a5SSteve French
368838c8a9a5SSteve French ctx->rc = (rc == 0) ? ctx->total_len : rc;
368938c8a9a5SSteve French
369038c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
369138c8a9a5SSteve French
369238c8a9a5SSteve French if (ctx->iocb && ctx->iocb->ki_complete)
369338c8a9a5SSteve French ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
369438c8a9a5SSteve French else
369538c8a9a5SSteve French complete(&ctx->done);
369638c8a9a5SSteve French }
369738c8a9a5SSteve French
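/*
 * Common back end for cifs_user_writev() and cifs_direct_writev():
 * wrap the iterator in an aio context, split it into wsize-limited
 * async writes, then either wait for the collector (synchronous kiocb)
 * or return -EIOCBQUEUED and let ki_complete report the result.
 */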
__cifs_writev(struct kiocb * iocb,struct iov_iter * from,bool direct)369838c8a9a5SSteve French static ssize_t __cifs_writev(
369938c8a9a5SSteve French struct kiocb *iocb, struct iov_iter *from, bool direct)
370038c8a9a5SSteve French {
370138c8a9a5SSteve French struct file *file = iocb->ki_filp;
370238c8a9a5SSteve French ssize_t total_written = 0;
370338c8a9a5SSteve French struct cifsFileInfo *cfile;
370438c8a9a5SSteve French struct cifs_tcon *tcon;
370538c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
370638c8a9a5SSteve French struct cifs_aio_ctx *ctx;
370738c8a9a5SSteve French int rc;
370838c8a9a5SSteve French
370938c8a9a5SSteve French rc = generic_write_checks(iocb, from);
371038c8a9a5SSteve French if (rc <= 0)
371138c8a9a5SSteve French return rc;
371238c8a9a5SSteve French
371338c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file);
371438c8a9a5SSteve French cfile = file->private_data;
371538c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink);
371638c8a9a5SSteve French
371738c8a9a5SSteve French if (!tcon->ses->server->ops->async_writev)
371838c8a9a5SSteve French return -ENOSYS;
371938c8a9a5SSteve French
372038c8a9a5SSteve French ctx = cifs_aio_ctx_alloc();
372138c8a9a5SSteve French if (!ctx)
372238c8a9a5SSteve French return -ENOMEM;
372338c8a9a5SSteve French
372438c8a9a5SSteve French ctx->cfile = cifsFileInfo_get(cfile);
372538c8a9a5SSteve French
372638c8a9a5SSteve French if (!is_sync_kiocb(iocb))
372738c8a9a5SSteve French ctx->iocb = iocb;
372838c8a9a5SSteve French
372938c8a9a5SSteve French ctx->pos = iocb->ki_pos;
373038c8a9a5SSteve French ctx->direct_io = direct;
373138c8a9a5SSteve French ctx->nr_pinned_pages = 0;
373238c8a9a5SSteve French
373338c8a9a5SSteve French if (user_backed_iter(from)) {
373438c8a9a5SSteve French /*
373538c8a9a5SSteve French * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
373638c8a9a5SSteve French * they contain references to the calling process's virtual
373738c8a9a5SSteve French * memory layout which won't be available in an async worker
373838c8a9a5SSteve French * thread. This also takes a pin on every folio involved.
373938c8a9a5SSteve French */
374038c8a9a5SSteve French rc = netfs_extract_user_iter(from, iov_iter_count(from),
374138c8a9a5SSteve French &ctx->iter, 0);
374238c8a9a5SSteve French if (rc < 0) {
374338c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
374438c8a9a5SSteve French return rc;
374538c8a9a5SSteve French }
374638c8a9a5SSteve French
374738c8a9a5SSteve French ctx->nr_pinned_pages = rc;
374838c8a9a5SSteve French ctx->bv = (void *)ctx->iter.bvec;
374938c8a9a5SSteve French ctx->bv_need_unpin = iov_iter_extract_will_pin(from);
375038c8a9a5SSteve French } else if ((iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) &&
375138c8a9a5SSteve French !is_sync_kiocb(iocb)) {
375238c8a9a5SSteve French /*
375338c8a9a5SSteve French * If the op is asynchronous, we need to copy the list attached
375438c8a9a5SSteve French * to a BVEC/KVEC-type iterator, but we assume that the storage
375538c8a9a5SSteve French * will be pinned by the caller; in any case, we may or may not
375638c8a9a5SSteve French * be able to pin the pages, so we don't try.
375738c8a9a5SSteve French */
375838c8a9a5SSteve French ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL);
375938c8a9a5SSteve French if (!ctx->bv) {
376038c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
376138c8a9a5SSteve French return -ENOMEM;
376238c8a9a5SSteve French }
376338c8a9a5SSteve French } else {
376438c8a9a5SSteve French /*
376538c8a9a5SSteve French * Otherwise, we just pass the iterator down as-is and rely on
376638c8a9a5SSteve French * the caller to make sure the pages referred to by the
376738c8a9a5SSteve French * iterator don't evaporate.
376838c8a9a5SSteve French */
376938c8a9a5SSteve French ctx->iter = *from;
377038c8a9a5SSteve French }
377138c8a9a5SSteve French
377238c8a9a5SSteve French ctx->len = iov_iter_count(&ctx->iter);
377338c8a9a5SSteve French
377438c8a9a5SSteve French /* grab a lock here because the write response handlers can access ctx */
377538c8a9a5SSteve French mutex_lock(&ctx->aio_mutex);
377638c8a9a5SSteve French
377738c8a9a5SSteve French rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &ctx->iter,
377838c8a9a5SSteve French cfile, cifs_sb, &ctx->list, ctx);
377938c8a9a5SSteve French
378038c8a9a5SSteve French /*
378138c8a9a5SSteve French * If at least one write was successfully sent, then discard any rc
378238c8a9a5SSteve French * value from the later writes. If the remaining writes succeed, we
378338c8a9a5SSteve French * end up returning the total amount written. If any of them fails,
378438c8a9a5SSteve French * we pick up a new rc value from the collector.
378538c8a9a5SSteve French */
378638c8a9a5SSteve French if (!list_empty(&ctx->list))
378738c8a9a5SSteve French rc = 0;
378838c8a9a5SSteve French
378938c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
379038c8a9a5SSteve French
379138c8a9a5SSteve French if (rc) {
379238c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
379338c8a9a5SSteve French return rc;
379438c8a9a5SSteve French }
379538c8a9a5SSteve French
379638c8a9a5SSteve French if (!is_sync_kiocb(iocb)) {
379738c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
379838c8a9a5SSteve French return -EIOCBQUEUED;
379938c8a9a5SSteve French }
380038c8a9a5SSteve French
380138c8a9a5SSteve French rc = wait_for_completion_killable(&ctx->done);
380238c8a9a5SSteve French if (rc) {
380338c8a9a5SSteve French mutex_lock(&ctx->aio_mutex);
380438c8a9a5SSteve French ctx->rc = rc = -EINTR;
380538c8a9a5SSteve French total_written = ctx->total_len;
380638c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
380738c8a9a5SSteve French } else {
380838c8a9a5SSteve French rc = ctx->rc;
380938c8a9a5SSteve French total_written = ctx->total_len;
381038c8a9a5SSteve French }
381138c8a9a5SSteve French
381238c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
381338c8a9a5SSteve French
381438c8a9a5SSteve French if (unlikely(!total_written))
381538c8a9a5SSteve French return rc;
381638c8a9a5SSteve French
381738c8a9a5SSteve French iocb->ki_pos += total_written;
381838c8a9a5SSteve French return total_written;
381938c8a9a5SSteve French }
382038c8a9a5SSteve French
cifs_direct_writev(struct kiocb * iocb,struct iov_iter * from)382138c8a9a5SSteve French ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
382238c8a9a5SSteve French {
382338c8a9a5SSteve French struct file *file = iocb->ki_filp;
382438c8a9a5SSteve French
382538c8a9a5SSteve French cifs_revalidate_mapping(file->f_inode);
382638c8a9a5SSteve French return __cifs_writev(iocb, from, true);
382738c8a9a5SSteve French }
382838c8a9a5SSteve French
cifs_user_writev(struct kiocb * iocb,struct iov_iter * from)382938c8a9a5SSteve French ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
383038c8a9a5SSteve French {
383138c8a9a5SSteve French return __cifs_writev(iocb, from, false);
383238c8a9a5SSteve French }
383338c8a9a5SSteve French
383438c8a9a5SSteve French static ssize_t
cifs_writev(struct kiocb * iocb,struct iov_iter * from)383538c8a9a5SSteve French cifs_writev(struct kiocb *iocb, struct iov_iter *from)
383638c8a9a5SSteve French {
383738c8a9a5SSteve French struct file *file = iocb->ki_filp;
383838c8a9a5SSteve French struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
383938c8a9a5SSteve French struct inode *inode = file->f_mapping->host;
384038c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(inode);
384138c8a9a5SSteve French struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
384238c8a9a5SSteve French ssize_t rc;
384338c8a9a5SSteve French
384438c8a9a5SSteve French inode_lock(inode);
384538c8a9a5SSteve French /*
384638c8a9a5SSteve French * We need to hold the sem to be sure nobody modifies lock list
384738c8a9a5SSteve French * with a brlock that prevents writing.
384838c8a9a5SSteve French */
384938c8a9a5SSteve French down_read(&cinode->lock_sem);
385038c8a9a5SSteve French
385138c8a9a5SSteve French rc = generic_write_checks(iocb, from);
385238c8a9a5SSteve French if (rc <= 0)
385338c8a9a5SSteve French goto out;
385438c8a9a5SSteve French
385538c8a9a5SSteve French if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
385638c8a9a5SSteve French server->vals->exclusive_lock_type, 0,
385738c8a9a5SSteve French NULL, CIFS_WRITE_OP))
385838c8a9a5SSteve French rc = __generic_file_write_iter(iocb, from);
385938c8a9a5SSteve French else
386038c8a9a5SSteve French rc = -EACCES;
386138c8a9a5SSteve French out:
386238c8a9a5SSteve French up_read(&cinode->lock_sem);
386338c8a9a5SSteve French inode_unlock(inode);
386438c8a9a5SSteve French
386538c8a9a5SSteve French if (rc > 0)
386638c8a9a5SSteve French rc = generic_write_sync(iocb, rc);
386738c8a9a5SSteve French return rc;
386838c8a9a5SSteve French }
386938c8a9a5SSteve French
387038c8a9a5SSteve French ssize_t
cifs_strict_writev(struct kiocb * iocb,struct iov_iter * from)387138c8a9a5SSteve French cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
387238c8a9a5SSteve French {
387338c8a9a5SSteve French struct inode *inode = file_inode(iocb->ki_filp);
387438c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(inode);
387538c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
387638c8a9a5SSteve French struct cifsFileInfo *cfile = (struct cifsFileInfo *)
387738c8a9a5SSteve French iocb->ki_filp->private_data;
387838c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
387938c8a9a5SSteve French ssize_t written;
388038c8a9a5SSteve French
388138c8a9a5SSteve French written = cifs_get_writer(cinode);
388238c8a9a5SSteve French if (written)
388338c8a9a5SSteve French return written;
388438c8a9a5SSteve French
388538c8a9a5SSteve French if (CIFS_CACHE_WRITE(cinode)) {
388638c8a9a5SSteve French if (cap_unix(tcon->ses) &&
388738c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
388838c8a9a5SSteve French && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
388938c8a9a5SSteve French written = generic_file_write_iter(iocb, from);
389038c8a9a5SSteve French goto out;
389138c8a9a5SSteve French }
389238c8a9a5SSteve French written = cifs_writev(iocb, from);
389338c8a9a5SSteve French goto out;
389438c8a9a5SSteve French }
389538c8a9a5SSteve French /*
389638c8a9a5SSteve French * For non-oplocked files in strict cache mode we need to write the data
389738c8a9a5SSteve French * to the server exactly from the pos to pos+len-1 rather than flush all
389838c8a9a5SSteve French * affected pages because it may cause an error with mandatory locks on
389938c8a9a5SSteve French * these pages but not on the region from pos to pos+len-1.
390038c8a9a5SSteve French */
390138c8a9a5SSteve French written = cifs_user_writev(iocb, from);
390238c8a9a5SSteve French if (CIFS_CACHE_READ(cinode)) {
390338c8a9a5SSteve French /*
390438c8a9a5SSteve French * We have read level caching and we have just sent a write
390538c8a9a5SSteve French * request to the server thus making data in the cache stale.
390638c8a9a5SSteve French * Zap the cache and set oplock/lease level to NONE to avoid
390738c8a9a5SSteve French * reading stale data from the cache. All subsequent read
390838c8a9a5SSteve French * operations will read new data from the server.
390938c8a9a5SSteve French */
391038c8a9a5SSteve French cifs_zap_mapping(inode);
391138c8a9a5SSteve French cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
391238c8a9a5SSteve French inode);
391338c8a9a5SSteve French cinode->oplock = 0;
391438c8a9a5SSteve French }
391538c8a9a5SSteve French out:
391638c8a9a5SSteve French cifs_put_writer(cinode);
391738c8a9a5SSteve French return written;
391838c8a9a5SSteve French }
391938c8a9a5SSteve French
cifs_readdata_alloc(work_func_t complete)392038c8a9a5SSteve French static struct cifs_readdata *cifs_readdata_alloc(work_func_t complete)
392138c8a9a5SSteve French {
392238c8a9a5SSteve French struct cifs_readdata *rdata;
392338c8a9a5SSteve French
392438c8a9a5SSteve French rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
392538c8a9a5SSteve French if (rdata) {
392638c8a9a5SSteve French kref_init(&rdata->refcount);
392738c8a9a5SSteve French INIT_LIST_HEAD(&rdata->list);
392838c8a9a5SSteve French init_completion(&rdata->done);
392938c8a9a5SSteve French INIT_WORK(&rdata->work, complete);
393038c8a9a5SSteve French }
393138c8a9a5SSteve French
393238c8a9a5SSteve French return rdata;
393338c8a9a5SSteve French }
393438c8a9a5SSteve French
393538c8a9a5SSteve French void
cifs_readdata_release(struct kref * refcount)393638c8a9a5SSteve French cifs_readdata_release(struct kref *refcount)
393738c8a9a5SSteve French {
393838c8a9a5SSteve French struct cifs_readdata *rdata = container_of(refcount,
393938c8a9a5SSteve French struct cifs_readdata, refcount);
394038c8a9a5SSteve French
394138c8a9a5SSteve French if (rdata->ctx)
394238c8a9a5SSteve French kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
394338c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
394438c8a9a5SSteve French if (rdata->mr) {
394538c8a9a5SSteve French smbd_deregister_mr(rdata->mr);
394638c8a9a5SSteve French rdata->mr = NULL;
394738c8a9a5SSteve French }
394838c8a9a5SSteve French #endif
394938c8a9a5SSteve French if (rdata->cfile)
395038c8a9a5SSteve French cifsFileInfo_put(rdata->cfile);
395138c8a9a5SSteve French
395238c8a9a5SSteve French kfree(rdata);
395338c8a9a5SSteve French }
395438c8a9a5SSteve French
395538c8a9a5SSteve French static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
395638c8a9a5SSteve French
395738c8a9a5SSteve French static void
cifs_uncached_readv_complete(struct work_struct * work)395838c8a9a5SSteve French cifs_uncached_readv_complete(struct work_struct *work)
395938c8a9a5SSteve French {
396038c8a9a5SSteve French struct cifs_readdata *rdata = container_of(work,
396138c8a9a5SSteve French struct cifs_readdata, work);
396238c8a9a5SSteve French
396338c8a9a5SSteve French complete(&rdata->done);
396438c8a9a5SSteve French collect_uncached_read_data(rdata->ctx);
396538c8a9a5SSteve French /* the below call can possibly free the last ref to aio ctx */
396638c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
396738c8a9a5SSteve French }
396838c8a9a5SSteve French
cifs_resend_rdata(struct cifs_readdata * rdata,struct list_head * rdata_list,struct cifs_aio_ctx * ctx)396938c8a9a5SSteve French static int cifs_resend_rdata(struct cifs_readdata *rdata,
397038c8a9a5SSteve French struct list_head *rdata_list,
397138c8a9a5SSteve French struct cifs_aio_ctx *ctx)
397238c8a9a5SSteve French {
397338c8a9a5SSteve French unsigned int rsize;
397438c8a9a5SSteve French struct cifs_credits credits;
397538c8a9a5SSteve French int rc;
397638c8a9a5SSteve French struct TCP_Server_Info *server;
397738c8a9a5SSteve French
397838c8a9a5SSteve French /* XXX: should we pick a new channel here? */
397938c8a9a5SSteve French server = rdata->server;
398038c8a9a5SSteve French
398138c8a9a5SSteve French do {
398238c8a9a5SSteve French if (rdata->cfile->invalidHandle) {
398338c8a9a5SSteve French rc = cifs_reopen_file(rdata->cfile, true);
398438c8a9a5SSteve French if (rc == -EAGAIN)
398538c8a9a5SSteve French continue;
398638c8a9a5SSteve French else if (rc)
398738c8a9a5SSteve French break;
398838c8a9a5SSteve French }
398938c8a9a5SSteve French
399038c8a9a5SSteve French /*
399138c8a9a5SSteve French * Wait for credits to resend this rdata.
399238c8a9a5SSteve French * Note: we attempt to resend the whole rdata rather than in
399338c8a9a5SSteve French * segments.
399438c8a9a5SSteve French */
399538c8a9a5SSteve French do {
399638c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, rdata->bytes,
399738c8a9a5SSteve French &rsize, &credits);
399838c8a9a5SSteve French
399938c8a9a5SSteve French if (rc)
400038c8a9a5SSteve French goto fail;
400138c8a9a5SSteve French
400238c8a9a5SSteve French if (rsize < rdata->bytes) {
400338c8a9a5SSteve French add_credits_and_wake_if(server, &credits, 0);
400438c8a9a5SSteve French msleep(1000);
400538c8a9a5SSteve French }
400638c8a9a5SSteve French } while (rsize < rdata->bytes);
400738c8a9a5SSteve French rdata->credits = credits;
400838c8a9a5SSteve French
400938c8a9a5SSteve French rc = adjust_credits(server, &rdata->credits, rdata->bytes);
401038c8a9a5SSteve French if (!rc) {
401138c8a9a5SSteve French if (rdata->cfile->invalidHandle)
401238c8a9a5SSteve French rc = -EAGAIN;
401338c8a9a5SSteve French else {
401438c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
401538c8a9a5SSteve French if (rdata->mr) {
401638c8a9a5SSteve French rdata->mr->need_invalidate = true;
401738c8a9a5SSteve French smbd_deregister_mr(rdata->mr);
401838c8a9a5SSteve French rdata->mr = NULL;
401938c8a9a5SSteve French }
402038c8a9a5SSteve French #endif
402138c8a9a5SSteve French rc = server->ops->async_readv(rdata);
402238c8a9a5SSteve French }
402338c8a9a5SSteve French }
402438c8a9a5SSteve French
402538c8a9a5SSteve French /* If the read was successfully sent, we are done */
402638c8a9a5SSteve French if (!rc) {
402738c8a9a5SSteve French /* Add to aio pending list */
402838c8a9a5SSteve French list_add_tail(&rdata->list, rdata_list);
402938c8a9a5SSteve French return 0;
403038c8a9a5SSteve French }
403138c8a9a5SSteve French
403238c8a9a5SSteve French /* Roll back credits and retry if needed */
403338c8a9a5SSteve French add_credits_and_wake_if(server, &rdata->credits, 0);
403438c8a9a5SSteve French } while (rc == -EAGAIN);
403538c8a9a5SSteve French
403638c8a9a5SSteve French fail:
403738c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
403838c8a9a5SSteve French return rc;
403938c8a9a5SSteve French }
404038c8a9a5SSteve French
404138c8a9a5SSteve French static int
cifs_send_async_read(loff_t fpos,size_t len,struct cifsFileInfo * open_file,struct cifs_sb_info * cifs_sb,struct list_head * rdata_list,struct cifs_aio_ctx * ctx)404238c8a9a5SSteve French cifs_send_async_read(loff_t fpos, size_t len, struct cifsFileInfo *open_file,
404338c8a9a5SSteve French struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
404438c8a9a5SSteve French struct cifs_aio_ctx *ctx)
404538c8a9a5SSteve French {
404638c8a9a5SSteve French struct cifs_readdata *rdata;
404738c8a9a5SSteve French unsigned int rsize, nsegs, max_segs = INT_MAX;
404838c8a9a5SSteve French struct cifs_credits credits_on_stack;
404938c8a9a5SSteve French struct cifs_credits *credits = &credits_on_stack;
405038c8a9a5SSteve French size_t cur_len, max_len;
405138c8a9a5SSteve French int rc;
405238c8a9a5SSteve French pid_t pid;
405338c8a9a5SSteve French struct TCP_Server_Info *server;
405438c8a9a5SSteve French
405538c8a9a5SSteve French server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
405638c8a9a5SSteve French
405738c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT
405838c8a9a5SSteve French if (server->smbd_conn)
405938c8a9a5SSteve French max_segs = server->smbd_conn->max_frmr_depth;
406038c8a9a5SSteve French #endif
406138c8a9a5SSteve French
406238c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
406338c8a9a5SSteve French pid = open_file->pid;
406438c8a9a5SSteve French else
406538c8a9a5SSteve French pid = current->tgid;
406638c8a9a5SSteve French
406738c8a9a5SSteve French do {
406838c8a9a5SSteve French if (open_file->invalidHandle) {
406938c8a9a5SSteve French rc = cifs_reopen_file(open_file, true);
407038c8a9a5SSteve French if (rc == -EAGAIN)
407138c8a9a5SSteve French continue;
407238c8a9a5SSteve French else if (rc)
407338c8a9a5SSteve French break;
407438c8a9a5SSteve French }
407538c8a9a5SSteve French
407638c8a9a5SSteve French if (cifs_sb->ctx->rsize == 0)
407738c8a9a5SSteve French cifs_sb->ctx->rsize =
407838c8a9a5SSteve French server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
407938c8a9a5SSteve French cifs_sb->ctx);
408038c8a9a5SSteve French
408138c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
408238c8a9a5SSteve French &rsize, credits);
408338c8a9a5SSteve French if (rc)
408438c8a9a5SSteve French break;
408538c8a9a5SSteve French
408638c8a9a5SSteve French max_len = min_t(size_t, len, rsize);
408738c8a9a5SSteve French
408838c8a9a5SSteve French cur_len = cifs_limit_bvec_subset(&ctx->iter, max_len,
408938c8a9a5SSteve French max_segs, &nsegs);
409038c8a9a5SSteve French cifs_dbg(FYI, "read-to-iter len=%zx/%zx nsegs=%u/%lu/%u\n",
409138c8a9a5SSteve French cur_len, max_len, nsegs, ctx->iter.nr_segs, max_segs);
409238c8a9a5SSteve French if (cur_len == 0) {
409338c8a9a5SSteve French rc = -EIO;
409438c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
409538c8a9a5SSteve French break;
409638c8a9a5SSteve French }
409738c8a9a5SSteve French
409838c8a9a5SSteve French rdata = cifs_readdata_alloc(cifs_uncached_readv_complete);
409938c8a9a5SSteve French if (!rdata) {
410038c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
410138c8a9a5SSteve French rc = -ENOMEM;
410238c8a9a5SSteve French break;
410338c8a9a5SSteve French }
410438c8a9a5SSteve French
410538c8a9a5SSteve French rdata->server = server;
410638c8a9a5SSteve French rdata->cfile = cifsFileInfo_get(open_file);
410738c8a9a5SSteve French rdata->offset = fpos;
410838c8a9a5SSteve French rdata->bytes = cur_len;
410938c8a9a5SSteve French rdata->pid = pid;
411038c8a9a5SSteve French rdata->credits = credits_on_stack;
411138c8a9a5SSteve French rdata->ctx = ctx;
411238c8a9a5SSteve French kref_get(&ctx->refcount);
411338c8a9a5SSteve French
411438c8a9a5SSteve French rdata->iter = ctx->iter;
411538c8a9a5SSteve French iov_iter_truncate(&rdata->iter, cur_len);
411638c8a9a5SSteve French
411738c8a9a5SSteve French rc = adjust_credits(server, &rdata->credits, rdata->bytes);
411838c8a9a5SSteve French
411938c8a9a5SSteve French if (!rc) {
412038c8a9a5SSteve French if (rdata->cfile->invalidHandle)
412138c8a9a5SSteve French rc = -EAGAIN;
412238c8a9a5SSteve French else
412338c8a9a5SSteve French rc = server->ops->async_readv(rdata);
412438c8a9a5SSteve French }
412538c8a9a5SSteve French
412638c8a9a5SSteve French if (rc) {
412738c8a9a5SSteve French add_credits_and_wake_if(server, &rdata->credits, 0);
412838c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
412938c8a9a5SSteve French if (rc == -EAGAIN)
413038c8a9a5SSteve French continue;
413138c8a9a5SSteve French break;
413238c8a9a5SSteve French }
413338c8a9a5SSteve French
413438c8a9a5SSteve French list_add_tail(&rdata->list, rdata_list);
413538c8a9a5SSteve French iov_iter_advance(&ctx->iter, cur_len);
413638c8a9a5SSteve French fpos += cur_len;
413738c8a9a5SSteve French len -= cur_len;
413838c8a9a5SSteve French } while (len > 0);
413938c8a9a5SSteve French
414038c8a9a5SSteve French return rc;
414138c8a9a5SSteve French }
414238c8a9a5SSteve French
414338c8a9a5SSteve French static void
collect_uncached_read_data(struct cifs_aio_ctx * ctx)414438c8a9a5SSteve French collect_uncached_read_data(struct cifs_aio_ctx *ctx)
414538c8a9a5SSteve French {
414638c8a9a5SSteve French struct cifs_readdata *rdata, *tmp;
414738c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
414838c8a9a5SSteve French int rc;
414938c8a9a5SSteve French
415038c8a9a5SSteve French cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
415138c8a9a5SSteve French
415238c8a9a5SSteve French mutex_lock(&ctx->aio_mutex);
415338c8a9a5SSteve French
415438c8a9a5SSteve French if (list_empty(&ctx->list)) {
415538c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
415638c8a9a5SSteve French return;
415738c8a9a5SSteve French }
415838c8a9a5SSteve French
415938c8a9a5SSteve French rc = ctx->rc;
416038c8a9a5SSteve French /* the loop below should proceed in the order of increasing offsets */
416138c8a9a5SSteve French again:
416238c8a9a5SSteve French list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
416338c8a9a5SSteve French if (!rc) {
416438c8a9a5SSteve French if (!try_wait_for_completion(&rdata->done)) {
416538c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
416638c8a9a5SSteve French return;
416738c8a9a5SSteve French }
416838c8a9a5SSteve French
416938c8a9a5SSteve French if (rdata->result == -EAGAIN) {
417038c8a9a5SSteve French /* resend call if it's a retryable error */
417138c8a9a5SSteve French struct list_head tmp_list;
417238c8a9a5SSteve French unsigned int got_bytes = rdata->got_bytes;
417338c8a9a5SSteve French
417438c8a9a5SSteve French list_del_init(&rdata->list);
417538c8a9a5SSteve French INIT_LIST_HEAD(&tmp_list);
417638c8a9a5SSteve French
417738c8a9a5SSteve French if (ctx->direct_io) {
417838c8a9a5SSteve French /*
417938c8a9a5SSteve French * Re-use rdata as this is a
418038c8a9a5SSteve French * direct I/O
418138c8a9a5SSteve French */
418238c8a9a5SSteve French rc = cifs_resend_rdata(
418338c8a9a5SSteve French rdata,
418438c8a9a5SSteve French &tmp_list, ctx);
418538c8a9a5SSteve French } else {
418638c8a9a5SSteve French rc = cifs_send_async_read(
418738c8a9a5SSteve French rdata->offset + got_bytes,
418838c8a9a5SSteve French rdata->bytes - got_bytes,
418938c8a9a5SSteve French rdata->cfile, cifs_sb,
419038c8a9a5SSteve French &tmp_list, ctx);
419138c8a9a5SSteve French
419238c8a9a5SSteve French kref_put(&rdata->refcount,
419338c8a9a5SSteve French cifs_readdata_release);
419438c8a9a5SSteve French }
419538c8a9a5SSteve French
419638c8a9a5SSteve French list_splice(&tmp_list, &ctx->list);
419738c8a9a5SSteve French
419838c8a9a5SSteve French goto again;
419938c8a9a5SSteve French } else if (rdata->result)
420038c8a9a5SSteve French rc = rdata->result;
420138c8a9a5SSteve French
420238c8a9a5SSteve French /* if there was a short read -- discard anything left */
420338c8a9a5SSteve French if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
420438c8a9a5SSteve French rc = -ENODATA;
420538c8a9a5SSteve French
420638c8a9a5SSteve French ctx->total_len += rdata->got_bytes;
420738c8a9a5SSteve French }
420838c8a9a5SSteve French list_del_init(&rdata->list);
420938c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
421038c8a9a5SSteve French }
421138c8a9a5SSteve French
421238c8a9a5SSteve French /* mask nodata case */
421338c8a9a5SSteve French if (rc == -ENODATA)
421438c8a9a5SSteve French rc = 0;
421538c8a9a5SSteve French
421638c8a9a5SSteve French ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
421738c8a9a5SSteve French
421838c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
421938c8a9a5SSteve French
422038c8a9a5SSteve French if (ctx->iocb && ctx->iocb->ki_complete)
422138c8a9a5SSteve French ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
422238c8a9a5SSteve French else
422338c8a9a5SSteve French complete(&ctx->done);
422438c8a9a5SSteve French }
422538c8a9a5SSteve French
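/*
 * Common back end for cifs_user_readv() and cifs_direct_readv(): the
 * read-side counterpart of __cifs_writev(), issuing rsize-limited
 * async reads and collecting them through the aio context.
 */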
__cifs_readv(struct kiocb * iocb,struct iov_iter * to,bool direct)422638c8a9a5SSteve French static ssize_t __cifs_readv(
422738c8a9a5SSteve French struct kiocb *iocb, struct iov_iter *to, bool direct)
422838c8a9a5SSteve French {
422938c8a9a5SSteve French size_t len;
423038c8a9a5SSteve French struct file *file = iocb->ki_filp;
423138c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
423238c8a9a5SSteve French struct cifsFileInfo *cfile;
423338c8a9a5SSteve French struct cifs_tcon *tcon;
423438c8a9a5SSteve French ssize_t rc, total_read = 0;
423538c8a9a5SSteve French loff_t offset = iocb->ki_pos;
423638c8a9a5SSteve French struct cifs_aio_ctx *ctx;
423738c8a9a5SSteve French
423838c8a9a5SSteve French len = iov_iter_count(to);
423938c8a9a5SSteve French if (!len)
424038c8a9a5SSteve French return 0;
424138c8a9a5SSteve French
424238c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file);
424338c8a9a5SSteve French cfile = file->private_data;
424438c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink);
424538c8a9a5SSteve French
424638c8a9a5SSteve French if (!tcon->ses->server->ops->async_readv)
424738c8a9a5SSteve French return -ENOSYS;
424838c8a9a5SSteve French
424938c8a9a5SSteve French if ((file->f_flags & O_ACCMODE) == O_WRONLY)
425038c8a9a5SSteve French cifs_dbg(FYI, "attempting read on write only file instance\n");
425138c8a9a5SSteve French
425238c8a9a5SSteve French ctx = cifs_aio_ctx_alloc();
425338c8a9a5SSteve French if (!ctx)
425438c8a9a5SSteve French return -ENOMEM;
425538c8a9a5SSteve French
425638c8a9a5SSteve French ctx->pos = offset;
425738c8a9a5SSteve French ctx->direct_io = direct;
425838c8a9a5SSteve French ctx->len = len;
425938c8a9a5SSteve French ctx->cfile = cifsFileInfo_get(cfile);
426038c8a9a5SSteve French ctx->nr_pinned_pages = 0;
426138c8a9a5SSteve French
426238c8a9a5SSteve French if (!is_sync_kiocb(iocb))
426338c8a9a5SSteve French ctx->iocb = iocb;
426438c8a9a5SSteve French
426538c8a9a5SSteve French if (user_backed_iter(to)) {
426638c8a9a5SSteve French /*
426738c8a9a5SSteve French * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
426838c8a9a5SSteve French * they contain references to the calling process's virtual
426938c8a9a5SSteve French * memory layout which won't be available in an async worker
427038c8a9a5SSteve French * thread. This also takes a pin on every folio involved.
427138c8a9a5SSteve French */
427238c8a9a5SSteve French rc = netfs_extract_user_iter(to, iov_iter_count(to),
427338c8a9a5SSteve French &ctx->iter, 0);
427438c8a9a5SSteve French if (rc < 0) {
427538c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
427638c8a9a5SSteve French return rc;
427738c8a9a5SSteve French }
427838c8a9a5SSteve French
427938c8a9a5SSteve French ctx->nr_pinned_pages = rc;
428038c8a9a5SSteve French ctx->bv = (void *)ctx->iter.bvec;
428138c8a9a5SSteve French ctx->bv_need_unpin = iov_iter_extract_will_pin(to);
428238c8a9a5SSteve French ctx->should_dirty = true;
428338c8a9a5SSteve French } else if ((iov_iter_is_bvec(to) || iov_iter_is_kvec(to)) &&
428438c8a9a5SSteve French !is_sync_kiocb(iocb)) {
428538c8a9a5SSteve French /*
428638c8a9a5SSteve French * If the op is asynchronous, we need to copy the list attached
428738c8a9a5SSteve French * to a BVEC/KVEC-type iterator, but we assume that the storage
428838c8a9a5SSteve French * will be retained by the caller; in any case, we may or may
428938c8a9a5SSteve French * not be able to pin the pages, so we don't try.
429038c8a9a5SSteve French */
429138c8a9a5SSteve French ctx->bv = (void *)dup_iter(&ctx->iter, to, GFP_KERNEL);
429238c8a9a5SSteve French if (!ctx->bv) {
429338c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
429438c8a9a5SSteve French return -ENOMEM;
429538c8a9a5SSteve French }
429638c8a9a5SSteve French } else {
429738c8a9a5SSteve French /*
429838c8a9a5SSteve French * Otherwise, we just pass the iterator down as-is and rely on
429938c8a9a5SSteve French * the caller to make sure the pages referred to by the
430038c8a9a5SSteve French * iterator don't evaporate.
430138c8a9a5SSteve French */
430238c8a9a5SSteve French ctx->iter = *to;
430338c8a9a5SSteve French }
430438c8a9a5SSteve French
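/*
 * For a direct read, write back any dirty pagecache in the range first
 * so the read from the server sees data that was previously written
 * through the cache.
 */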
430538c8a9a5SSteve French if (direct) {
430638c8a9a5SSteve French rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
430738c8a9a5SSteve French offset, offset + len - 1);
430838c8a9a5SSteve French if (rc) {
430938c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
431038c8a9a5SSteve French return -EAGAIN;
431138c8a9a5SSteve French }
431238c8a9a5SSteve French }
431338c8a9a5SSteve French
431438c8a9a5SSteve French /* grab a lock here because the read response handlers can access ctx */
431538c8a9a5SSteve French mutex_lock(&ctx->aio_mutex);
431638c8a9a5SSteve French
431738c8a9a5SSteve French rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
431838c8a9a5SSteve French
431938c8a9a5SSteve French /* if at least one read request send succeeded, then reset rc */
432038c8a9a5SSteve French if (!list_empty(&ctx->list))
432138c8a9a5SSteve French rc = 0;
432238c8a9a5SSteve French
432338c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
432438c8a9a5SSteve French
432538c8a9a5SSteve French if (rc) {
432638c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
432738c8a9a5SSteve French return rc;
432838c8a9a5SSteve French }
432938c8a9a5SSteve French
433038c8a9a5SSteve French if (!is_sync_kiocb(iocb)) {
433138c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
433238c8a9a5SSteve French return -EIOCBQUEUED;
433338c8a9a5SSteve French }
433438c8a9a5SSteve French
433538c8a9a5SSteve French rc = wait_for_completion_killable(&ctx->done);
433638c8a9a5SSteve French if (rc) {
433738c8a9a5SSteve French mutex_lock(&ctx->aio_mutex);
433838c8a9a5SSteve French ctx->rc = rc = -EINTR;
433938c8a9a5SSteve French total_read = ctx->total_len;
434038c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex);
434138c8a9a5SSteve French } else {
434238c8a9a5SSteve French rc = ctx->rc;
434338c8a9a5SSteve French total_read = ctx->total_len;
434438c8a9a5SSteve French }
434538c8a9a5SSteve French
434638c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release);
434738c8a9a5SSteve French
434838c8a9a5SSteve French if (total_read) {
434938c8a9a5SSteve French iocb->ki_pos += total_read;
435038c8a9a5SSteve French return total_read;
435138c8a9a5SSteve French }
435238c8a9a5SSteve French return rc;
435338c8a9a5SSteve French }
435438c8a9a5SSteve French
cifs_direct_readv(struct kiocb * iocb,struct iov_iter * to)435538c8a9a5SSteve French ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
435638c8a9a5SSteve French {
435738c8a9a5SSteve French return __cifs_readv(iocb, to, true);
435838c8a9a5SSteve French }
435938c8a9a5SSteve French
cifs_user_readv(struct kiocb * iocb,struct iov_iter * to)436038c8a9a5SSteve French ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
436138c8a9a5SSteve French {
436238c8a9a5SSteve French return __cifs_readv(iocb, to, false);
436338c8a9a5SSteve French }
436438c8a9a5SSteve French
436538c8a9a5SSteve French ssize_t
cifs_strict_readv(struct kiocb * iocb,struct iov_iter * to)436638c8a9a5SSteve French cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
436738c8a9a5SSteve French {
436838c8a9a5SSteve French struct inode *inode = file_inode(iocb->ki_filp);
436938c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(inode);
437038c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
437138c8a9a5SSteve French struct cifsFileInfo *cfile = (struct cifsFileInfo *)
437238c8a9a5SSteve French iocb->ki_filp->private_data;
437338c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
437438c8a9a5SSteve French int rc = -EACCES;
437538c8a9a5SSteve French
437638c8a9a5SSteve French /*
437738c8a9a5SSteve French * In strict cache mode we need to read from the server all the time
437838c8a9a5SSteve French * if we don't have level II oplock because the server can delay mtime
437938c8a9a5SSteve French * change - so we can't decide whether to invalidate the inode.
438038c8a9a5SSteve French * We can also fail when reading pages if there are mandatory locks
438138c8a9a5SSteve French * on pages affected by this read but not on the region from pos to
438238c8a9a5SSteve French * pos+len-1.
438338c8a9a5SSteve French */
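/*
 * Three cases follow: without a read oplock/lease, go uncached; with
 * POSIX byte-range lock support on the server, the generic cached path
 * is safe; otherwise only read from the cache after checking for a
 * conflicting mandatory lock under lock_sem.
 */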
438438c8a9a5SSteve French if (!CIFS_CACHE_READ(cinode))
438538c8a9a5SSteve French return cifs_user_readv(iocb, to);
438638c8a9a5SSteve French
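/*
 * With POSIX (unix extensions) byte-range locking available and not
 * disabled by mount options, locks are advisory and arbitrated by the
 * server, so the cached read path can be used without consulting the
 * local mandatory-lock list.
 */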
438738c8a9a5SSteve French if (cap_unix(tcon->ses) &&
438838c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
438938c8a9a5SSteve French ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
439038c8a9a5SSteve French return generic_file_read_iter(iocb, to);
439138c8a9a5SSteve French
439238c8a9a5SSteve French /*
439338c8a9a5SSteve French * We need to hold the sem to be sure nobody modifies lock list
439438c8a9a5SSteve French * with a brlock that prevents reading.
439538c8a9a5SSteve French */
439638c8a9a5SSteve French down_read(&cinode->lock_sem);
439738c8a9a5SSteve French if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
439838c8a9a5SSteve French tcon->ses->server->vals->shared_lock_type,
439938c8a9a5SSteve French 0, NULL, CIFS_READ_OP))
440038c8a9a5SSteve French rc = generic_file_read_iter(iocb, to);
440138c8a9a5SSteve French up_read(&cinode->lock_sem);
440238c8a9a5SSteve French return rc;
440338c8a9a5SSteve French }
440438c8a9a5SSteve French
440538c8a9a5SSteve French static ssize_t
440638c8a9a5SSteve French cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
440738c8a9a5SSteve French {
440838c8a9a5SSteve French int rc = -EACCES;
440938c8a9a5SSteve French unsigned int bytes_read = 0;
441038c8a9a5SSteve French unsigned int total_read;
441138c8a9a5SSteve French unsigned int current_read_size;
441238c8a9a5SSteve French unsigned int rsize;
441338c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
441438c8a9a5SSteve French struct cifs_tcon *tcon;
441538c8a9a5SSteve French struct TCP_Server_Info *server;
441638c8a9a5SSteve French unsigned int xid;
441738c8a9a5SSteve French char *cur_offset;
441838c8a9a5SSteve French struct cifsFileInfo *open_file;
441938c8a9a5SSteve French struct cifs_io_parms io_parms = {0};
442038c8a9a5SSteve French int buf_type = CIFS_NO_BUFFER;
442138c8a9a5SSteve French __u32 pid;
442238c8a9a5SSteve French
442338c8a9a5SSteve French xid = get_xid();
442438c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file);
442538c8a9a5SSteve French
442638c8a9a5SSteve French /* FIXME: set up handlers for larger reads and/or convert to async */
442738c8a9a5SSteve French rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
442838c8a9a5SSteve French
442938c8a9a5SSteve French if (file->private_data == NULL) {
443038c8a9a5SSteve French rc = -EBADF;
443138c8a9a5SSteve French free_xid(xid);
443238c8a9a5SSteve French return rc;
443338c8a9a5SSteve French }
443438c8a9a5SSteve French open_file = file->private_data;
443538c8a9a5SSteve French tcon = tlink_tcon(open_file->tlink);
443638c8a9a5SSteve French server = cifs_pick_channel(tcon->ses);
443738c8a9a5SSteve French
443838c8a9a5SSteve French if (!server->ops->sync_read) {
443938c8a9a5SSteve French free_xid(xid);
444038c8a9a5SSteve French return -ENOSYS;
444138c8a9a5SSteve French }
444238c8a9a5SSteve French
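/*
 * With the rwpidforward mount option, send the I/O with the pid of the
 * process that opened the file; otherwise use the caller's tgid.
 */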
444338c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
444438c8a9a5SSteve French pid = open_file->pid;
444538c8a9a5SSteve French else
444638c8a9a5SSteve French pid = current->tgid;
444738c8a9a5SSteve French
444838c8a9a5SSteve French if ((file->f_flags & O_ACCMODE) == O_WRONLY)
444938c8a9a5SSteve French cifs_dbg(FYI, "attempting read on write only file instance\n");
445038c8a9a5SSteve French
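/*
 * Read the requested range in chunks of at most rsize bytes, retrying
 * each chunk on -EAGAIN after reopening an invalidated handle.
 */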
445138c8a9a5SSteve French for (total_read = 0, cur_offset = read_data; read_size > total_read;
445238c8a9a5SSteve French total_read += bytes_read, cur_offset += bytes_read) {
445338c8a9a5SSteve French do {
445438c8a9a5SSteve French current_read_size = min_t(uint, read_size - total_read,
445538c8a9a5SSteve French rsize);
445638c8a9a5SSteve French /*
445738c8a9a5SSteve French * For Windows ME and 9x we do not want to request more
445838c8a9a5SSteve French * than they negotiated, since they will refuse the read
445938c8a9a5SSteve French * otherwise.
446038c8a9a5SSteve French */
446138c8a9a5SSteve French if (!(tcon->ses->capabilities &
446238c8a9a5SSteve French tcon->ses->server->vals->cap_large_files)) {
446338c8a9a5SSteve French current_read_size = min_t(uint,
446438c8a9a5SSteve French current_read_size, CIFSMaxBufSize);
446538c8a9a5SSteve French }
446638c8a9a5SSteve French if (open_file->invalidHandle) {
446738c8a9a5SSteve French rc = cifs_reopen_file(open_file, true);
446838c8a9a5SSteve French if (rc != 0)
446938c8a9a5SSteve French break;
447038c8a9a5SSteve French }
447138c8a9a5SSteve French io_parms.pid = pid;
447238c8a9a5SSteve French io_parms.tcon = tcon;
447338c8a9a5SSteve French io_parms.offset = *offset;
447438c8a9a5SSteve French io_parms.length = current_read_size;
447538c8a9a5SSteve French io_parms.server = server;
447638c8a9a5SSteve French rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
447738c8a9a5SSteve French &bytes_read, &cur_offset,
447838c8a9a5SSteve French &buf_type);
447938c8a9a5SSteve French } while (rc == -EAGAIN);
448038c8a9a5SSteve French
448138c8a9a5SSteve French if (rc || (bytes_read == 0)) {
448238c8a9a5SSteve French if (total_read) {
448338c8a9a5SSteve French break;
448438c8a9a5SSteve French } else {
448538c8a9a5SSteve French free_xid(xid);
448638c8a9a5SSteve French return rc;
448738c8a9a5SSteve French }
448838c8a9a5SSteve French } else {
448938c8a9a5SSteve French cifs_stats_bytes_read(tcon, total_read);
449038c8a9a5SSteve French *offset += bytes_read;
449138c8a9a5SSteve French }
449238c8a9a5SSteve French }
449338c8a9a5SSteve French free_xid(xid);
449438c8a9a5SSteve French return total_read;
449538c8a9a5SSteve French }
449638c8a9a5SSteve French
449738c8a9a5SSteve French /*
449838c8a9a5SSteve French * If the page is mmap'ed into a process' page tables, then we need to make
449938c8a9a5SSteve French * sure that it doesn't change while being written back.
450038c8a9a5SSteve French */
450138c8a9a5SSteve French static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
450238c8a9a5SSteve French {
450338c8a9a5SSteve French struct folio *folio = page_folio(vmf->page);
450438c8a9a5SSteve French
450538c8a9a5SSteve French /* Wait for the folio to be written to the cache before we allow it to
450638c8a9a5SSteve French * be modified. We then assume the entire folio will need writing back.
450738c8a9a5SSteve French */
450838c8a9a5SSteve French #ifdef CONFIG_CIFS_FSCACHE
450938c8a9a5SSteve French if (folio_test_fscache(folio) &&
451038c8a9a5SSteve French folio_wait_fscache_killable(folio) < 0)
451138c8a9a5SSteve French return VM_FAULT_RETRY;
451238c8a9a5SSteve French #endif
451338c8a9a5SSteve French
451438c8a9a5SSteve French folio_wait_writeback(folio);
451538c8a9a5SSteve French
451638c8a9a5SSteve French if (folio_lock_killable(folio) < 0)
451738c8a9a5SSteve French return VM_FAULT_RETRY;
451838c8a9a5SSteve French return VM_FAULT_LOCKED;
451938c8a9a5SSteve French }
452038c8a9a5SSteve French
452138c8a9a5SSteve French static const struct vm_operations_struct cifs_file_vm_ops = {
452238c8a9a5SSteve French .fault = filemap_fault,
452338c8a9a5SSteve French .map_pages = filemap_map_pages,
452438c8a9a5SSteve French .page_mkwrite = cifs_page_mkwrite,
452538c8a9a5SSteve French };
452638c8a9a5SSteve French
452738c8a9a5SSteve French int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
452838c8a9a5SSteve French {
452938c8a9a5SSteve French int xid, rc = 0;
453038c8a9a5SSteve French struct inode *inode = file_inode(file);
453138c8a9a5SSteve French
453238c8a9a5SSteve French xid = get_xid();
453338c8a9a5SSteve French
453438c8a9a5SSteve French if (!CIFS_CACHE_READ(CIFS_I(inode)))
453538c8a9a5SSteve French rc = cifs_zap_mapping(inode);
453638c8a9a5SSteve French if (!rc)
453738c8a9a5SSteve French rc = generic_file_mmap(file, vma);
453838c8a9a5SSteve French if (!rc)
453938c8a9a5SSteve French vma->vm_ops = &cifs_file_vm_ops;
454038c8a9a5SSteve French
454138c8a9a5SSteve French free_xid(xid);
454238c8a9a5SSteve French return rc;
454338c8a9a5SSteve French }
454438c8a9a5SSteve French
454538c8a9a5SSteve French int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
454638c8a9a5SSteve French {
454738c8a9a5SSteve French int rc, xid;
454838c8a9a5SSteve French
454938c8a9a5SSteve French xid = get_xid();
455038c8a9a5SSteve French
455138c8a9a5SSteve French rc = cifs_revalidate_file(file);
455238c8a9a5SSteve French if (rc)
455338c8a9a5SSteve French cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
455438c8a9a5SSteve French rc);
455538c8a9a5SSteve French if (!rc)
455638c8a9a5SSteve French rc = generic_file_mmap(file, vma);
455738c8a9a5SSteve French if (!rc)
455838c8a9a5SSteve French vma->vm_ops = &cifs_file_vm_ops;
455938c8a9a5SSteve French
456038c8a9a5SSteve French free_xid(xid);
456138c8a9a5SSteve French return rc;
456238c8a9a5SSteve French }
456338c8a9a5SSteve French
456438c8a9a5SSteve French /*
456538c8a9a5SSteve French * Unlock a bunch of folios in the pagecache.
456638c8a9a5SSteve French */
456738c8a9a5SSteve French static void cifs_unlock_folios(struct address_space *mapping, pgoff_t first, pgoff_t last)
456838c8a9a5SSteve French {
456938c8a9a5SSteve French struct folio *folio;
457038c8a9a5SSteve French XA_STATE(xas, &mapping->i_pages, first);
457138c8a9a5SSteve French
457238c8a9a5SSteve French rcu_read_lock();
457338c8a9a5SSteve French xas_for_each(&xas, folio, last) {
457438c8a9a5SSteve French folio_unlock(folio);
457538c8a9a5SSteve French }
457638c8a9a5SSteve French rcu_read_unlock();
457738c8a9a5SSteve French }
457838c8a9a5SSteve French
457938c8a9a5SSteve French static void cifs_readahead_complete(struct work_struct *work)
458038c8a9a5SSteve French {
458138c8a9a5SSteve French struct cifs_readdata *rdata = container_of(work,
458238c8a9a5SSteve French struct cifs_readdata, work);
458338c8a9a5SSteve French struct folio *folio;
458438c8a9a5SSteve French pgoff_t last;
458538c8a9a5SSteve French bool good = rdata->result == 0 || (rdata->result == -EAGAIN && rdata->got_bytes);
458638c8a9a5SSteve French
458738c8a9a5SSteve French XA_STATE(xas, &rdata->mapping->i_pages, rdata->offset / PAGE_SIZE);
458838c8a9a5SSteve French
458938c8a9a5SSteve French if (good)
459038c8a9a5SSteve French cifs_readahead_to_fscache(rdata->mapping->host,
459138c8a9a5SSteve French rdata->offset, rdata->bytes);
459238c8a9a5SSteve French
459338c8a9a5SSteve French if (iov_iter_count(&rdata->iter) > 0)
459438c8a9a5SSteve French iov_iter_zero(iov_iter_count(&rdata->iter), &rdata->iter);
459538c8a9a5SSteve French
459638c8a9a5SSteve French last = (rdata->offset + rdata->bytes - 1) / PAGE_SIZE;
459738c8a9a5SSteve French
459838c8a9a5SSteve French rcu_read_lock();
459938c8a9a5SSteve French xas_for_each(&xas, folio, last) {
460038c8a9a5SSteve French if (good) {
460138c8a9a5SSteve French flush_dcache_folio(folio);
460238c8a9a5SSteve French folio_mark_uptodate(folio);
460338c8a9a5SSteve French }
460438c8a9a5SSteve French folio_unlock(folio);
460538c8a9a5SSteve French }
460638c8a9a5SSteve French rcu_read_unlock();
460738c8a9a5SSteve French
460838c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
460938c8a9a5SSteve French }
461038c8a9a5SSteve French
461138c8a9a5SSteve French static void cifs_readahead(struct readahead_control *ractl)
461238c8a9a5SSteve French {
461338c8a9a5SSteve French struct cifsFileInfo *open_file = ractl->file->private_data;
461438c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
461538c8a9a5SSteve French struct TCP_Server_Info *server;
461638c8a9a5SSteve French unsigned int xid, nr_pages, cache_nr_pages = 0;
461738c8a9a5SSteve French unsigned int ra_pages;
461838c8a9a5SSteve French pgoff_t next_cached = ULONG_MAX, ra_index;
461938c8a9a5SSteve French bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
462038c8a9a5SSteve French cifs_inode_cookie(ractl->mapping->host)->cache_priv;
462138c8a9a5SSteve French bool check_cache = caching;
462238c8a9a5SSteve French pid_t pid;
462338c8a9a5SSteve French int rc = 0;
462438c8a9a5SSteve French
462538c8a9a5SSteve French /* Note that readahead_count() lags behind our dequeuing of pages from
462638c8a9a5SSteve French * the ractl, so we have to keep track for ourselves.
462738c8a9a5SSteve French */
462838c8a9a5SSteve French ra_pages = readahead_count(ractl);
462938c8a9a5SSteve French ra_index = readahead_index(ractl);
463038c8a9a5SSteve French
463138c8a9a5SSteve French xid = get_xid();
463238c8a9a5SSteve French
463338c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
463438c8a9a5SSteve French pid = open_file->pid;
463538c8a9a5SSteve French else
463638c8a9a5SSteve French pid = current->tgid;
463738c8a9a5SSteve French
463838c8a9a5SSteve French server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
463938c8a9a5SSteve French
464038c8a9a5SSteve French cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
464138c8a9a5SSteve French __func__, ractl->file, ractl->mapping, ra_pages);
464238c8a9a5SSteve French
464338c8a9a5SSteve French /*
464438c8a9a5SSteve French * Chop the readahead request up into rsize-sized read requests.
464538c8a9a5SSteve French */
464638c8a9a5SSteve French while ((nr_pages = ra_pages)) {
464738c8a9a5SSteve French unsigned int i, rsize;
464838c8a9a5SSteve French struct cifs_readdata *rdata;
464938c8a9a5SSteve French struct cifs_credits credits_on_stack;
465038c8a9a5SSteve French struct cifs_credits *credits = &credits_on_stack;
465138c8a9a5SSteve French struct folio *folio;
465238c8a9a5SSteve French pgoff_t fsize;
465338c8a9a5SSteve French
465438c8a9a5SSteve French /*
465538c8a9a5SSteve French * Find out if we have anything cached in the range of
465638c8a9a5SSteve French * interest, and if so, where the next chunk of cached data is.
465738c8a9a5SSteve French */
465838c8a9a5SSteve French if (caching) {
465938c8a9a5SSteve French if (check_cache) {
466038c8a9a5SSteve French rc = cifs_fscache_query_occupancy(
466138c8a9a5SSteve French ractl->mapping->host, ra_index, nr_pages,
466238c8a9a5SSteve French &next_cached, &cache_nr_pages);
466338c8a9a5SSteve French if (rc < 0)
466438c8a9a5SSteve French caching = false;
466538c8a9a5SSteve French check_cache = false;
466638c8a9a5SSteve French }
466738c8a9a5SSteve French
466838c8a9a5SSteve French if (ra_index == next_cached) {
466938c8a9a5SSteve French /*
467038c8a9a5SSteve French * TODO: Send a whole batch of pages to be read
467138c8a9a5SSteve French * by the cache.
467238c8a9a5SSteve French */
467338c8a9a5SSteve French folio = readahead_folio(ractl);
467438c8a9a5SSteve French fsize = folio_nr_pages(folio);
467538c8a9a5SSteve French ra_pages -= fsize;
467638c8a9a5SSteve French ra_index += fsize;
467738c8a9a5SSteve French if (cifs_readpage_from_fscache(ractl->mapping->host,
467838c8a9a5SSteve French &folio->page) < 0) {
467938c8a9a5SSteve French /*
468038c8a9a5SSteve French * TODO: Deal with cache read failure
468138c8a9a5SSteve French * here, but for the moment, delegate
468238c8a9a5SSteve French * that to readpage.
468338c8a9a5SSteve French */
468438c8a9a5SSteve French caching = false;
468538c8a9a5SSteve French }
468638c8a9a5SSteve French folio_unlock(folio);
468738c8a9a5SSteve French next_cached += fsize;
468838c8a9a5SSteve French cache_nr_pages -= fsize;
468938c8a9a5SSteve French if (cache_nr_pages == 0)
469038c8a9a5SSteve French check_cache = true;
469138c8a9a5SSteve French continue;
469238c8a9a5SSteve French }
469338c8a9a5SSteve French }
469438c8a9a5SSteve French
469538c8a9a5SSteve French if (open_file->invalidHandle) {
469638c8a9a5SSteve French rc = cifs_reopen_file(open_file, true);
469738c8a9a5SSteve French if (rc) {
469838c8a9a5SSteve French if (rc == -EAGAIN)
469938c8a9a5SSteve French continue;
470038c8a9a5SSteve French break;
470138c8a9a5SSteve French }
470238c8a9a5SSteve French }
470338c8a9a5SSteve French
470438c8a9a5SSteve French if (cifs_sb->ctx->rsize == 0)
470538c8a9a5SSteve French cifs_sb->ctx->rsize =
470638c8a9a5SSteve French server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
470738c8a9a5SSteve French cifs_sb->ctx);
470838c8a9a5SSteve French
470938c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
471038c8a9a5SSteve French &rsize, credits);
471138c8a9a5SSteve French if (rc)
471238c8a9a5SSteve French break;
471338c8a9a5SSteve French nr_pages = min_t(size_t, rsize / PAGE_SIZE, ra_pages);
471438c8a9a5SSteve French if (next_cached != ULONG_MAX)
471538c8a9a5SSteve French nr_pages = min_t(size_t, nr_pages, next_cached - ra_index);
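/*
 * The batch is capped at one rsize-sized wire read and stops short of
 * the next cached chunk so that cached data is fetched from fscache
 * rather than from the server.
 */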
471638c8a9a5SSteve French
471738c8a9a5SSteve French /*
471838c8a9a5SSteve French * Give up immediately if rsize is too small to read an entire
471938c8a9a5SSteve French * page. The VFS will fall back to readpage. We should never
472038c8a9a5SSteve French * reach this point, however, since we set ra_pages to 0 when the
472138c8a9a5SSteve French * rsize is smaller than a cache page.
472238c8a9a5SSteve French */
472338c8a9a5SSteve French if (unlikely(!nr_pages)) {
472438c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
472538c8a9a5SSteve French break;
472638c8a9a5SSteve French }
472738c8a9a5SSteve French
472838c8a9a5SSteve French rdata = cifs_readdata_alloc(cifs_readahead_complete);
472938c8a9a5SSteve French if (!rdata) {
473038c8a9a5SSteve French /* best to give up if we're out of mem */
473138c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0);
473238c8a9a5SSteve French break;
473338c8a9a5SSteve French }
473438c8a9a5SSteve French
473538c8a9a5SSteve French rdata->offset = ra_index * PAGE_SIZE;
473638c8a9a5SSteve French rdata->bytes = nr_pages * PAGE_SIZE;
473738c8a9a5SSteve French rdata->cfile = cifsFileInfo_get(open_file);
473838c8a9a5SSteve French rdata->server = server;
473938c8a9a5SSteve French rdata->mapping = ractl->mapping;
474038c8a9a5SSteve French rdata->pid = pid;
474138c8a9a5SSteve French rdata->credits = credits_on_stack;
474238c8a9a5SSteve French
474338c8a9a5SSteve French for (i = 0; i < nr_pages; i++) {
474438c8a9a5SSteve French if (!readahead_folio(ractl))
474538c8a9a5SSteve French WARN_ON(1);
474638c8a9a5SSteve French }
474738c8a9a5SSteve French ra_pages -= nr_pages;
474838c8a9a5SSteve French ra_index += nr_pages;
474938c8a9a5SSteve French
475038c8a9a5SSteve French iov_iter_xarray(&rdata->iter, ITER_DEST, &rdata->mapping->i_pages,
475138c8a9a5SSteve French rdata->offset, rdata->bytes);
475238c8a9a5SSteve French
475338c8a9a5SSteve French rc = adjust_credits(server, &rdata->credits, rdata->bytes);
475438c8a9a5SSteve French if (!rc) {
475538c8a9a5SSteve French if (rdata->cfile->invalidHandle)
475638c8a9a5SSteve French rc = -EAGAIN;
475738c8a9a5SSteve French else
475838c8a9a5SSteve French rc = server->ops->async_readv(rdata);
475938c8a9a5SSteve French }
476038c8a9a5SSteve French
476138c8a9a5SSteve French if (rc) {
476238c8a9a5SSteve French add_credits_and_wake_if(server, &rdata->credits, 0);
476338c8a9a5SSteve French cifs_unlock_folios(rdata->mapping,
476438c8a9a5SSteve French rdata->offset / PAGE_SIZE,
476538c8a9a5SSteve French (rdata->offset + rdata->bytes - 1) / PAGE_SIZE);
476638c8a9a5SSteve French /* Fall back to readpage in error/reconnect cases */
476738c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
476838c8a9a5SSteve French break;
476938c8a9a5SSteve French }
477038c8a9a5SSteve French
477138c8a9a5SSteve French kref_put(&rdata->refcount, cifs_readdata_release);
477238c8a9a5SSteve French }
477338c8a9a5SSteve French
477438c8a9a5SSteve French free_xid(xid);
477538c8a9a5SSteve French }
477638c8a9a5SSteve French
477738c8a9a5SSteve French /*
477838c8a9a5SSteve French * cifs_readpage_worker must be called with the page pinned
477938c8a9a5SSteve French */
478038c8a9a5SSteve French static int cifs_readpage_worker(struct file *file, struct page *page,
478138c8a9a5SSteve French loff_t *poffset)
478238c8a9a5SSteve French {
478323171df5SJeff Layton struct inode *inode = file_inode(file);
478423171df5SJeff Layton struct timespec64 atime, mtime;
478538c8a9a5SSteve French char *read_data;
478638c8a9a5SSteve French int rc;
478738c8a9a5SSteve French
478838c8a9a5SSteve French /* Is the page cached? */
478923171df5SJeff Layton rc = cifs_readpage_from_fscache(inode, page);
479038c8a9a5SSteve French if (rc == 0)
479138c8a9a5SSteve French goto read_complete;
479238c8a9a5SSteve French
479338c8a9a5SSteve French read_data = kmap(page);
479438c8a9a5SSteve French /* for reads over a certain size could initiate async read ahead */
479538c8a9a5SSteve French
479638c8a9a5SSteve French rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
479738c8a9a5SSteve French
479838c8a9a5SSteve French if (rc < 0)
479938c8a9a5SSteve French goto io_error;
480038c8a9a5SSteve French else
480138c8a9a5SSteve French cifs_dbg(FYI, "Bytes read %d\n", rc);
480238c8a9a5SSteve French
480338c8a9a5SSteve French /* we do not want atime to be less than mtime; it broke some apps */
480423171df5SJeff Layton atime = inode_set_atime_to_ts(inode, current_time(inode));
480523171df5SJeff Layton mtime = inode_get_mtime(inode);
48069a498744SZizhi Wo if (timespec64_compare(&atime, &mtime) < 0)
480723171df5SJeff Layton inode_set_atime_to_ts(inode, inode_get_mtime(inode));
480838c8a9a5SSteve French
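/* zero the tail of the page on a short read so stale data is not exposed */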
480938c8a9a5SSteve French if (PAGE_SIZE > rc)
481038c8a9a5SSteve French memset(read_data + rc, 0, PAGE_SIZE - rc);
481138c8a9a5SSteve French
481238c8a9a5SSteve French flush_dcache_page(page);
481338c8a9a5SSteve French SetPageUptodate(page);
481438c8a9a5SSteve French rc = 0;
481538c8a9a5SSteve French
481638c8a9a5SSteve French io_error:
481738c8a9a5SSteve French kunmap(page);
481838c8a9a5SSteve French
481938c8a9a5SSteve French read_complete:
482069513dd6SRussell Harmon via samba-technical unlock_page(page);
482138c8a9a5SSteve French return rc;
482238c8a9a5SSteve French }
482338c8a9a5SSteve French
482438c8a9a5SSteve French static int cifs_read_folio(struct file *file, struct folio *folio)
482538c8a9a5SSteve French {
482638c8a9a5SSteve French struct page *page = &folio->page;
482738c8a9a5SSteve French loff_t offset = page_file_offset(page);
482838c8a9a5SSteve French int rc = -EACCES;
482938c8a9a5SSteve French unsigned int xid;
483038c8a9a5SSteve French
483138c8a9a5SSteve French xid = get_xid();
483238c8a9a5SSteve French
483338c8a9a5SSteve French if (file->private_data == NULL) {
483438c8a9a5SSteve French rc = -EBADF;
483538c8a9a5SSteve French free_xid(xid);
483638c8a9a5SSteve French return rc;
483738c8a9a5SSteve French }
483838c8a9a5SSteve French
483938c8a9a5SSteve French cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
484038c8a9a5SSteve French page, (int)offset, (int)offset);
484138c8a9a5SSteve French
484238c8a9a5SSteve French rc = cifs_readpage_worker(file, page, &offset);
484338c8a9a5SSteve French
484438c8a9a5SSteve French free_xid(xid);
484538c8a9a5SSteve French return rc;
484638c8a9a5SSteve French }
484738c8a9a5SSteve French
484838c8a9a5SSteve French static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
484938c8a9a5SSteve French {
485038c8a9a5SSteve French struct cifsFileInfo *open_file;
485138c8a9a5SSteve French
485238c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock);
485338c8a9a5SSteve French list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
485438c8a9a5SSteve French if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
485538c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
485638c8a9a5SSteve French return 1;
485738c8a9a5SSteve French }
485838c8a9a5SSteve French }
485938c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock);
486038c8a9a5SSteve French return 0;
486138c8a9a5SSteve French }
486238c8a9a5SSteve French
486338c8a9a5SSteve French /* We do not want to update the file size from the server for inodes
486438c8a9a5SSteve French open for write, to avoid races with writepage extending the file.
486538c8a9a5SSteve French In the future we could consider allowing the inode to be refreshed
486638c8a9a5SSteve French only on increases in the file size, but this is tricky to do
486738c8a9a5SSteve French without racing with writebehind page caching in the current
486838c8a9a5SSteve French Linux kernel design */
48699179aa27SBharath SM bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
48709179aa27SBharath SM bool from_readdir)
487138c8a9a5SSteve French {
487238c8a9a5SSteve French if (!cifsInode)
487338c8a9a5SSteve French return true;
487438c8a9a5SSteve French
48759179aa27SBharath SM if (is_inode_writable(cifsInode) ||
48769179aa27SBharath SM ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
487738c8a9a5SSteve French /* This inode is open for write at least once */
487838c8a9a5SSteve French struct cifs_sb_info *cifs_sb;
487938c8a9a5SSteve French
488038c8a9a5SSteve French cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
488138c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
488238c8a9a5SSteve French /* since there is no page cache to corrupt with direct I/O
488338c8a9a5SSteve French we can change the size safely */
488438c8a9a5SSteve French return true;
488538c8a9a5SSteve French }
488638c8a9a5SSteve French
488738c8a9a5SSteve French if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
488838c8a9a5SSteve French return true;
488938c8a9a5SSteve French
489038c8a9a5SSteve French return false;
489138c8a9a5SSteve French } else
489238c8a9a5SSteve French return true;
489338c8a9a5SSteve French }
489438c8a9a5SSteve French
489538c8a9a5SSteve French static int cifs_write_begin(struct file *file, struct address_space *mapping,
489638c8a9a5SSteve French loff_t pos, unsigned len,
489738c8a9a5SSteve French struct page **pagep, void **fsdata)
489838c8a9a5SSteve French {
489938c8a9a5SSteve French int oncethru = 0;
490038c8a9a5SSteve French pgoff_t index = pos >> PAGE_SHIFT;
490138c8a9a5SSteve French loff_t offset = pos & (PAGE_SIZE - 1);
490238c8a9a5SSteve French loff_t page_start = pos & PAGE_MASK;
490338c8a9a5SSteve French loff_t i_size;
490438c8a9a5SSteve French struct page *page;
490538c8a9a5SSteve French int rc = 0;
490638c8a9a5SSteve French
490738c8a9a5SSteve French cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
490838c8a9a5SSteve French
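/*
 * Grab and lock the target page. If it is not uptodate and none of the
 * shortcuts below apply, read it in once via cifs_readpage_worker
 * (which unlocks and releases the page) and retry from start;
 * oncethru prevents looping more than once.
 */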
490938c8a9a5SSteve French start:
491038c8a9a5SSteve French page = grab_cache_page_write_begin(mapping, index);
491138c8a9a5SSteve French if (!page) {
491238c8a9a5SSteve French rc = -ENOMEM;
491338c8a9a5SSteve French goto out;
491438c8a9a5SSteve French }
491538c8a9a5SSteve French
491638c8a9a5SSteve French if (PageUptodate(page))
491738c8a9a5SSteve French goto out;
491838c8a9a5SSteve French
491938c8a9a5SSteve French /*
492038c8a9a5SSteve French * If we write a full page it will be up to date, no need to read from
492138c8a9a5SSteve French * the server. If the write is short, we'll end up doing a sync write
492238c8a9a5SSteve French * instead.
492338c8a9a5SSteve French */
492438c8a9a5SSteve French if (len == PAGE_SIZE)
492538c8a9a5SSteve French goto out;
492638c8a9a5SSteve French
492738c8a9a5SSteve French /*
492838c8a9a5SSteve French * optimize away the read when we have an oplock, and we're not
492938c8a9a5SSteve French * expecting to use any of the data we'd be reading in. That
493038c8a9a5SSteve French * is, when the page lies beyond the EOF, or straddles the EOF
493138c8a9a5SSteve French * and the write will cover all of the existing data.
493238c8a9a5SSteve French */
493338c8a9a5SSteve French if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
493438c8a9a5SSteve French i_size = i_size_read(mapping->host);
493538c8a9a5SSteve French if (page_start >= i_size ||
493638c8a9a5SSteve French (offset == 0 && (pos + len) >= i_size)) {
493738c8a9a5SSteve French zero_user_segments(page, 0, offset,
493838c8a9a5SSteve French offset + len,
493938c8a9a5SSteve French PAGE_SIZE);
494038c8a9a5SSteve French /*
494138c8a9a5SSteve French * PageChecked means that the parts of the page
494238c8a9a5SSteve French * to which we're not writing are considered up
494338c8a9a5SSteve French * to date. Once the data is copied to the
494438c8a9a5SSteve French * page, it can be set uptodate.
494538c8a9a5SSteve French */
494638c8a9a5SSteve French SetPageChecked(page);
494738c8a9a5SSteve French goto out;
494838c8a9a5SSteve French }
494938c8a9a5SSteve French }
495038c8a9a5SSteve French
495138c8a9a5SSteve French if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
495238c8a9a5SSteve French /*
495338c8a9a5SSteve French * might as well read a page; it is fast enough. If we get
495438c8a9a5SSteve French * an error, we don't need to return it. cifs_write_end will
495538c8a9a5SSteve French * do a sync write instead since PG_uptodate isn't set.
495638c8a9a5SSteve French */
495738c8a9a5SSteve French cifs_readpage_worker(file, page, &page_start);
495838c8a9a5SSteve French put_page(page);
495938c8a9a5SSteve French oncethru = 1;
496038c8a9a5SSteve French goto start;
496138c8a9a5SSteve French } else {
496238c8a9a5SSteve French /* we could try using another file handle if there is one -
496338c8a9a5SSteve French but how would we lock it to prevent close of that handle
496438c8a9a5SSteve French racing with this read? In any case
496538c8a9a5SSteve French this will be written out by write_end, so it is fine */
496638c8a9a5SSteve French }
496738c8a9a5SSteve French out:
496838c8a9a5SSteve French *pagep = page;
496938c8a9a5SSteve French return rc;
497038c8a9a5SSteve French }
497138c8a9a5SSteve French
497238c8a9a5SSteve French static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
497338c8a9a5SSteve French {
497438c8a9a5SSteve French if (folio_test_private(folio))
497538c8a9a5SSteve French return 0;
497638c8a9a5SSteve French if (folio_test_fscache(folio)) {
497738c8a9a5SSteve French if (current_is_kswapd() || !(gfp & __GFP_FS))
497838c8a9a5SSteve French return false;
497938c8a9a5SSteve French folio_wait_fscache(folio);
498038c8a9a5SSteve French }
498138c8a9a5SSteve French fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
498238c8a9a5SSteve French return true;
498338c8a9a5SSteve French }
498438c8a9a5SSteve French
498538c8a9a5SSteve French static void cifs_invalidate_folio(struct folio *folio, size_t offset,
498638c8a9a5SSteve French size_t length)
498738c8a9a5SSteve French {
498838c8a9a5SSteve French folio_wait_fscache(folio);
498938c8a9a5SSteve French }
499038c8a9a5SSteve French
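/*
 * Write back a dirty, locked folio before it is invalidated
 * (e.g. by invalidate_inode_pages2), then wait for any fscache
 * write on it to finish.
 */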
499138c8a9a5SSteve French static int cifs_launder_folio(struct folio *folio)
499238c8a9a5SSteve French {
499338c8a9a5SSteve French int rc = 0;
499438c8a9a5SSteve French loff_t range_start = folio_pos(folio);
499538c8a9a5SSteve French loff_t range_end = range_start + folio_size(folio);
499638c8a9a5SSteve French struct writeback_control wbc = {
499738c8a9a5SSteve French .sync_mode = WB_SYNC_ALL,
499838c8a9a5SSteve French .nr_to_write = 0,
499938c8a9a5SSteve French .range_start = range_start,
500038c8a9a5SSteve French .range_end = range_end,
500138c8a9a5SSteve French };
500238c8a9a5SSteve French
500338c8a9a5SSteve French cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
500438c8a9a5SSteve French
500538c8a9a5SSteve French if (folio_clear_dirty_for_io(folio))
500638c8a9a5SSteve French rc = cifs_writepage_locked(&folio->page, &wbc);
500738c8a9a5SSteve French
500838c8a9a5SSteve French folio_wait_fscache(folio);
500938c8a9a5SSteve French return rc;
501038c8a9a5SSteve French }
501138c8a9a5SSteve French
501238c8a9a5SSteve French void cifs_oplock_break(struct work_struct *work)
501338c8a9a5SSteve French {
501438c8a9a5SSteve French struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
501538c8a9a5SSteve French oplock_break);
501638c8a9a5SSteve French struct inode *inode = d_inode(cfile->dentry);
5017e8f5f849SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
501838c8a9a5SSteve French struct cifsInodeInfo *cinode = CIFS_I(inode);
5019e8f5f849SSteve French struct cifs_tcon *tcon;
5020e8f5f849SSteve French struct TCP_Server_Info *server;
5021e8f5f849SSteve French struct tcon_link *tlink;
502238c8a9a5SSteve French int rc = 0;
502338c8a9a5SSteve French bool purge_cache = false, oplock_break_cancelled;
502438c8a9a5SSteve French __u64 persistent_fid, volatile_fid;
502538c8a9a5SSteve French __u16 net_fid;
502638c8a9a5SSteve French
502738c8a9a5SSteve French wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
502838c8a9a5SSteve French TASK_UNINTERRUPTIBLE);
502938c8a9a5SSteve French
5030e8f5f849SSteve French tlink = cifs_sb_tlink(cifs_sb);
5031e8f5f849SSteve French if (IS_ERR(tlink))
5032e8f5f849SSteve French goto out;
5033e8f5f849SSteve French tcon = tlink_tcon(tlink);
5034e8f5f849SSteve French server = tcon->ses->server;
5035e8f5f849SSteve French
503638c8a9a5SSteve French server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
503738c8a9a5SSteve French cfile->oplock_epoch, &purge_cache);
503838c8a9a5SSteve French
503938c8a9a5SSteve French if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
504038c8a9a5SSteve French cifs_has_mand_locks(cinode)) {
504138c8a9a5SSteve French cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
504238c8a9a5SSteve French inode);
504338c8a9a5SSteve French cinode->oplock = 0;
504438c8a9a5SSteve French }
504538c8a9a5SSteve French
504638c8a9a5SSteve French if (inode && S_ISREG(inode->i_mode)) {
504738c8a9a5SSteve French if (CIFS_CACHE_READ(cinode))
504838c8a9a5SSteve French break_lease(inode, O_RDONLY);
504938c8a9a5SSteve French else
505038c8a9a5SSteve French break_lease(inode, O_WRONLY);
505138c8a9a5SSteve French rc = filemap_fdatawrite(inode->i_mapping);
505238c8a9a5SSteve French if (!CIFS_CACHE_READ(cinode) || purge_cache) {
505338c8a9a5SSteve French rc = filemap_fdatawait(inode->i_mapping);
505438c8a9a5SSteve French mapping_set_error(inode->i_mapping, rc);
505538c8a9a5SSteve French cifs_zap_mapping(inode);
505638c8a9a5SSteve French }
505738c8a9a5SSteve French cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
505838c8a9a5SSteve French if (CIFS_CACHE_WRITE(cinode))
505938c8a9a5SSteve French goto oplock_break_ack;
506038c8a9a5SSteve French }
506138c8a9a5SSteve French
506238c8a9a5SSteve French rc = cifs_push_locks(cfile);
506338c8a9a5SSteve French if (rc)
506438c8a9a5SSteve French cifs_dbg(VFS, "Push locks rc = %d\n", rc);
506538c8a9a5SSteve French
506638c8a9a5SSteve French oplock_break_ack:
506738c8a9a5SSteve French /*
506838c8a9a5SSteve French * When an oplock break is received and there are no active file
506938c8a9a5SSteve French * handles, only cached ones, schedule the deferred close immediately
507038c8a9a5SSteve French * so that a new open will not reuse the cached handle.
507138c8a9a5SSteve French */
507238c8a9a5SSteve French
507338c8a9a5SSteve French if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
507438c8a9a5SSteve French cifs_close_deferred_file(cinode);
507538c8a9a5SSteve French
507638c8a9a5SSteve French persistent_fid = cfile->fid.persistent_fid;
507738c8a9a5SSteve French volatile_fid = cfile->fid.volatile_fid;
507838c8a9a5SSteve French net_fid = cfile->fid.netfid;
507938c8a9a5SSteve French oplock_break_cancelled = cfile->oplock_break_cancelled;
508038c8a9a5SSteve French
508138c8a9a5SSteve French _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
508238c8a9a5SSteve French /*
5083da787d5bSBharath SM * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
5084da787d5bSBharath SM * an acknowledgment to be sent when the file has already been closed.
508538c8a9a5SSteve French */
5086da787d5bSBharath SM spin_lock(&cinode->open_file_lock);
5087e8f5f849SSteve French /* check that the list is empty, since this can race with kill_sb calling tree disconnect */
5088e8f5f849SSteve French if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
5089da787d5bSBharath SM spin_unlock(&cinode->open_file_lock);
5090e8f5f849SSteve French rc = server->ops->oplock_response(tcon, persistent_fid,
509138c8a9a5SSteve French volatile_fid, net_fid, cinode);
509238c8a9a5SSteve French cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
5093c774e677SSteve French } else
5094da787d5bSBharath SM spin_unlock(&cinode->open_file_lock);
509538c8a9a5SSteve French
5096e8f5f849SSteve French cifs_put_tlink(tlink);
5097e8f5f849SSteve French out:
509838c8a9a5SSteve French cifs_done_oplock_break(cinode);
509938c8a9a5SSteve French }
510038c8a9a5SSteve French
510138c8a9a5SSteve French /*
510238c8a9a5SSteve French * The presence of cifs_direct_io() in the address space ops vector
510338c8a9a5SSteve French * allows open() with O_DIRECT, which would have failed otherwise.
510438c8a9a5SSteve French *
510538c8a9a5SSteve French * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
510638c8a9a5SSteve French * so this method should never be called.
510738c8a9a5SSteve French *
510838c8a9a5SSteve French * Direct IO is not yet supported in the cached mode.
510938c8a9a5SSteve French */
511038c8a9a5SSteve French static ssize_t
511138c8a9a5SSteve French cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
511238c8a9a5SSteve French {
511338c8a9a5SSteve French /*
511438c8a9a5SSteve French * FIXME
511538c8a9a5SSteve French * Eventually need to support direct IO for non forcedirectio mounts
511638c8a9a5SSteve French */
511738c8a9a5SSteve French return -EINVAL;
511838c8a9a5SSteve French }
511938c8a9a5SSteve French
512038c8a9a5SSteve French static int cifs_swap_activate(struct swap_info_struct *sis,
512138c8a9a5SSteve French struct file *swap_file, sector_t *span)
512238c8a9a5SSteve French {
512338c8a9a5SSteve French struct cifsFileInfo *cfile = swap_file->private_data;
512438c8a9a5SSteve French struct inode *inode = swap_file->f_mapping->host;
512538c8a9a5SSteve French unsigned long blocks;
512638c8a9a5SSteve French long long isize;
512738c8a9a5SSteve French
512838c8a9a5SSteve French cifs_dbg(FYI, "swap activate\n");
512938c8a9a5SSteve French
513038c8a9a5SSteve French if (!swap_file->f_mapping->a_ops->swap_rw)
513138c8a9a5SSteve French /* Cannot support swap */
513238c8a9a5SSteve French return -EINVAL;
513338c8a9a5SSteve French
513438c8a9a5SSteve French spin_lock(&inode->i_lock);
513538c8a9a5SSteve French blocks = inode->i_blocks;
513638c8a9a5SSteve French isize = inode->i_size;
513738c8a9a5SSteve French spin_unlock(&inode->i_lock);
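/*
 * Reject sparse swap files: if i_blocks (512-byte units) does not cover
 * i_size, the file has holes and cannot safely back swap.
 */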
513838c8a9a5SSteve French if (blocks*512 < isize) {
513938c8a9a5SSteve French pr_warn("swap activate: swapfile has holes\n");
514038c8a9a5SSteve French return -EINVAL;
514138c8a9a5SSteve French }
514238c8a9a5SSteve French *span = sis->pages;
514338c8a9a5SSteve French
514438c8a9a5SSteve French pr_warn_once("Swap support over SMB3 is experimental\n");
514538c8a9a5SSteve French
514638c8a9a5SSteve French /*
514738c8a9a5SSteve French * TODO: consider adding ACL (or documenting how) to prevent other
514838c8a9a5SSteve French * users (on this or other systems) from reading it
514938c8a9a5SSteve French */
515038c8a9a5SSteve French
515138c8a9a5SSteve French
515238c8a9a5SSteve French /* TODO: add sk_set_memalloc(inet) or similar */
515338c8a9a5SSteve French
515438c8a9a5SSteve French if (cfile)
515538c8a9a5SSteve French cfile->swapfile = true;
515638c8a9a5SSteve French /*
515738c8a9a5SSteve French * TODO: Since file already open, we can't open with DENY_ALL here
515838c8a9a5SSteve French * but we could add call to grab a byte range lock to prevent others
515938c8a9a5SSteve French * from reading or writing the file
516038c8a9a5SSteve French */
516138c8a9a5SSteve French
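/*
 * SWP_FS_OPS routes swap I/O through the address_space swap_rw method
 * instead of submitting bios to a block device directly.
 */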
516238c8a9a5SSteve French sis->flags |= SWP_FS_OPS;
516338c8a9a5SSteve French return add_swap_extent(sis, 0, sis->max, 0);
516438c8a9a5SSteve French }
516538c8a9a5SSteve French
516638c8a9a5SSteve French static void cifs_swap_deactivate(struct file *file)
516738c8a9a5SSteve French {
516838c8a9a5SSteve French struct cifsFileInfo *cfile = file->private_data;
516938c8a9a5SSteve French
517038c8a9a5SSteve French cifs_dbg(FYI, "swap deactivate\n");
517138c8a9a5SSteve French
517238c8a9a5SSteve French /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
517338c8a9a5SSteve French
517438c8a9a5SSteve French if (cfile)
517538c8a9a5SSteve French cfile->swapfile = false;
517638c8a9a5SSteve French
517738c8a9a5SSteve French /* do we need to unpin (or unlock) the file? */
517838c8a9a5SSteve French }
517938c8a9a5SSteve French
518038c8a9a5SSteve French /*
518138c8a9a5SSteve French * Mark a page as having been made dirty and thus needing writeback. We also
518238c8a9a5SSteve French * need to pin the cache object to write back to.
518338c8a9a5SSteve French */
518438c8a9a5SSteve French #ifdef CONFIG_CIFS_FSCACHE
518538c8a9a5SSteve French static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
518638c8a9a5SSteve French {
518738c8a9a5SSteve French return fscache_dirty_folio(mapping, folio,
518838c8a9a5SSteve French cifs_inode_cookie(mapping->host));
518938c8a9a5SSteve French }
519038c8a9a5SSteve French #else
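/*
 * Without fscache there is no cache object to pin when a folio is
 * dirtied, so the generic filemap_dirty_folio is sufficient.
 */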
519138c8a9a5SSteve French #define cifs_dirty_folio filemap_dirty_folio
519238c8a9a5SSteve French #endif
519338c8a9a5SSteve French
519438c8a9a5SSteve French const struct address_space_operations cifs_addr_ops = {
519538c8a9a5SSteve French .read_folio = cifs_read_folio,
519638c8a9a5SSteve French .readahead = cifs_readahead,
519738c8a9a5SSteve French .writepages = cifs_writepages,
519838c8a9a5SSteve French .write_begin = cifs_write_begin,
519938c8a9a5SSteve French .write_end = cifs_write_end,
520038c8a9a5SSteve French .dirty_folio = cifs_dirty_folio,
520138c8a9a5SSteve French .release_folio = cifs_release_folio,
520238c8a9a5SSteve French .direct_IO = cifs_direct_io,
520338c8a9a5SSteve French .invalidate_folio = cifs_invalidate_folio,
520438c8a9a5SSteve French .launder_folio = cifs_launder_folio,
520538c8a9a5SSteve French .migrate_folio = filemap_migrate_folio,
520638c8a9a5SSteve French /*
520738c8a9a5SSteve French * TODO: investigate and if useful we could add an is_dirty_writeback
520838c8a9a5SSteve French * helper if needed
520938c8a9a5SSteve French */
521038c8a9a5SSteve French .swap_activate = cifs_swap_activate,
521138c8a9a5SSteve French .swap_deactivate = cifs_swap_deactivate,
521238c8a9a5SSteve French };
521338c8a9a5SSteve French
521438c8a9a5SSteve French /*
521538c8a9a5SSteve French * cifs_readahead requires the server to support a buffer large enough to
521638c8a9a5SSteve French * contain the header plus one complete page of data. Otherwise, we need
521738c8a9a5SSteve French * to leave cifs_readahead out of the address space operations.
521838c8a9a5SSteve French */
521938c8a9a5SSteve French const struct address_space_operations cifs_addr_ops_smallbuf = {
522038c8a9a5SSteve French .read_folio = cifs_read_folio,
522138c8a9a5SSteve French .writepages = cifs_writepages,
522238c8a9a5SSteve French .write_begin = cifs_write_begin,
522338c8a9a5SSteve French .write_end = cifs_write_end,
522438c8a9a5SSteve French .dirty_folio = cifs_dirty_folio,
522538c8a9a5SSteve French .release_folio = cifs_release_folio,
522638c8a9a5SSteve French .invalidate_folio = cifs_invalidate_folio,
522738c8a9a5SSteve French .launder_folio = cifs_launder_folio,
522838c8a9a5SSteve French .migrate_folio = filemap_migrate_folio,
522938c8a9a5SSteve French };
5230