138c8a9a5SSteve French // SPDX-License-Identifier: LGPL-2.1 238c8a9a5SSteve French /* 338c8a9a5SSteve French * 438c8a9a5SSteve French * vfs operations that deal with files 538c8a9a5SSteve French * 638c8a9a5SSteve French * Copyright (C) International Business Machines Corp., 2002,2010 738c8a9a5SSteve French * Author(s): Steve French (sfrench@us.ibm.com) 838c8a9a5SSteve French * Jeremy Allison (jra@samba.org) 938c8a9a5SSteve French * 1038c8a9a5SSteve French */ 1138c8a9a5SSteve French #include <linux/fs.h> 1238c8a9a5SSteve French #include <linux/filelock.h> 1338c8a9a5SSteve French #include <linux/backing-dev.h> 1438c8a9a5SSteve French #include <linux/stat.h> 1538c8a9a5SSteve French #include <linux/fcntl.h> 1638c8a9a5SSteve French #include <linux/pagemap.h> 1738c8a9a5SSteve French #include <linux/pagevec.h> 1838c8a9a5SSteve French #include <linux/writeback.h> 1938c8a9a5SSteve French #include <linux/task_io_accounting_ops.h> 2038c8a9a5SSteve French #include <linux/delay.h> 2138c8a9a5SSteve French #include <linux/mount.h> 2238c8a9a5SSteve French #include <linux/slab.h> 2338c8a9a5SSteve French #include <linux/swap.h> 2438c8a9a5SSteve French #include <linux/mm.h> 2538c8a9a5SSteve French #include <asm/div64.h> 2638c8a9a5SSteve French #include "cifsfs.h" 2738c8a9a5SSteve French #include "cifspdu.h" 2838c8a9a5SSteve French #include "cifsglob.h" 2938c8a9a5SSteve French #include "cifsproto.h" 3038c8a9a5SSteve French #include "smb2proto.h" 3138c8a9a5SSteve French #include "cifs_unicode.h" 3238c8a9a5SSteve French #include "cifs_debug.h" 3338c8a9a5SSteve French #include "cifs_fs_sb.h" 3438c8a9a5SSteve French #include "fscache.h" 3538c8a9a5SSteve French #include "smbdirect.h" 3638c8a9a5SSteve French #include "fs_context.h" 3738c8a9a5SSteve French #include "cifs_ioctl.h" 3838c8a9a5SSteve French #include "cached_dir.h" 3938c8a9a5SSteve French 4038c8a9a5SSteve French /* 4138c8a9a5SSteve French * Remove the dirty flags from a span of pages. 
4238c8a9a5SSteve French */ 4338c8a9a5SSteve French static void cifs_undirty_folios(struct inode *inode, loff_t start, unsigned int len) 4438c8a9a5SSteve French { 4538c8a9a5SSteve French struct address_space *mapping = inode->i_mapping; 4638c8a9a5SSteve French struct folio *folio; 4738c8a9a5SSteve French pgoff_t end; 4838c8a9a5SSteve French 4938c8a9a5SSteve French XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 5038c8a9a5SSteve French 5138c8a9a5SSteve French rcu_read_lock(); 5238c8a9a5SSteve French 5338c8a9a5SSteve French end = (start + len - 1) / PAGE_SIZE; 5438c8a9a5SSteve French xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) { 5538c8a9a5SSteve French if (xas_retry(&xas, folio)) 5638c8a9a5SSteve French continue; 5738c8a9a5SSteve French xas_pause(&xas); 5838c8a9a5SSteve French rcu_read_unlock(); 5938c8a9a5SSteve French folio_lock(folio); 6038c8a9a5SSteve French folio_clear_dirty_for_io(folio); 6138c8a9a5SSteve French folio_unlock(folio); 6238c8a9a5SSteve French rcu_read_lock(); 6338c8a9a5SSteve French } 6438c8a9a5SSteve French 6538c8a9a5SSteve French rcu_read_unlock(); 6638c8a9a5SSteve French } 6738c8a9a5SSteve French 6838c8a9a5SSteve French /* 6938c8a9a5SSteve French * Completion of write to server. 
7038c8a9a5SSteve French */ 7138c8a9a5SSteve French void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len) 7238c8a9a5SSteve French { 7338c8a9a5SSteve French struct address_space *mapping = inode->i_mapping; 7438c8a9a5SSteve French struct folio *folio; 7538c8a9a5SSteve French pgoff_t end; 7638c8a9a5SSteve French 7738c8a9a5SSteve French XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 7838c8a9a5SSteve French 7938c8a9a5SSteve French if (!len) 8038c8a9a5SSteve French return; 8138c8a9a5SSteve French 8238c8a9a5SSteve French rcu_read_lock(); 8338c8a9a5SSteve French 8438c8a9a5SSteve French end = (start + len - 1) / PAGE_SIZE; 8538c8a9a5SSteve French xas_for_each(&xas, folio, end) { 8638c8a9a5SSteve French if (xas_retry(&xas, folio)) 8738c8a9a5SSteve French continue; 8838c8a9a5SSteve French if (!folio_test_writeback(folio)) { 8938c8a9a5SSteve French WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 90d3c79235SDavid Howells len, start, folio->index, end); 9138c8a9a5SSteve French continue; 9238c8a9a5SSteve French } 9338c8a9a5SSteve French 9438c8a9a5SSteve French folio_detach_private(folio); 9538c8a9a5SSteve French folio_end_writeback(folio); 9638c8a9a5SSteve French } 9738c8a9a5SSteve French 9838c8a9a5SSteve French rcu_read_unlock(); 9938c8a9a5SSteve French } 10038c8a9a5SSteve French 10138c8a9a5SSteve French /* 10238c8a9a5SSteve French * Failure of write to server. 
10338c8a9a5SSteve French */ 10438c8a9a5SSteve French void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len) 10538c8a9a5SSteve French { 10638c8a9a5SSteve French struct address_space *mapping = inode->i_mapping; 10738c8a9a5SSteve French struct folio *folio; 10838c8a9a5SSteve French pgoff_t end; 10938c8a9a5SSteve French 11038c8a9a5SSteve French XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 11138c8a9a5SSteve French 11238c8a9a5SSteve French if (!len) 11338c8a9a5SSteve French return; 11438c8a9a5SSteve French 11538c8a9a5SSteve French rcu_read_lock(); 11638c8a9a5SSteve French 11738c8a9a5SSteve French end = (start + len - 1) / PAGE_SIZE; 11838c8a9a5SSteve French xas_for_each(&xas, folio, end) { 11938c8a9a5SSteve French if (xas_retry(&xas, folio)) 12038c8a9a5SSteve French continue; 12138c8a9a5SSteve French if (!folio_test_writeback(folio)) { 12238c8a9a5SSteve French WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 123d3c79235SDavid Howells len, start, folio->index, end); 12438c8a9a5SSteve French continue; 12538c8a9a5SSteve French } 12638c8a9a5SSteve French 12738c8a9a5SSteve French folio_set_error(folio); 12838c8a9a5SSteve French folio_end_writeback(folio); 12938c8a9a5SSteve French } 13038c8a9a5SSteve French 13138c8a9a5SSteve French rcu_read_unlock(); 13238c8a9a5SSteve French } 13338c8a9a5SSteve French 13438c8a9a5SSteve French /* 13538c8a9a5SSteve French * Redirty pages after a temporary failure. 
13638c8a9a5SSteve French */ 13738c8a9a5SSteve French void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len) 13838c8a9a5SSteve French { 13938c8a9a5SSteve French struct address_space *mapping = inode->i_mapping; 14038c8a9a5SSteve French struct folio *folio; 14138c8a9a5SSteve French pgoff_t end; 14238c8a9a5SSteve French 14338c8a9a5SSteve French XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 14438c8a9a5SSteve French 14538c8a9a5SSteve French if (!len) 14638c8a9a5SSteve French return; 14738c8a9a5SSteve French 14838c8a9a5SSteve French rcu_read_lock(); 14938c8a9a5SSteve French 15038c8a9a5SSteve French end = (start + len - 1) / PAGE_SIZE; 15138c8a9a5SSteve French xas_for_each(&xas, folio, end) { 15238c8a9a5SSteve French if (!folio_test_writeback(folio)) { 15338c8a9a5SSteve French WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 154d3c79235SDavid Howells len, start, folio->index, end); 15538c8a9a5SSteve French continue; 15638c8a9a5SSteve French } 15738c8a9a5SSteve French 15838c8a9a5SSteve French filemap_dirty_folio(folio->mapping, folio); 15938c8a9a5SSteve French folio_end_writeback(folio); 16038c8a9a5SSteve French } 16138c8a9a5SSteve French 16238c8a9a5SSteve French rcu_read_unlock(); 16338c8a9a5SSteve French } 16438c8a9a5SSteve French 16538c8a9a5SSteve French /* 16638c8a9a5SSteve French * Mark as invalid, all open files on tree connections since they 16738c8a9a5SSteve French * were closed when session to server was lost. 
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	/* Someone else already moved the tcon past TID_NEED_RECON; nothing to do. */
	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	/* Only advance to TID_NEED_TCON if the status was not changed meanwhile. */
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

/*
 * Map VFS open flags to the SMB desired-access bits to request on open.
 * When @rdwr_for_fscache is 1, a write-only open also requests read access
 * (used so the local cache can fill in around partial writes).
 */
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	/* Fallback access mask if no O_ACCMODE value matched. */
	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* Translate VFS open flags into SMB_O_* flags for the SMB1 POSIX open call. */
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/* Map O_CREAT/O_EXCL/O_TRUNC combinations to an SMB create disposition. */
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open a file via the SMB1 POSIX extensions, optionally instantiating or
 * revalidating *@pinode from the FILE_UNIX_BASIC_INFO the server returns.
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* Apply the caller's umask before sending the mode to the server. */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no file metadata. */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* Existing inode: drop stale pages, then refresh its attributes. */
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Open a file over the wire using the NT-style (non-POSIX) create call and
 * refresh the inode metadata from the open response.
 */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes.
 */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

	/*********************************************************************
	 *  open flag mapping table:
	 *
	 *	POSIX Flag            CIFS Disposition
	 *	----------            ----------------
	 *	O_CREAT               FILE_OPEN_IF
	 *	O_CREAT | O_EXCL      FILE_CREATE
	 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
	 *	O_TRUNC               FILE_OVERWRITE
	 *	none of the above     FILE_OPEN
	 *
	 *	Note that there is not a direct match between disposition
	 *	FILE_SUPERSEDE (ie create whether or not file exists although
	 *	O_CREAT | O_TRUNC is similar but truncates the existing
	 *	file rather than creating a new file as FILE_SUPERSEDE does
	 *	(which uses the attributes / metadata passed in on open call)
	 *?
	 *?  O_SYNC is a reasonable match to CIFS writethrough flag
	 *?  and the read write flags match reasonably.  O_LARGEFILE
	 *?  is irrelevant because largefile support is always used
	 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
	 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
	 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		/*
		 * If the extra read access requested for fscache was refused,
		 * retry once with only the access the caller actually asked for.
		 */
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	/* Opened write-only without read access: local cache can't be filled. */
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* Metadata fetch failed: close the handle we just opened. */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

/* Return true if any fd open on this inode holds byte-range locks. */
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

/*
 * Acquire a rw_semaphore for writing by polling with trylock plus a short
 * sleep rather than blocking outright.
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

/*
 * Allocate and initialise the per-open-file private data for @file and
 * link it onto the tcon and inode open-file lists.
 */
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file
				       *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->status_file_deleted = false;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* Pin the superblock for the lifetime of this open file. */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	/* A pending-open oplock update takes precedence over the one passed in. */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	/* set_fid() may have flagged that cached data must be discarded. */
	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

/* Take an extra reference on an open file's private data. */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/* Final teardown of an open file's private data once the last ref is gone. */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* Drop the references taken in cifs_new_fileinfo(). */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

/* Workqueue wrapper so the final put can run out of atomic context. */
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

/*
 * Workqueue handler that closes the file handle on the server, retrying a
 * few times on -EBUSY/-EAGAIN, then releases the local private data.
 */
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
	);

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	/* Honour the offload choice recorded by _cifsFileInfo_put(). */
	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server.
 * Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload: not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	/* Lock ordering: tcon -> inode -> file; released in reverse. */
	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	/* Record offload preference for serverclose_work() in case close is retried async. */
	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		/* Not the last reference — nothing further to tear down. */
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	/* Must not wait for the handler when called from the handler itself. */
	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		/* NOTE(review): this inner declaration shadows the outer 'server' above. */
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

/*
 * VFS ->open for regular files: reuses a cached (deferred-close) handle when
 * flags match, otherwise opens on the server (POSIX path first where the
 * server advertises it) and attaches a new cifsFileInfo to the struct file.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path =
		    build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* Direct I/O under strict caching gets dedicated file_operations. */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			/* Flags match: adopt the cached handle, cancel its deferred close. */
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* Register the open so a concurrent lease break is not missed. */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		/* Could not track the open locally: close it back out on the server. */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	/* O_DIRECT writers bypass the cache, so any cached data must be dropped. */
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* Prefer POSIX-style locks when the server supports them and they are not disabled. */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Re-open a file handle that was invalidated (e.g. by reconnect).
 * @can_flush: when true, dirty pages are flushed and inode info refreshed
 *             from the server after the reopen succeeds.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* Someone else already reopened it while we waited on fh_mutex. */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* Upgrade-to-RDWR for fscache was refused: retry with the original access. */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

/*
 * Delayed-work callback for a deferred close: remove the deferred-close
 * record and drop the reference taken when the close was deferred.
 */
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

/*
 * Whether a close may be deferred: requires a nonzero closetimeo mount
 * option, a granted lease with read-handle caching (RHW or RH), a valid
 * @dclose record, and no close-on-lock flag on the inode.  Tolerates a
 * NULL @dclose (returns false), which the caller relies on.
 */
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG)
			&&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));

}

/*
 * VFS ->release for regular files.  When the handle qualifies (see
 * smb2_can_defer_close() and the file not being marked deleted on the
 * server), the actual server close is deferred by closetimeo via a
 * delayed work item; otherwise the reference is dropped immediately.
 * The return value of ->release is always ignored by the VFS.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		/* NULL on allocation failure is fine: smb2_can_defer_close() checks dclose. */
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

/*
 * After reconnect, walk all open files on the tree connection and reopen
 * any invalidated persistent handles; failures re-arm need_reopen_files.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
French list_for_each_entry(open_file, &tcon->openFileList, tlist) { 123538c8a9a5SSteve French if (!open_file->invalidHandle) 123638c8a9a5SSteve French continue; 123738c8a9a5SSteve French cifsFileInfo_get(open_file); 123838c8a9a5SSteve French list_add_tail(&open_file->rlist, &tmp_list); 123938c8a9a5SSteve French } 124038c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 124138c8a9a5SSteve French 124238c8a9a5SSteve French list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { 124338c8a9a5SSteve French if (cifs_reopen_file(open_file, false /* do not flush */)) 124438c8a9a5SSteve French tcon->need_reopen_files = true; 124538c8a9a5SSteve French list_del_init(&open_file->rlist); 124638c8a9a5SSteve French cifsFileInfo_put(open_file); 124738c8a9a5SSteve French } 124838c8a9a5SSteve French } 124938c8a9a5SSteve French 125038c8a9a5SSteve French int cifs_closedir(struct inode *inode, struct file *file) 125138c8a9a5SSteve French { 125238c8a9a5SSteve French int rc = 0; 125338c8a9a5SSteve French unsigned int xid; 125438c8a9a5SSteve French struct cifsFileInfo *cfile = file->private_data; 125538c8a9a5SSteve French struct cifs_tcon *tcon; 125638c8a9a5SSteve French struct TCP_Server_Info *server; 125738c8a9a5SSteve French char *buf; 125838c8a9a5SSteve French 125938c8a9a5SSteve French cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode); 126038c8a9a5SSteve French 126138c8a9a5SSteve French if (cfile == NULL) 126238c8a9a5SSteve French return rc; 126338c8a9a5SSteve French 126438c8a9a5SSteve French xid = get_xid(); 126538c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink); 126638c8a9a5SSteve French server = tcon->ses->server; 126738c8a9a5SSteve French 126838c8a9a5SSteve French cifs_dbg(FYI, "Freeing private data in close dir\n"); 126938c8a9a5SSteve French spin_lock(&cfile->file_info_lock); 127038c8a9a5SSteve French if (server->ops->dir_needs_close(cfile)) { 127138c8a9a5SSteve French cfile->invalidHandle = true; 127238c8a9a5SSteve French spin_unlock(&cfile->file_info_lock); 
127338c8a9a5SSteve French if (server->ops->close_dir) 127438c8a9a5SSteve French rc = server->ops->close_dir(xid, tcon, &cfile->fid); 127538c8a9a5SSteve French else 127638c8a9a5SSteve French rc = -ENOSYS; 127738c8a9a5SSteve French cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc); 127838c8a9a5SSteve French /* not much we can do if it fails anyway, ignore rc */ 127938c8a9a5SSteve French rc = 0; 128038c8a9a5SSteve French } else 128138c8a9a5SSteve French spin_unlock(&cfile->file_info_lock); 128238c8a9a5SSteve French 128338c8a9a5SSteve French buf = cfile->srch_inf.ntwrk_buf_start; 128438c8a9a5SSteve French if (buf) { 128538c8a9a5SSteve French cifs_dbg(FYI, "closedir free smb buf in srch struct\n"); 128638c8a9a5SSteve French cfile->srch_inf.ntwrk_buf_start = NULL; 128738c8a9a5SSteve French if (cfile->srch_inf.smallBuf) 128838c8a9a5SSteve French cifs_small_buf_release(buf); 128938c8a9a5SSteve French else 129038c8a9a5SSteve French cifs_buf_release(buf); 129138c8a9a5SSteve French } 129238c8a9a5SSteve French 129338c8a9a5SSteve French cifs_put_tlink(cfile->tlink); 129438c8a9a5SSteve French kfree(file->private_data); 129538c8a9a5SSteve French file->private_data = NULL; 129638c8a9a5SSteve French /* BB can we lock the filestruct while this is going on? 
 */
	free_xid(xid);
	return rc;
}

/*
 * Allocate and initialise a byte-range lock descriptor.  The owning pid is
 * recorded as current->tgid.  Returns NULL on allocation failure.
 */
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

/*
 * Detach and wake every waiter blocked on @lock's conflict list, letting
 * their wait_event_interruptible() in cifs_lock_add_if() complete.
 */
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/*
 * Scan one fid's lock list for a range overlapping [offset, offset+length)
 * that conflicts with the proposed lock.  On conflict, optionally report the
 * conflicting lock through *conf_lock and return true.
 *
 * @rw_check : 0 - no op, 1 - read, 2 - write
 * Callers hold cinode->lock_sem (see cifs_lock_test/cifs_lock_add_if).
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip ranges that do not overlap the requested one */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* two shared locks (same owner+fid, or same type) coexist */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks on the same fid do not conflict with each other */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

/*
 * Check every fid's lock list attached to the inode for a conflict with the
 * proposed range; stops at the first conflict found.
 */
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner/type to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;		/* no local conflict but must ask the server */
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Append @lock to this fid's lock list, serialised by the inode's lock_sem.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cached locally - done */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* queue ourselves on the conflicting lock and sleep until
		 * cifs_del_lock_waiters() unhooks us (blist becomes empty),
		 * then retry the whole conflict check */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: remove ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	/* no cached conflict, but the server must still be consulted:
	 * restore the caller's requested type before returning 1 */
	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if the error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Push all locally cached mandatory byte-range locks for @cfile to the
 * server, batching them into LOCKING_ANDX_RANGE arrays sized to the
 * server's negotiated buffer.  Two passes: exclusive locks first, then
 * shared (types[]).  Returns 0 or the last non-zero cifs_lockv() result.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* batch full - flush it to the server */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur =
buf;
				num = 0;
			} else
				cur++;
		}

		/* flush any partially filled final batch */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/*
 * Derive a 32-bit server-visible lock owner id from the in-kernel
 * fl_owner_t pointer, mixed with the boot-time cifs_lock_secret so raw
 * kernel pointers are not exposed on the wire.
 */
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/* snapshot of one POSIX lock to be sent to the server outside flc_lock */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;
	__u64 length;
	__u32 pid;		/* hashed lock owner, not a real pid */
	__u16 netfid;
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Push the inode's cached POSIX byte-range locks to the server.  Because
 * flc_lock is a spinlock and the network calls sleep, this runs in three
 * stages: count the locks, preallocate that many lock_to_push entries,
 * copy the lock data under flc_lock, then send each lock afterwards.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = locks_inode_context(inode);
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	/*
	 * NOTE(review): as extracted here, 'el' is never advanced inside this
	 * loop — verify against upstream; without an 'el = el->next' step only
	 * the first preallocated entry would ever be written.
	 */
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
	}
	spin_unlock(&flctx->flc_lock);

	/* network stage: send each snapshot, free as we go */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way: free what was preallocated */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* locks were already pushed - nothing to do */
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* POSIX path only when the server advertises unix fcntl support and
	 * the mount did not disable posix brlocks */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	/* from now on, lock requests go straight to the server */
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Decode a VFS file_lock into the server's lock-type bits and the
 * lock/unlock/wait_flag out-parameters used by cifs_setlk/cifs_getlk;
 * unrecognised flags and types are only logged.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}

/*
 * Implement F_GETLK: test whether @flock could be granted.  Uses the POSIX
 * path when @posix_lck; otherwise probes the server by taking and
 * immediately releasing a mandatory lock, updating flock->fl_type with the
 * result (F_UNLCK if the range is free).
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0: local cache answered; no server round-trip needed */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the requested lock on the server */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* it was grantable - release it and report the range free */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* even a shared lock failed: an exclusive lock is held */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive failed; see whether a shared lock would be granted to
	 * distinguish a read-lock conflict from a write-lock conflict */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

/*
 * Move every entry from @source onto the tail of @dest.
 */
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

/*
 * Free every cifsLockInfo on @llist, waking any waiters blocked on each
 * lock first.
 */
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Remove all cached locks of @cfile that fall entirely inside the unlock
 * range described by @flock and, unless brlocks are still cached locally,
 * send batched LOCKING_ANDX unlock requests to the server.  Locks are
 * parked on tmp_llist so they can be restored if a server request fails.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only locks fully contained in the unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
198338c8a9a5SSteve French */ 198438c8a9a5SSteve French cifs_free_llist(&tmp_llist); 198538c8a9a5SSteve French cur = buf; 198638c8a9a5SSteve French num = 0; 198738c8a9a5SSteve French } else 198838c8a9a5SSteve French cur++; 198938c8a9a5SSteve French } 199038c8a9a5SSteve French if (num) { 199138c8a9a5SSteve French stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, 199238c8a9a5SSteve French types[i], num, 0, buf); 199338c8a9a5SSteve French if (stored_rc) { 199438c8a9a5SSteve French cifs_move_llist(&tmp_llist, 199538c8a9a5SSteve French &cfile->llist->locks); 199638c8a9a5SSteve French rc = stored_rc; 199738c8a9a5SSteve French } else 199838c8a9a5SSteve French cifs_free_llist(&tmp_llist); 199938c8a9a5SSteve French } 200038c8a9a5SSteve French } 200138c8a9a5SSteve French 200238c8a9a5SSteve French up_write(&cinode->lock_sem); 200338c8a9a5SSteve French kfree(buf); 200438c8a9a5SSteve French return rc; 200538c8a9a5SSteve French } 200638c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 200738c8a9a5SSteve French 200838c8a9a5SSteve French static int 200938c8a9a5SSteve French cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, 201038c8a9a5SSteve French bool wait_flag, bool posix_lck, int lock, int unlock, 201138c8a9a5SSteve French unsigned int xid) 201238c8a9a5SSteve French { 201338c8a9a5SSteve French int rc = 0; 201438c8a9a5SSteve French __u64 length = cifs_flock_len(flock); 201538c8a9a5SSteve French struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 201638c8a9a5SSteve French struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 201738c8a9a5SSteve French struct TCP_Server_Info *server = tcon->ses->server; 201838c8a9a5SSteve French struct inode *inode = d_inode(cfile->dentry); 201938c8a9a5SSteve French 202038c8a9a5SSteve French #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 202138c8a9a5SSteve French if (posix_lck) { 202238c8a9a5SSteve French int posix_lock_type; 202338c8a9a5SSteve French 202438c8a9a5SSteve French rc = 
cifs_posix_lock_set(file, flock); 202538c8a9a5SSteve French if (rc <= FILE_LOCK_DEFERRED) 202638c8a9a5SSteve French return rc; 202738c8a9a5SSteve French 202838c8a9a5SSteve French if (type & server->vals->shared_lock_type) 202938c8a9a5SSteve French posix_lock_type = CIFS_RDLCK; 203038c8a9a5SSteve French else 203138c8a9a5SSteve French posix_lock_type = CIFS_WRLCK; 203238c8a9a5SSteve French 203338c8a9a5SSteve French if (unlock == 1) 203438c8a9a5SSteve French posix_lock_type = CIFS_UNLCK; 203538c8a9a5SSteve French 203638c8a9a5SSteve French rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, 203738c8a9a5SSteve French hash_lockowner(flock->fl_owner), 203838c8a9a5SSteve French flock->fl_start, length, 203938c8a9a5SSteve French NULL, posix_lock_type, wait_flag); 204038c8a9a5SSteve French goto out; 204138c8a9a5SSteve French } 204238c8a9a5SSteve French #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 204338c8a9a5SSteve French if (lock) { 204438c8a9a5SSteve French struct cifsLockInfo *lock; 204538c8a9a5SSteve French 204638c8a9a5SSteve French lock = cifs_lock_init(flock->fl_start, length, type, 204738c8a9a5SSteve French flock->fl_flags); 204838c8a9a5SSteve French if (!lock) 204938c8a9a5SSteve French return -ENOMEM; 205038c8a9a5SSteve French 205138c8a9a5SSteve French rc = cifs_lock_add_if(cfile, lock, wait_flag); 205238c8a9a5SSteve French if (rc < 0) { 205338c8a9a5SSteve French kfree(lock); 205438c8a9a5SSteve French return rc; 205538c8a9a5SSteve French } 205638c8a9a5SSteve French if (!rc) 205738c8a9a5SSteve French goto out; 205838c8a9a5SSteve French 205938c8a9a5SSteve French /* 206038c8a9a5SSteve French * Windows 7 server can delay breaking lease from read to None 206138c8a9a5SSteve French * if we set a byte-range lock on a file - break it explicitly 206238c8a9a5SSteve French * before sending the lock to the server to be sure the next 206338c8a9a5SSteve French * read won't conflict with non-overlapted locks due to 206438c8a9a5SSteve French * pagereading. 
206538c8a9a5SSteve French */ 206638c8a9a5SSteve French if (!CIFS_CACHE_WRITE(CIFS_I(inode)) && 206738c8a9a5SSteve French CIFS_CACHE_READ(CIFS_I(inode))) { 206838c8a9a5SSteve French cifs_zap_mapping(inode); 206938c8a9a5SSteve French cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n", 207038c8a9a5SSteve French inode); 207138c8a9a5SSteve French CIFS_I(inode)->oplock = 0; 207238c8a9a5SSteve French } 207338c8a9a5SSteve French 207438c8a9a5SSteve French rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, 207538c8a9a5SSteve French type, 1, 0, wait_flag); 207638c8a9a5SSteve French if (rc) { 207738c8a9a5SSteve French kfree(lock); 207838c8a9a5SSteve French return rc; 207938c8a9a5SSteve French } 208038c8a9a5SSteve French 208138c8a9a5SSteve French cifs_lock_add(cfile, lock); 208238c8a9a5SSteve French } else if (unlock) 208338c8a9a5SSteve French rc = server->ops->mand_unlock_range(cfile, flock, xid); 208438c8a9a5SSteve French 208538c8a9a5SSteve French out: 208638c8a9a5SSteve French if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) { 208738c8a9a5SSteve French /* 208838c8a9a5SSteve French * If this is a request to remove all locks because we 208938c8a9a5SSteve French * are closing the file, it doesn't matter if the 209038c8a9a5SSteve French * unlocking failed as both cifs.ko and the SMB server 209138c8a9a5SSteve French * remove the lock on file close 209238c8a9a5SSteve French */ 209338c8a9a5SSteve French if (rc) { 209438c8a9a5SSteve French cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc); 209538c8a9a5SSteve French if (!(flock->fl_flags & FL_CLOSE)) 209638c8a9a5SSteve French return rc; 209738c8a9a5SSteve French } 209838c8a9a5SSteve French rc = locks_lock_file_wait(file, flock); 209938c8a9a5SSteve French } 210038c8a9a5SSteve French return rc; 210138c8a9a5SSteve French } 210238c8a9a5SSteve French 210338c8a9a5SSteve French int cifs_flock(struct file *file, int cmd, struct file_lock *fl) 210438c8a9a5SSteve French { 210538c8a9a5SSteve French int 
rc, xid; 210638c8a9a5SSteve French int lock = 0, unlock = 0; 210738c8a9a5SSteve French bool wait_flag = false; 210838c8a9a5SSteve French bool posix_lck = false; 210938c8a9a5SSteve French struct cifs_sb_info *cifs_sb; 211038c8a9a5SSteve French struct cifs_tcon *tcon; 211138c8a9a5SSteve French struct cifsFileInfo *cfile; 211238c8a9a5SSteve French __u32 type; 211338c8a9a5SSteve French 211438c8a9a5SSteve French xid = get_xid(); 211538c8a9a5SSteve French 211638c8a9a5SSteve French if (!(fl->fl_flags & FL_FLOCK)) { 211738c8a9a5SSteve French rc = -ENOLCK; 211838c8a9a5SSteve French free_xid(xid); 211938c8a9a5SSteve French return rc; 212038c8a9a5SSteve French } 212138c8a9a5SSteve French 212238c8a9a5SSteve French cfile = (struct cifsFileInfo *)file->private_data; 212338c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink); 212438c8a9a5SSteve French 212538c8a9a5SSteve French cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag, 212638c8a9a5SSteve French tcon->ses->server); 212738c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file); 212838c8a9a5SSteve French 212938c8a9a5SSteve French if (cap_unix(tcon->ses) && 213038c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 213138c8a9a5SSteve French ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 213238c8a9a5SSteve French posix_lck = true; 213338c8a9a5SSteve French 213438c8a9a5SSteve French if (!lock && !unlock) { 213538c8a9a5SSteve French /* 213638c8a9a5SSteve French * if no lock or unlock then nothing to do since we do not 213738c8a9a5SSteve French * know what it is 213838c8a9a5SSteve French */ 213938c8a9a5SSteve French rc = -EOPNOTSUPP; 214038c8a9a5SSteve French free_xid(xid); 214138c8a9a5SSteve French return rc; 214238c8a9a5SSteve French } 214338c8a9a5SSteve French 214438c8a9a5SSteve French rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, 214538c8a9a5SSteve French xid); 214638c8a9a5SSteve French free_xid(xid); 214738c8a9a5SSteve French return rc; 214838c8a9a5SSteve French 
214938c8a9a5SSteve French 215038c8a9a5SSteve French } 215138c8a9a5SSteve French 215238c8a9a5SSteve French int cifs_lock(struct file *file, int cmd, struct file_lock *flock) 215338c8a9a5SSteve French { 215438c8a9a5SSteve French int rc, xid; 215538c8a9a5SSteve French int lock = 0, unlock = 0; 215638c8a9a5SSteve French bool wait_flag = false; 215738c8a9a5SSteve French bool posix_lck = false; 215838c8a9a5SSteve French struct cifs_sb_info *cifs_sb; 215938c8a9a5SSteve French struct cifs_tcon *tcon; 216038c8a9a5SSteve French struct cifsFileInfo *cfile; 216138c8a9a5SSteve French __u32 type; 216238c8a9a5SSteve French 216338c8a9a5SSteve French rc = -EACCES; 216438c8a9a5SSteve French xid = get_xid(); 216538c8a9a5SSteve French 216638c8a9a5SSteve French cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, 216738c8a9a5SSteve French flock->fl_flags, flock->fl_type, (long long)flock->fl_start, 216838c8a9a5SSteve French (long long)flock->fl_end); 216938c8a9a5SSteve French 217038c8a9a5SSteve French cfile = (struct cifsFileInfo *)file->private_data; 217138c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink); 217238c8a9a5SSteve French 217338c8a9a5SSteve French cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, 217438c8a9a5SSteve French tcon->ses->server); 217538c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file); 217638c8a9a5SSteve French set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); 217738c8a9a5SSteve French 217838c8a9a5SSteve French if (cap_unix(tcon->ses) && 217938c8a9a5SSteve French (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 218038c8a9a5SSteve French ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 218138c8a9a5SSteve French posix_lck = true; 218238c8a9a5SSteve French /* 218338c8a9a5SSteve French * BB add code here to normalize offset and length to account for 218438c8a9a5SSteve French * negative length which we can not accept over the wire. 
218538c8a9a5SSteve French */ 218638c8a9a5SSteve French if (IS_GETLK(cmd)) { 218738c8a9a5SSteve French rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid); 218838c8a9a5SSteve French free_xid(xid); 218938c8a9a5SSteve French return rc; 219038c8a9a5SSteve French } 219138c8a9a5SSteve French 219238c8a9a5SSteve French if (!lock && !unlock) { 219338c8a9a5SSteve French /* 219438c8a9a5SSteve French * if no lock or unlock then nothing to do since we do not 219538c8a9a5SSteve French * know what it is 219638c8a9a5SSteve French */ 219738c8a9a5SSteve French free_xid(xid); 219838c8a9a5SSteve French return -EOPNOTSUPP; 219938c8a9a5SSteve French } 220038c8a9a5SSteve French 220138c8a9a5SSteve French rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock, 220238c8a9a5SSteve French xid); 220338c8a9a5SSteve French free_xid(xid); 220438c8a9a5SSteve French return rc; 220538c8a9a5SSteve French } 220638c8a9a5SSteve French 220738c8a9a5SSteve French /* 220838c8a9a5SSteve French * update the file size (if needed) after a write. 
Should be called with 220938c8a9a5SSteve French * the inode->i_lock held 221038c8a9a5SSteve French */ 221138c8a9a5SSteve French void 221238c8a9a5SSteve French cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, 221338c8a9a5SSteve French unsigned int bytes_written) 221438c8a9a5SSteve French { 221538c8a9a5SSteve French loff_t end_of_write = offset + bytes_written; 221638c8a9a5SSteve French 221738c8a9a5SSteve French if (end_of_write > cifsi->server_eof) 221838c8a9a5SSteve French cifsi->server_eof = end_of_write; 221938c8a9a5SSteve French } 222038c8a9a5SSteve French 222138c8a9a5SSteve French static ssize_t 222238c8a9a5SSteve French cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, 222338c8a9a5SSteve French size_t write_size, loff_t *offset) 222438c8a9a5SSteve French { 222538c8a9a5SSteve French int rc = 0; 222638c8a9a5SSteve French unsigned int bytes_written = 0; 222738c8a9a5SSteve French unsigned int total_written; 222838c8a9a5SSteve French struct cifs_tcon *tcon; 222938c8a9a5SSteve French struct TCP_Server_Info *server; 223038c8a9a5SSteve French unsigned int xid; 223138c8a9a5SSteve French struct dentry *dentry = open_file->dentry; 223238c8a9a5SSteve French struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry)); 223338c8a9a5SSteve French struct cifs_io_parms io_parms = {0}; 223438c8a9a5SSteve French 223538c8a9a5SSteve French cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n", 223638c8a9a5SSteve French write_size, *offset, dentry); 223738c8a9a5SSteve French 223838c8a9a5SSteve French tcon = tlink_tcon(open_file->tlink); 223938c8a9a5SSteve French server = tcon->ses->server; 224038c8a9a5SSteve French 224138c8a9a5SSteve French if (!server->ops->sync_write) 224238c8a9a5SSteve French return -ENOSYS; 224338c8a9a5SSteve French 224438c8a9a5SSteve French xid = get_xid(); 224538c8a9a5SSteve French 224638c8a9a5SSteve French for (total_written = 0; write_size > total_written; 224738c8a9a5SSteve French total_written += bytes_written) { 
224838c8a9a5SSteve French rc = -EAGAIN; 224938c8a9a5SSteve French while (rc == -EAGAIN) { 225038c8a9a5SSteve French struct kvec iov[2]; 225138c8a9a5SSteve French unsigned int len; 225238c8a9a5SSteve French 225338c8a9a5SSteve French if (open_file->invalidHandle) { 225438c8a9a5SSteve French /* we could deadlock if we called 225538c8a9a5SSteve French filemap_fdatawait from here so tell 225638c8a9a5SSteve French reopen_file not to flush data to 225738c8a9a5SSteve French server now */ 225838c8a9a5SSteve French rc = cifs_reopen_file(open_file, false); 225938c8a9a5SSteve French if (rc != 0) 226038c8a9a5SSteve French break; 226138c8a9a5SSteve French } 226238c8a9a5SSteve French 226338c8a9a5SSteve French len = min(server->ops->wp_retry_size(d_inode(dentry)), 226438c8a9a5SSteve French (unsigned int)write_size - total_written); 226538c8a9a5SSteve French /* iov[0] is reserved for smb header */ 226638c8a9a5SSteve French iov[1].iov_base = (char *)write_data + total_written; 226738c8a9a5SSteve French iov[1].iov_len = len; 226838c8a9a5SSteve French io_parms.pid = pid; 226938c8a9a5SSteve French io_parms.tcon = tcon; 227038c8a9a5SSteve French io_parms.offset = *offset; 227138c8a9a5SSteve French io_parms.length = len; 227238c8a9a5SSteve French rc = server->ops->sync_write(xid, &open_file->fid, 227338c8a9a5SSteve French &io_parms, &bytes_written, iov, 1); 227438c8a9a5SSteve French } 227538c8a9a5SSteve French if (rc || (bytes_written == 0)) { 227638c8a9a5SSteve French if (total_written) 227738c8a9a5SSteve French break; 227838c8a9a5SSteve French else { 227938c8a9a5SSteve French free_xid(xid); 228038c8a9a5SSteve French return rc; 228138c8a9a5SSteve French } 228238c8a9a5SSteve French } else { 228338c8a9a5SSteve French spin_lock(&d_inode(dentry)->i_lock); 228438c8a9a5SSteve French cifs_update_eof(cifsi, *offset, bytes_written); 228538c8a9a5SSteve French spin_unlock(&d_inode(dentry)->i_lock); 228638c8a9a5SSteve French *offset += bytes_written; 228738c8a9a5SSteve French } 228838c8a9a5SSteve 
French } 228938c8a9a5SSteve French 229038c8a9a5SSteve French cifs_stats_bytes_written(tcon, total_written); 229138c8a9a5SSteve French 229238c8a9a5SSteve French if (total_written > 0) { 229338c8a9a5SSteve French spin_lock(&d_inode(dentry)->i_lock); 229438c8a9a5SSteve French if (*offset > d_inode(dentry)->i_size) { 229538c8a9a5SSteve French i_size_write(d_inode(dentry), *offset); 229638c8a9a5SSteve French d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9; 229738c8a9a5SSteve French } 229838c8a9a5SSteve French spin_unlock(&d_inode(dentry)->i_lock); 229938c8a9a5SSteve French } 230038c8a9a5SSteve French mark_inode_dirty_sync(d_inode(dentry)); 230138c8a9a5SSteve French free_xid(xid); 230238c8a9a5SSteve French return total_written; 230338c8a9a5SSteve French } 230438c8a9a5SSteve French 230538c8a9a5SSteve French struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 230638c8a9a5SSteve French bool fsuid_only) 230738c8a9a5SSteve French { 230838c8a9a5SSteve French struct cifsFileInfo *open_file = NULL; 230938c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 231038c8a9a5SSteve French 231138c8a9a5SSteve French /* only filter by fsuid on multiuser mounts */ 231238c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 231338c8a9a5SSteve French fsuid_only = false; 231438c8a9a5SSteve French 231538c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock); 231638c8a9a5SSteve French /* we could simply get the first_list_entry since write-only entries 231738c8a9a5SSteve French are always at the end of the list but since the first entry might 231838c8a9a5SSteve French have a close pending, we go through the whole list */ 231938c8a9a5SSteve French list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 232038c8a9a5SSteve French if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 232138c8a9a5SSteve French continue; 232238c8a9a5SSteve French if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { 
232338c8a9a5SSteve French if ((!open_file->invalidHandle)) { 232438c8a9a5SSteve French /* found a good file */ 232538c8a9a5SSteve French /* lock it so it will not be closed on us */ 232638c8a9a5SSteve French cifsFileInfo_get(open_file); 232738c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock); 232838c8a9a5SSteve French return open_file; 232938c8a9a5SSteve French } /* else might as well continue, and look for 233038c8a9a5SSteve French another, or simply have the caller reopen it 233138c8a9a5SSteve French again rather than trying to fix this handle */ 233238c8a9a5SSteve French } else /* write only file */ 233338c8a9a5SSteve French break; /* write only files are last so must be done */ 233438c8a9a5SSteve French } 233538c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock); 233638c8a9a5SSteve French return NULL; 233738c8a9a5SSteve French } 233838c8a9a5SSteve French 233938c8a9a5SSteve French /* Return -EBADF if no handle is found and general rc otherwise */ 234038c8a9a5SSteve French int 234138c8a9a5SSteve French cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 234238c8a9a5SSteve French struct cifsFileInfo **ret_file) 234338c8a9a5SSteve French { 234438c8a9a5SSteve French struct cifsFileInfo *open_file, *inv_file = NULL; 234538c8a9a5SSteve French struct cifs_sb_info *cifs_sb; 234638c8a9a5SSteve French bool any_available = false; 234738c8a9a5SSteve French int rc = -EBADF; 234838c8a9a5SSteve French unsigned int refind = 0; 234938c8a9a5SSteve French bool fsuid_only = flags & FIND_WR_FSUID_ONLY; 235038c8a9a5SSteve French bool with_delete = flags & FIND_WR_WITH_DELETE; 235138c8a9a5SSteve French *ret_file = NULL; 235238c8a9a5SSteve French 235338c8a9a5SSteve French /* 235438c8a9a5SSteve French * Having a null inode here (because mapping->host was set to zero by 235538c8a9a5SSteve French * the VFS or MM) should not happen but we had reports of on oops (due 235638c8a9a5SSteve French * to it being zero) during stress testcases so we need to check 
for it 235738c8a9a5SSteve French */ 235838c8a9a5SSteve French 235938c8a9a5SSteve French if (cifs_inode == NULL) { 236038c8a9a5SSteve French cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n"); 236138c8a9a5SSteve French dump_stack(); 236238c8a9a5SSteve French return rc; 236338c8a9a5SSteve French } 236438c8a9a5SSteve French 236538c8a9a5SSteve French cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 236638c8a9a5SSteve French 236738c8a9a5SSteve French /* only filter by fsuid on multiuser mounts */ 236838c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 236938c8a9a5SSteve French fsuid_only = false; 237038c8a9a5SSteve French 237138c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock); 237238c8a9a5SSteve French refind_writable: 237338c8a9a5SSteve French if (refind > MAX_REOPEN_ATT) { 237438c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock); 237538c8a9a5SSteve French return rc; 237638c8a9a5SSteve French } 237738c8a9a5SSteve French list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 237838c8a9a5SSteve French if (!any_available && open_file->pid != current->tgid) 237938c8a9a5SSteve French continue; 238038c8a9a5SSteve French if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 238138c8a9a5SSteve French continue; 238238c8a9a5SSteve French if (with_delete && !(open_file->fid.access & DELETE)) 238338c8a9a5SSteve French continue; 238438c8a9a5SSteve French if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 238538c8a9a5SSteve French if (!open_file->invalidHandle) { 238638c8a9a5SSteve French /* found a good writable file */ 238738c8a9a5SSteve French cifsFileInfo_get(open_file); 238838c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock); 238938c8a9a5SSteve French *ret_file = open_file; 239038c8a9a5SSteve French return 0; 239138c8a9a5SSteve French } else { 239238c8a9a5SSteve French if (!inv_file) 239338c8a9a5SSteve French inv_file = open_file; 239438c8a9a5SSteve French } 239538c8a9a5SSteve French } 
239638c8a9a5SSteve French } 239738c8a9a5SSteve French /* couldn't find useable FH with same pid, try any available */ 239838c8a9a5SSteve French if (!any_available) { 239938c8a9a5SSteve French any_available = true; 240038c8a9a5SSteve French goto refind_writable; 240138c8a9a5SSteve French } 240238c8a9a5SSteve French 240338c8a9a5SSteve French if (inv_file) { 240438c8a9a5SSteve French any_available = false; 240538c8a9a5SSteve French cifsFileInfo_get(inv_file); 240638c8a9a5SSteve French } 240738c8a9a5SSteve French 240838c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock); 240938c8a9a5SSteve French 241038c8a9a5SSteve French if (inv_file) { 241138c8a9a5SSteve French rc = cifs_reopen_file(inv_file, false); 241238c8a9a5SSteve French if (!rc) { 241338c8a9a5SSteve French *ret_file = inv_file; 241438c8a9a5SSteve French return 0; 241538c8a9a5SSteve French } 241638c8a9a5SSteve French 241738c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock); 241838c8a9a5SSteve French list_move_tail(&inv_file->flist, &cifs_inode->openFileList); 241938c8a9a5SSteve French spin_unlock(&cifs_inode->open_file_lock); 242038c8a9a5SSteve French cifsFileInfo_put(inv_file); 242138c8a9a5SSteve French ++refind; 242238c8a9a5SSteve French inv_file = NULL; 242338c8a9a5SSteve French spin_lock(&cifs_inode->open_file_lock); 242438c8a9a5SSteve French goto refind_writable; 242538c8a9a5SSteve French } 242638c8a9a5SSteve French 242738c8a9a5SSteve French return rc; 242838c8a9a5SSteve French } 242938c8a9a5SSteve French 243038c8a9a5SSteve French struct cifsFileInfo * 243138c8a9a5SSteve French find_writable_file(struct cifsInodeInfo *cifs_inode, int flags) 243238c8a9a5SSteve French { 243338c8a9a5SSteve French struct cifsFileInfo *cfile; 243438c8a9a5SSteve French int rc; 243538c8a9a5SSteve French 243638c8a9a5SSteve French rc = cifs_get_writable_file(cifs_inode, flags, &cfile); 243738c8a9a5SSteve French if (rc) 243838c8a9a5SSteve French cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc); 
243938c8a9a5SSteve French 244038c8a9a5SSteve French return cfile; 244138c8a9a5SSteve French } 244238c8a9a5SSteve French 244338c8a9a5SSteve French int 244438c8a9a5SSteve French cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, 244538c8a9a5SSteve French int flags, 244638c8a9a5SSteve French struct cifsFileInfo **ret_file) 244738c8a9a5SSteve French { 244838c8a9a5SSteve French struct cifsFileInfo *cfile; 244938c8a9a5SSteve French void *page = alloc_dentry_path(); 245038c8a9a5SSteve French 245138c8a9a5SSteve French *ret_file = NULL; 245238c8a9a5SSteve French 245338c8a9a5SSteve French spin_lock(&tcon->open_file_lock); 245438c8a9a5SSteve French list_for_each_entry(cfile, &tcon->openFileList, tlist) { 245538c8a9a5SSteve French struct cifsInodeInfo *cinode; 245638c8a9a5SSteve French const char *full_path = build_path_from_dentry(cfile->dentry, page); 245738c8a9a5SSteve French if (IS_ERR(full_path)) { 245838c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 245938c8a9a5SSteve French free_dentry_path(page); 246038c8a9a5SSteve French return PTR_ERR(full_path); 246138c8a9a5SSteve French } 246238c8a9a5SSteve French if (strcmp(full_path, name)) 246338c8a9a5SSteve French continue; 246438c8a9a5SSteve French 246538c8a9a5SSteve French cinode = CIFS_I(d_inode(cfile->dentry)); 246638c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 246738c8a9a5SSteve French free_dentry_path(page); 246838c8a9a5SSteve French return cifs_get_writable_file(cinode, flags, ret_file); 246938c8a9a5SSteve French } 247038c8a9a5SSteve French 247138c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 247238c8a9a5SSteve French free_dentry_path(page); 247338c8a9a5SSteve French return -ENOENT; 247438c8a9a5SSteve French } 247538c8a9a5SSteve French 247638c8a9a5SSteve French int 247738c8a9a5SSteve French cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, 247838c8a9a5SSteve French struct cifsFileInfo **ret_file) 247938c8a9a5SSteve French { 248038c8a9a5SSteve French struct 
cifsFileInfo *cfile; 248138c8a9a5SSteve French void *page = alloc_dentry_path(); 248238c8a9a5SSteve French 248338c8a9a5SSteve French *ret_file = NULL; 248438c8a9a5SSteve French 248538c8a9a5SSteve French spin_lock(&tcon->open_file_lock); 248638c8a9a5SSteve French list_for_each_entry(cfile, &tcon->openFileList, tlist) { 248738c8a9a5SSteve French struct cifsInodeInfo *cinode; 248838c8a9a5SSteve French const char *full_path = build_path_from_dentry(cfile->dentry, page); 248938c8a9a5SSteve French if (IS_ERR(full_path)) { 249038c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 249138c8a9a5SSteve French free_dentry_path(page); 249238c8a9a5SSteve French return PTR_ERR(full_path); 249338c8a9a5SSteve French } 249438c8a9a5SSteve French if (strcmp(full_path, name)) 249538c8a9a5SSteve French continue; 249638c8a9a5SSteve French 249738c8a9a5SSteve French cinode = CIFS_I(d_inode(cfile->dentry)); 249838c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 249938c8a9a5SSteve French free_dentry_path(page); 250038c8a9a5SSteve French *ret_file = find_readable_file(cinode, 0); 250138c8a9a5SSteve French return *ret_file ? 
0 : -ENOENT; 250238c8a9a5SSteve French } 250338c8a9a5SSteve French 250438c8a9a5SSteve French spin_unlock(&tcon->open_file_lock); 250538c8a9a5SSteve French free_dentry_path(page); 250638c8a9a5SSteve French return -ENOENT; 250738c8a9a5SSteve French } 250838c8a9a5SSteve French 250938c8a9a5SSteve French void 251038c8a9a5SSteve French cifs_writedata_release(struct kref *refcount) 251138c8a9a5SSteve French { 251238c8a9a5SSteve French struct cifs_writedata *wdata = container_of(refcount, 251338c8a9a5SSteve French struct cifs_writedata, refcount); 251438c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT 251538c8a9a5SSteve French if (wdata->mr) { 251638c8a9a5SSteve French smbd_deregister_mr(wdata->mr); 251738c8a9a5SSteve French wdata->mr = NULL; 251838c8a9a5SSteve French } 251938c8a9a5SSteve French #endif 252038c8a9a5SSteve French 252138c8a9a5SSteve French if (wdata->cfile) 252238c8a9a5SSteve French cifsFileInfo_put(wdata->cfile); 252338c8a9a5SSteve French 252438c8a9a5SSteve French kfree(wdata); 252538c8a9a5SSteve French } 252638c8a9a5SSteve French 252738c8a9a5SSteve French /* 252838c8a9a5SSteve French * Write failed with a retryable error. Resend the write request. It's also 252938c8a9a5SSteve French * possible that the page was redirtied so re-clean the page. 
 */
/*
 * Retry a write that the server failed with a retryable error (-EAGAIN).
 * The original request is re-split into chunks no larger than the server's
 * current retry write size (->wp_retry_size(), rounded down to a whole page)
 * and each chunk is resubmitted as a fresh asynchronous write.  The
 * reference the caller held on @wdata is consumed before returning.
 */
static void
cifs_writev_requeue(struct cifs_writedata *wdata)
{
	int rc = 0;
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct TCP_Server_Info *server;
	unsigned int rest_len = wdata->bytes;
	loff_t fpos = wdata->offset;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;
	do {
		struct cifs_writedata *wdata2;
		unsigned int wsize, cur_len;

		wsize = server->ops->wp_retry_size(inode);
		if (wsize < rest_len) {
			/* Can't make progress if the server won't take at
			 * least one page per request.
			 */
			if (wsize < PAGE_SIZE) {
				rc = -EOPNOTSUPP;
				break;
			}
			cur_len = min(round_down(wsize, PAGE_SIZE), rest_len);
		} else {
			cur_len = rest_len;
		}

		wdata2 = cifs_writedata_alloc(cifs_writev_complete);
		if (!wdata2) {
			rc = -ENOMEM;
			break;
		}

		wdata2->sync_mode = wdata->sync_mode;
		wdata2->offset	= fpos;
		wdata2->bytes	= cur_len;
		/* Copy the iterator and then clip it down to this chunk. */
		wdata2->iter	= wdata->iter;

		iov_iter_advance(&wdata2->iter, fpos - wdata->offset);
		iov_iter_truncate(&wdata2->iter, wdata2->bytes);

		if (iov_iter_is_xarray(&wdata2->iter))
			/* Check for pages having been redirtied and clean
			 * them.  We can do this by walking the xarray.  If
			 * it's not an xarray, then it's a DIO and we shouldn't
			 * be mucking around with the page bits.
			 */
			cifs_undirty_folios(inode, fpos, cur_len);

		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
					    &wdata2->cfile);
		if (!wdata2->cfile) {
			cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
				 rc);
			if (!is_retryable_error(rc))
				rc = -EBADF;
		} else {
			wdata2->pid = wdata2->cfile->pid;
			rc = server->ops->async_writev(wdata2,
						       cifs_writedata_release);
		}

		/* Drop our submission reference; on success the completion
		 * work owns the remaining reference to wdata2.
		 */
		kref_put(&wdata2->refcount, cifs_writedata_release);
		if (rc) {
			if (is_retryable_error(rc))
				continue;
			fpos += cur_len;
			rest_len -= cur_len;
			break;
		}

		fpos += cur_len;
		rest_len -= cur_len;
	} while (rest_len > 0);

	/* Clean up remaining pages from the original wdata */
	if (iov_iter_is_xarray(&wdata->iter))
		cifs_pages_write_failed(inode, fpos, rest_len);

	if (rc != 0 && !is_retryable_error(rc))
		mapping_set_error(inode->i_mapping, rc);
	kref_put(&wdata->refcount, cifs_writedata_release);
}

/*
 * Completion work for an asynchronous write.  On success, advance the
 * cached EOF and byte counters; on -EAGAIN under WB_SYNC_ALL, requeue the
 * whole request (which consumes our reference).  Otherwise mark the spanned
 * pages redirtied, failed or written back to match the result, record any
 * hard error on the mapping, and drop the request reference.
 */
void
cifs_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
						struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);

	if (wdata->result == 0) {
		spin_lock(&inode->i_lock);
		cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
		spin_unlock(&inode->i_lock);
		cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
					 wdata->bytes);
	} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
		return cifs_writev_requeue(wdata);

	if (wdata->result == -EAGAIN)
		cifs_pages_write_redirty(inode, wdata->offset, wdata->bytes);
	else if (wdata->result < 0)
		cifs_pages_write_failed(inode, wdata->offset, wdata->bytes);
	else
		cifs_pages_written_back(inode, wdata->offset, wdata->bytes);

	if (wdata->result != -EAGAIN)
		mapping_set_error(inode->i_mapping, wdata->result);
	kref_put(&wdata->refcount, cifs_writedata_release);
}

/*
 * Allocate a zeroed write request descriptor with an initial reference and
 * its completion work item set to @complete.  Returns NULL on allocation
 * failure.  GFP_NOFS: this may be called from writeback, so we must not
 * recurse into filesystem reclaim.
 */
struct cifs_writedata *cifs_writedata_alloc(work_func_t complete)
{
	struct cifs_writedata *wdata;

	wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
	if (wdata != NULL) {
		kref_init(&wdata->refcount);
		INIT_LIST_HEAD(&wdata->list);
		init_completion(&wdata->done);
		INIT_WORK(&wdata->work, complete);
	}
	return wdata;
}

/*
 * Synchronously write bytes [@from, @to) of @page to the server using any
 * available writable handle.  Returns 0 on success (including the benign
 * case where the page now lies beyond EOF because of a racing truncate),
 * or a negative error code.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* Sanity-check the requested span before touching the server. */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
				    &open_file);
	if (!rc) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		simple_inode_init_ts(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		else
			rc = -EFAULT;
	} else {
		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
		if (!is_retryable_error(rc))
			rc = -EIO;
	}

	kunmap(page);
	return rc;
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 */
/*
 * On return, *_len has grown by the bytes gathered and *_count has shrunk
 * by the pages gathered; every gathered folio has had its dirty flag
 * cleared, writeback set, and has been unlocked.
 */
static void cifs_extend_writeback(struct address_space *mapping,
				  struct xa_state *xas,
				  long *_count,
				  loff_t start,
				  int max_pages,
				  loff_t max_len,
				  size_t *_len)
{
	struct folio_batch batch;
	struct folio *folio;
	unsigned int nr_pages;
	pgoff_t index = (start + *_len) / PAGE_SIZE;
	size_t len;
	bool stop = true;
	unsigned int i;

	folio_batch_init(&batch);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			/* Stop at the first gap in the dirty run. */
			if (folio->index != index) {
				xas_reset(xas);
				break;
			}

			if (!folio_try_get_rcu(folio)) {
				xas_reset(xas);
				continue;
			}
			nr_pages = folio_nr_pages(folio);
			if (nr_pages > max_pages) {
				xas_reset(xas);
				break;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(xas))) {
				folio_put(folio);
				xas_reset(xas);
				break;
			}

			/* Non-blocking only: never sleep on the folio lock
			 * while under rcu_read_lock().
			 */
			if (!folio_trylock(folio)) {
				folio_put(folio);
				xas_reset(xas);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				xas_reset(xas);
				break;
			}

			max_pages -= nr_pages;
			len = folio_size(folio);
			stop = false;

			index += nr_pages;
			*_count -= nr_pages;
			*_len += len;
			if (max_pages <= 0 || *_len >= max_len || *_count <= 0)
				stop = true;

			/* The batch keeps a reference on each locked folio. */
			if (!folio_batch_add(&batch, folio))
				break;
			if (stop)
				break;
		}

		xas_pause(xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!folio_batch_count(&batch))
			break;

		for (i = 0; i < folio_batch_count(&batch); i++) {
			folio = batch.folios[i];
			/* The folio should be locked, dirty and not undergoing
			 * writeback from the loop above.
			 */
			if (!folio_clear_dirty_for_io(folio))
				WARN_ON(1);
			folio_start_writeback(folio);
			folio_unlock(folio);
		}

		/* Drops the batch's folio references. */
		folio_batch_release(&batch);
		cond_resched();
	} while (!stop);
}

/*
 * Write back the locked page and any subsequent non-locked dirty pages.
 */
/*
 * @folio must be locked, dirty and not under writeback on entry.  Returns
 * the number of bytes submitted on success, or a negative error code.
 *
 * NOTE(review): the error paths taken before the folio_unlock() below
 * (err_xid / err_close / err_uncredit) appear to leave the first folio
 * locked and with writeback set — verify against upstream fixes.
 */
static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
						 struct writeback_control *wbc,
						 struct xa_state *xas,
						 struct folio *folio,
						 unsigned long long start,
						 unsigned long long end)
{
	struct inode *inode = mapping->host;
	struct TCP_Server_Info *server;
	struct cifs_writedata *wdata;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_credits credits_on_stack;
	struct cifs_credits *credits = &credits_on_stack;
	struct cifsFileInfo *cfile = NULL;
	unsigned long long i_size = i_size_read(inode), max_len;
	unsigned int xid, wsize;
	size_t len = folio_size(folio);
	long count = wbc->nr_to_write;
	int rc;

	/* The folio should be locked, dirty and not undergoing writeback. */
	if (!folio_clear_dirty_for_io(folio))
		WARN_ON_ONCE(1);
	folio_start_writeback(folio);

	count -= folio_nr_pages(folio);

	xid = get_xid();
	server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);

	rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
	if (rc) {
		cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc);
		goto err_xid;
	}

	/* Reserve send credits; wsize comes back as the permitted I/O size. */
	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
					   &wsize, credits);
	if (rc != 0)
		goto err_close;

	wdata = cifs_writedata_alloc(cifs_writev_complete);
	if (!wdata) {
		rc = -ENOMEM;
		goto err_uncredit;
	}

	wdata->sync_mode = wbc->sync_mode;
	wdata->offset = folio_pos(folio);
	wdata->pid = cfile->pid;
	wdata->credits = credits_on_stack;
	wdata->cfile = cfile;
	wdata->server = server;
	cfile = NULL;	/* ownership transferred to wdata */

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = wsize;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len) {
			int max_pages = INT_MAX;

#ifdef CONFIG_CIFS_SMB_DIRECT
			/* RDMA fast-register depth caps the page count. */
			if (server->smbd_conn)
				max_pages = server->smbd_conn->max_frmr_depth;
#endif
			max_pages -= folio_nr_pages(folio);

			if (max_pages > 0)
				cifs_extend_writeback(mapping, xas, &count, start,
						      max_pages, max_len, &len);
		}
	}
	len = min_t(unsigned long long, len, i_size - start);

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);
	wdata->bytes = len;

	if (start < i_size) {
		iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages,
				start, len);

		rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
		if (rc)
			goto err_wdata;

		if (wdata->cfile->invalidHandle)
			rc = -EAGAIN;
		else
			rc = wdata->server->ops->async_writev(wdata,
							      cifs_writedata_release);
		if (rc >= 0) {
			/* Submitted: completion work owns the other ref. */
			kref_put(&wdata->refcount, cifs_writedata_release);
			goto err_close;
		}
	} else {
		/* The dirty region was entirely beyond the EOF. */
		cifs_pages_written_back(inode, start, len);
		rc = 0;
	}

err_wdata:
	kref_put(&wdata->refcount, cifs_writedata_release);
err_uncredit:
	add_credits_and_wake_if(server, credits, 0);
err_close:
	if (cfile)
		cifsFileInfo_put(cfile);
err_xid:
	free_xid(xid);
	if (rc == 0) {
		wbc->nr_to_write = count;
		rc = len;
	} else if (is_retryable_error(rc)) {
		cifs_pages_write_redirty(inode, start, len);
	} else {
		cifs_pages_write_failed(inode, start, len);
		mapping_set_error(mapping, rc);
	}
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
	return rc;
}

/*
 * write a region of pages back to the server
 *
 * Locate the next dirty folio at or after *_start (up to @end), lock it,
 * and hand it to cifs_write_back_from_locked_folio().  On success, *_start
 * is advanced past the bytes written; returns 0 when no dirty folio is
 * found, a positive byte count on success, or a negative error code.
 */
static ssize_t cifs_writepages_begin(struct address_space *mapping,
				     struct writeback_control *wbc,
				     struct xa_state *xas,
				     unsigned long long *_start,
				     unsigned long long end)
{
	struct folio *folio;
	unsigned long long start = *_start;
	ssize_t ret;
	int skips = 0;

search_again:
	/* Find the first dirty page. */
	rcu_read_lock();

	for (;;) {
		folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
		if (xas_retry(xas, folio) || xa_is_value(folio))
			continue;
		if (!folio)
			break;

		if (!folio_try_get_rcu(folio)) {
			xas_reset(xas);
			continue;
		}

		/* Recheck the folio is still what we found. */
		if (unlikely(folio != xas_reload(xas))) {
			folio_put(folio);
			xas_reset(xas);
			continue;
		}

		xas_pause(xas);
		break;
	}
	rcu_read_unlock();
	if (!folio)
		return 0;

	start = folio_pos(folio); /* May regress with THPs */

	/* At this point we hold neither the i_pages lock nor the page lock:
	 * the page may be truncated or invalidated (changing page->mapping to
	 * NULL), or even swizzled back from swapper_space to tmpfs file
	 * mapping
	 */
lock_again:
	if (wbc->sync_mode != WB_SYNC_NONE) {
		ret = folio_lock_killable(folio);
		if (ret < 0)
			return ret;
	} else {
		if (!folio_trylock(folio))
			goto search_again;
	}

	/* Folio truncated or cleaned while we waited for the lock. */
	if (folio->mapping != mapping ||
	    !folio_test_dirty(folio)) {
		start += folio_size(folio);
		folio_unlock(folio);
		goto search_again;
	}

	if (folio_test_writeback(folio) ||
	    folio_test_fscache(folio)) {
		folio_unlock(folio);
		if (wbc->sync_mode != WB_SYNC_NONE) {
			/* Synchronous writeback must wait its turn. */
			folio_wait_writeback(folio);
#ifdef CONFIG_CIFS_FSCACHE
			folio_wait_fscache(folio);
#endif
			goto lock_again;
		}

		start += folio_size(folio);
		if (wbc->sync_mode == WB_SYNC_NONE) {
			/* Best-effort writeback: give up after a few busy
			 * folios rather than spinning.
			 */
			if (skips >= 5 || need_resched()) {
				ret = 0;
				goto out;
			}
			skips++;
		}
		goto search_again;
	}

	ret = cifs_write_back_from_locked_folio(mapping, wbc, xas, folio, start, end);
out:
	if (ret > 0)
		*_start = start + ret;
	return ret;
}

/*
 * Write a region of pages back to the server
 *
 * Repeatedly writes back runs of dirty folios from *_start to @end until
 * there is nothing more to write or the nr_to_write quota is exhausted.
 * Returns 0 on success or a negative error code.
 */
static int cifs_writepages_region(struct address_space *mapping,
				  struct writeback_control *wbc,
				  unsigned long long *_start,
				  unsigned long long end)
{
	ssize_t ret;

	XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);

	do {
		ret = cifs_writepages_begin(mapping, wbc, &xas, _start, end);
		if (ret > 0 && wbc->nr_to_write > 0)
			cond_resched();
	} while (ret > 0 && wbc->nr_to_write > 0);

	return ret > 0 ? 0 : ret;
}

/*
 * Write some of the pending data back to the server
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	loff_t start, end;
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */

	/* Three cases: cyclic writeback resuming from writeback_index
	 * (wrapping around to the start when the tail is done), whole-file
	 * writeback, or an explicit caller-supplied range.
	 */
	if (wbc->range_cyclic && mapping->writeback_index) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
		if (ret < 0)
			goto out;

		if (wbc->nr_to_write <= 0) {
			mapping->writeback_index = start / PAGE_SIZE;
			goto out;
		}

		/* Wrap around and sweep the portion before the old index. */
		start = 0;
		end = mapping->writeback_index * PAGE_SIZE;
		mapping->writeback_index = 0;
		ret = cifs_writepages_region(mapping, wbc, &start, end);
		if (ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		start = 0;
		ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else {
		start = wbc->range_start;
		ret = cifs_writepages_region(mapping, wbc, &start, wbc->range_end);
	}

out:
	return ret;
}

/*
 * Synchronously write a single locked page back to the server via
 * cifs_partialpagewrite(), retrying -EAGAIN forever under WB_SYNC_ALL.
 * Returns 0 on success or a negative error code.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
	/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

/*
 * ->write_end() for the CIFS address space: commit @copied bytes written
 * into @page at @pos.  Uptodate folios are simply dirtied for later
 * writeback; non-uptodate folios are written to the server synchronously
 * via cifs_write().  Returns the number of bytes committed or a negative
 * error code.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct folio *folio = page_folio(page);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (folio_test_checked(folio)) {
		if (copied == len)
			folio_mark_uptodate(folio);
		folio_clear_checked(folio);
	} else if (!folio_test_uptodate(folio) && copied == PAGE_SIZE)
		folio_mark_uptodate(folio);

	if (!folio_test_uptodate(folio)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size) {
			/* 512-byte block count, rounded up. */
			loff_t additional_blocks = (512 - 1 + copied) >> 9;

			i_size_write(inode, pos);
			/*
			 * Estimate new allocation size based on the amount written.
			 * This will be updated from server on close (and on queryinfo)
			 */
			inode->i_blocks = min_t(blkcnt_t, (512 - 1 + pos) >> 9,
						inode->i_blocks + additional_blocks);
		}
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);

	return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile =
file->private_data; 324838c8a9a5SSteve French struct inode *inode = file_inode(file); 324938c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 325038c8a9a5SSteve French 325138c8a9a5SSteve French rc = file_write_and_wait_range(file, start, end); 325238c8a9a5SSteve French if (rc) { 325338c8a9a5SSteve French trace_cifs_fsync_err(inode->i_ino, rc); 325438c8a9a5SSteve French return rc; 325538c8a9a5SSteve French } 325638c8a9a5SSteve French 325738c8a9a5SSteve French xid = get_xid(); 325838c8a9a5SSteve French 325938c8a9a5SSteve French cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", 326038c8a9a5SSteve French file, datasync); 326138c8a9a5SSteve French 326238c8a9a5SSteve French if (!CIFS_CACHE_READ(CIFS_I(inode))) { 326338c8a9a5SSteve French rc = cifs_zap_mapping(inode); 326438c8a9a5SSteve French if (rc) { 326538c8a9a5SSteve French cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc); 326638c8a9a5SSteve French rc = 0; /* don't care about it in fsync */ 326738c8a9a5SSteve French } 326838c8a9a5SSteve French } 326938c8a9a5SSteve French 327038c8a9a5SSteve French tcon = tlink_tcon(smbfile->tlink); 327138c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 327238c8a9a5SSteve French server = tcon->ses->server; 327338c8a9a5SSteve French if (server->ops->flush == NULL) { 327438c8a9a5SSteve French rc = -ENOSYS; 327538c8a9a5SSteve French goto strict_fsync_exit; 327638c8a9a5SSteve French } 327738c8a9a5SSteve French 327838c8a9a5SSteve French if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 327938c8a9a5SSteve French smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 328038c8a9a5SSteve French if (smbfile) { 328138c8a9a5SSteve French rc = server->ops->flush(xid, tcon, &smbfile->fid); 328238c8a9a5SSteve French cifsFileInfo_put(smbfile); 328338c8a9a5SSteve French } else 328438c8a9a5SSteve French cifs_dbg(FYI, "ignore fsync for file not open for write\n"); 328538c8a9a5SSteve French } else 328638c8a9a5SSteve French rc = 
server->ops->flush(xid, tcon, &smbfile->fid); 328738c8a9a5SSteve French } 328838c8a9a5SSteve French 328938c8a9a5SSteve French strict_fsync_exit: 329038c8a9a5SSteve French free_xid(xid); 329138c8a9a5SSteve French return rc; 329238c8a9a5SSteve French } 329338c8a9a5SSteve French 329438c8a9a5SSteve French int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 329538c8a9a5SSteve French { 329638c8a9a5SSteve French unsigned int xid; 329738c8a9a5SSteve French int rc = 0; 329838c8a9a5SSteve French struct cifs_tcon *tcon; 329938c8a9a5SSteve French struct TCP_Server_Info *server; 330038c8a9a5SSteve French struct cifsFileInfo *smbfile = file->private_data; 330138c8a9a5SSteve French struct inode *inode = file_inode(file); 330238c8a9a5SSteve French struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 330338c8a9a5SSteve French 330438c8a9a5SSteve French rc = file_write_and_wait_range(file, start, end); 330538c8a9a5SSteve French if (rc) { 330638c8a9a5SSteve French trace_cifs_fsync_err(file_inode(file)->i_ino, rc); 330738c8a9a5SSteve French return rc; 330838c8a9a5SSteve French } 330938c8a9a5SSteve French 331038c8a9a5SSteve French xid = get_xid(); 331138c8a9a5SSteve French 331238c8a9a5SSteve French cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", 331338c8a9a5SSteve French file, datasync); 331438c8a9a5SSteve French 331538c8a9a5SSteve French tcon = tlink_tcon(smbfile->tlink); 331638c8a9a5SSteve French if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 331738c8a9a5SSteve French server = tcon->ses->server; 331838c8a9a5SSteve French if (server->ops->flush == NULL) { 331938c8a9a5SSteve French rc = -ENOSYS; 332038c8a9a5SSteve French goto fsync_exit; 332138c8a9a5SSteve French } 332238c8a9a5SSteve French 332338c8a9a5SSteve French if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 332438c8a9a5SSteve French smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 332538c8a9a5SSteve French if (smbfile) { 332638c8a9a5SSteve French rc = server->ops->flush(xid, 
tcon, &smbfile->fid); 332738c8a9a5SSteve French cifsFileInfo_put(smbfile); 332838c8a9a5SSteve French } else 332938c8a9a5SSteve French cifs_dbg(FYI, "ignore fsync for file not open for write\n"); 333038c8a9a5SSteve French } else 333138c8a9a5SSteve French rc = server->ops->flush(xid, tcon, &smbfile->fid); 333238c8a9a5SSteve French } 333338c8a9a5SSteve French 333438c8a9a5SSteve French fsync_exit: 333538c8a9a5SSteve French free_xid(xid); 333638c8a9a5SSteve French return rc; 333738c8a9a5SSteve French } 333838c8a9a5SSteve French 333938c8a9a5SSteve French /* 334038c8a9a5SSteve French * As file closes, flush all cached write data for this inode checking 334138c8a9a5SSteve French * for write behind errors. 334238c8a9a5SSteve French */ 334338c8a9a5SSteve French int cifs_flush(struct file *file, fl_owner_t id) 334438c8a9a5SSteve French { 334538c8a9a5SSteve French struct inode *inode = file_inode(file); 334638c8a9a5SSteve French int rc = 0; 334738c8a9a5SSteve French 334838c8a9a5SSteve French if (file->f_mode & FMODE_WRITE) 334938c8a9a5SSteve French rc = filemap_write_and_wait(inode->i_mapping); 335038c8a9a5SSteve French 335138c8a9a5SSteve French cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc); 335238c8a9a5SSteve French if (rc) { 335338c8a9a5SSteve French /* get more nuanced writeback errors */ 335438c8a9a5SSteve French rc = filemap_check_wb_err(file->f_mapping, 0); 335538c8a9a5SSteve French trace_cifs_flush_err(inode->i_ino, rc); 335638c8a9a5SSteve French } 335738c8a9a5SSteve French return rc; 335838c8a9a5SSteve French } 335938c8a9a5SSteve French 336038c8a9a5SSteve French static void 336138c8a9a5SSteve French cifs_uncached_writedata_release(struct kref *refcount) 336238c8a9a5SSteve French { 336338c8a9a5SSteve French struct cifs_writedata *wdata = container_of(refcount, 336438c8a9a5SSteve French struct cifs_writedata, refcount); 336538c8a9a5SSteve French 336638c8a9a5SSteve French kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release); 336738c8a9a5SSteve French 
cifs_writedata_release(refcount); 336838c8a9a5SSteve French } 336938c8a9a5SSteve French 337038c8a9a5SSteve French static void collect_uncached_write_data(struct cifs_aio_ctx *ctx); 337138c8a9a5SSteve French 337238c8a9a5SSteve French static void 337338c8a9a5SSteve French cifs_uncached_writev_complete(struct work_struct *work) 337438c8a9a5SSteve French { 337538c8a9a5SSteve French struct cifs_writedata *wdata = container_of(work, 337638c8a9a5SSteve French struct cifs_writedata, work); 337738c8a9a5SSteve French struct inode *inode = d_inode(wdata->cfile->dentry); 337838c8a9a5SSteve French struct cifsInodeInfo *cifsi = CIFS_I(inode); 337938c8a9a5SSteve French 338038c8a9a5SSteve French spin_lock(&inode->i_lock); 338138c8a9a5SSteve French cifs_update_eof(cifsi, wdata->offset, wdata->bytes); 338238c8a9a5SSteve French if (cifsi->server_eof > inode->i_size) 338338c8a9a5SSteve French i_size_write(inode, cifsi->server_eof); 338438c8a9a5SSteve French spin_unlock(&inode->i_lock); 338538c8a9a5SSteve French 338638c8a9a5SSteve French complete(&wdata->done); 338738c8a9a5SSteve French collect_uncached_write_data(wdata->ctx); 338838c8a9a5SSteve French /* the below call can possibly free the last ref to aio ctx */ 338938c8a9a5SSteve French kref_put(&wdata->refcount, cifs_uncached_writedata_release); 339038c8a9a5SSteve French } 339138c8a9a5SSteve French 339238c8a9a5SSteve French static int 339338c8a9a5SSteve French cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list, 339438c8a9a5SSteve French struct cifs_aio_ctx *ctx) 339538c8a9a5SSteve French { 339638c8a9a5SSteve French unsigned int wsize; 339738c8a9a5SSteve French struct cifs_credits credits; 339838c8a9a5SSteve French int rc; 339938c8a9a5SSteve French struct TCP_Server_Info *server = wdata->server; 340038c8a9a5SSteve French 340138c8a9a5SSteve French do { 340238c8a9a5SSteve French if (wdata->cfile->invalidHandle) { 340338c8a9a5SSteve French rc = cifs_reopen_file(wdata->cfile, false); 340438c8a9a5SSteve French 
if (rc == -EAGAIN) 340538c8a9a5SSteve French continue; 340638c8a9a5SSteve French else if (rc) 340738c8a9a5SSteve French break; 340838c8a9a5SSteve French } 340938c8a9a5SSteve French 341038c8a9a5SSteve French 341138c8a9a5SSteve French /* 341238c8a9a5SSteve French * Wait for credits to resend this wdata. 341338c8a9a5SSteve French * Note: we are attempting to resend the whole wdata not in 341438c8a9a5SSteve French * segments 341538c8a9a5SSteve French */ 341638c8a9a5SSteve French do { 341738c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, wdata->bytes, 341838c8a9a5SSteve French &wsize, &credits); 341938c8a9a5SSteve French if (rc) 342038c8a9a5SSteve French goto fail; 342138c8a9a5SSteve French 342238c8a9a5SSteve French if (wsize < wdata->bytes) { 342338c8a9a5SSteve French add_credits_and_wake_if(server, &credits, 0); 342438c8a9a5SSteve French msleep(1000); 342538c8a9a5SSteve French } 342638c8a9a5SSteve French } while (wsize < wdata->bytes); 342738c8a9a5SSteve French wdata->credits = credits; 342838c8a9a5SSteve French 342938c8a9a5SSteve French rc = adjust_credits(server, &wdata->credits, wdata->bytes); 343038c8a9a5SSteve French 343138c8a9a5SSteve French if (!rc) { 343238c8a9a5SSteve French if (wdata->cfile->invalidHandle) 343338c8a9a5SSteve French rc = -EAGAIN; 343438c8a9a5SSteve French else { 3435cdd7870aSShyam Prasad N wdata->replay = true; 343638c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT 343738c8a9a5SSteve French if (wdata->mr) { 343838c8a9a5SSteve French wdata->mr->need_invalidate = true; 343938c8a9a5SSteve French smbd_deregister_mr(wdata->mr); 344038c8a9a5SSteve French wdata->mr = NULL; 344138c8a9a5SSteve French } 344238c8a9a5SSteve French #endif 344338c8a9a5SSteve French rc = server->ops->async_writev(wdata, 344438c8a9a5SSteve French cifs_uncached_writedata_release); 344538c8a9a5SSteve French } 344638c8a9a5SSteve French } 344738c8a9a5SSteve French 344838c8a9a5SSteve French /* If the write was successfully sent, we are done */ 344938c8a9a5SSteve 
French if (!rc) { 345038c8a9a5SSteve French list_add_tail(&wdata->list, wdata_list); 345138c8a9a5SSteve French return 0; 345238c8a9a5SSteve French } 345338c8a9a5SSteve French 345438c8a9a5SSteve French /* Roll back credits and retry if needed */ 345538c8a9a5SSteve French add_credits_and_wake_if(server, &wdata->credits, 0); 345638c8a9a5SSteve French } while (rc == -EAGAIN); 345738c8a9a5SSteve French 345838c8a9a5SSteve French fail: 345938c8a9a5SSteve French kref_put(&wdata->refcount, cifs_uncached_writedata_release); 346038c8a9a5SSteve French return rc; 346138c8a9a5SSteve French } 346238c8a9a5SSteve French 346338c8a9a5SSteve French /* 346438c8a9a5SSteve French * Select span of a bvec iterator we're going to use. Limit it by both maximum 346538c8a9a5SSteve French * size and maximum number of segments. 346638c8a9a5SSteve French */ 346738c8a9a5SSteve French static size_t cifs_limit_bvec_subset(const struct iov_iter *iter, size_t max_size, 346838c8a9a5SSteve French size_t max_segs, unsigned int *_nsegs) 346938c8a9a5SSteve French { 347038c8a9a5SSteve French const struct bio_vec *bvecs = iter->bvec; 347138c8a9a5SSteve French unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0; 347238c8a9a5SSteve French size_t len, span = 0, n = iter->count; 347338c8a9a5SSteve French size_t skip = iter->iov_offset; 347438c8a9a5SSteve French 347538c8a9a5SSteve French if (WARN_ON(!iov_iter_is_bvec(iter)) || n == 0) 347638c8a9a5SSteve French return 0; 347738c8a9a5SSteve French 347838c8a9a5SSteve French while (n && ix < nbv && skip) { 347938c8a9a5SSteve French len = bvecs[ix].bv_len; 348038c8a9a5SSteve French if (skip < len) 348138c8a9a5SSteve French break; 348238c8a9a5SSteve French skip -= len; 348338c8a9a5SSteve French n -= len; 348438c8a9a5SSteve French ix++; 348538c8a9a5SSteve French } 348638c8a9a5SSteve French 348738c8a9a5SSteve French while (n && ix < nbv) { 348838c8a9a5SSteve French len = min3(n, bvecs[ix].bv_len - skip, max_size); 348938c8a9a5SSteve French span += len; 349038c8a9a5SSteve 
French max_size -= len; 349138c8a9a5SSteve French nsegs++; 349238c8a9a5SSteve French ix++; 349338c8a9a5SSteve French if (max_size == 0 || nsegs >= max_segs) 349438c8a9a5SSteve French break; 349538c8a9a5SSteve French skip = 0; 349638c8a9a5SSteve French n -= len; 349738c8a9a5SSteve French } 349838c8a9a5SSteve French 349938c8a9a5SSteve French *_nsegs = nsegs; 350038c8a9a5SSteve French return span; 350138c8a9a5SSteve French } 350238c8a9a5SSteve French 350338c8a9a5SSteve French static int 350438c8a9a5SSteve French cifs_write_from_iter(loff_t fpos, size_t len, struct iov_iter *from, 350538c8a9a5SSteve French struct cifsFileInfo *open_file, 350638c8a9a5SSteve French struct cifs_sb_info *cifs_sb, struct list_head *wdata_list, 350738c8a9a5SSteve French struct cifs_aio_ctx *ctx) 350838c8a9a5SSteve French { 350938c8a9a5SSteve French int rc = 0; 351038c8a9a5SSteve French size_t cur_len, max_len; 351138c8a9a5SSteve French struct cifs_writedata *wdata; 351238c8a9a5SSteve French pid_t pid; 351338c8a9a5SSteve French struct TCP_Server_Info *server; 351438c8a9a5SSteve French unsigned int xid, max_segs = INT_MAX; 351538c8a9a5SSteve French 351638c8a9a5SSteve French if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 351738c8a9a5SSteve French pid = open_file->pid; 351838c8a9a5SSteve French else 351938c8a9a5SSteve French pid = current->tgid; 352038c8a9a5SSteve French 352138c8a9a5SSteve French server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); 352238c8a9a5SSteve French xid = get_xid(); 352338c8a9a5SSteve French 352438c8a9a5SSteve French #ifdef CONFIG_CIFS_SMB_DIRECT 352538c8a9a5SSteve French if (server->smbd_conn) 352638c8a9a5SSteve French max_segs = server->smbd_conn->max_frmr_depth; 352738c8a9a5SSteve French #endif 352838c8a9a5SSteve French 352938c8a9a5SSteve French do { 353038c8a9a5SSteve French struct cifs_credits credits_on_stack; 353138c8a9a5SSteve French struct cifs_credits *credits = &credits_on_stack; 353238c8a9a5SSteve French unsigned int wsize, nsegs = 0; 
353338c8a9a5SSteve French 353438c8a9a5SSteve French if (signal_pending(current)) { 353538c8a9a5SSteve French rc = -EINTR; 353638c8a9a5SSteve French break; 353738c8a9a5SSteve French } 353838c8a9a5SSteve French 353938c8a9a5SSteve French if (open_file->invalidHandle) { 354038c8a9a5SSteve French rc = cifs_reopen_file(open_file, false); 354138c8a9a5SSteve French if (rc == -EAGAIN) 354238c8a9a5SSteve French continue; 354338c8a9a5SSteve French else if (rc) 354438c8a9a5SSteve French break; 354538c8a9a5SSteve French } 354638c8a9a5SSteve French 354738c8a9a5SSteve French rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize, 354838c8a9a5SSteve French &wsize, credits); 354938c8a9a5SSteve French if (rc) 355038c8a9a5SSteve French break; 355138c8a9a5SSteve French 355238c8a9a5SSteve French max_len = min_t(const size_t, len, wsize); 355338c8a9a5SSteve French if (!max_len) { 355438c8a9a5SSteve French rc = -EAGAIN; 355538c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0); 355638c8a9a5SSteve French break; 355738c8a9a5SSteve French } 355838c8a9a5SSteve French 355938c8a9a5SSteve French cur_len = cifs_limit_bvec_subset(from, max_len, max_segs, &nsegs); 356038c8a9a5SSteve French cifs_dbg(FYI, "write_from_iter len=%zx/%zx nsegs=%u/%lu/%u\n", 356138c8a9a5SSteve French cur_len, max_len, nsegs, from->nr_segs, max_segs); 356238c8a9a5SSteve French if (cur_len == 0) { 356338c8a9a5SSteve French rc = -EIO; 356438c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0); 356538c8a9a5SSteve French break; 356638c8a9a5SSteve French } 356738c8a9a5SSteve French 356838c8a9a5SSteve French wdata = cifs_writedata_alloc(cifs_uncached_writev_complete); 356938c8a9a5SSteve French if (!wdata) { 357038c8a9a5SSteve French rc = -ENOMEM; 357138c8a9a5SSteve French add_credits_and_wake_if(server, credits, 0); 357238c8a9a5SSteve French break; 357338c8a9a5SSteve French } 357438c8a9a5SSteve French 357538c8a9a5SSteve French wdata->sync_mode = WB_SYNC_ALL; 357638c8a9a5SSteve French wdata->offset 
= (__u64)fpos; 357738c8a9a5SSteve French wdata->cfile = cifsFileInfo_get(open_file); 357838c8a9a5SSteve French wdata->server = server; 357938c8a9a5SSteve French wdata->pid = pid; 358038c8a9a5SSteve French wdata->bytes = cur_len; 358138c8a9a5SSteve French wdata->credits = credits_on_stack; 358238c8a9a5SSteve French wdata->iter = *from; 358338c8a9a5SSteve French wdata->ctx = ctx; 358438c8a9a5SSteve French kref_get(&ctx->refcount); 358538c8a9a5SSteve French 358638c8a9a5SSteve French iov_iter_truncate(&wdata->iter, cur_len); 358738c8a9a5SSteve French 358838c8a9a5SSteve French rc = adjust_credits(server, &wdata->credits, wdata->bytes); 358938c8a9a5SSteve French 359038c8a9a5SSteve French if (!rc) { 359138c8a9a5SSteve French if (wdata->cfile->invalidHandle) 359238c8a9a5SSteve French rc = -EAGAIN; 359338c8a9a5SSteve French else 359438c8a9a5SSteve French rc = server->ops->async_writev(wdata, 359538c8a9a5SSteve French cifs_uncached_writedata_release); 359638c8a9a5SSteve French } 359738c8a9a5SSteve French 359838c8a9a5SSteve French if (rc) { 359938c8a9a5SSteve French add_credits_and_wake_if(server, &wdata->credits, 0); 360038c8a9a5SSteve French kref_put(&wdata->refcount, 360138c8a9a5SSteve French cifs_uncached_writedata_release); 360238c8a9a5SSteve French if (rc == -EAGAIN) 360338c8a9a5SSteve French continue; 360438c8a9a5SSteve French break; 360538c8a9a5SSteve French } 360638c8a9a5SSteve French 360738c8a9a5SSteve French list_add_tail(&wdata->list, wdata_list); 360838c8a9a5SSteve French iov_iter_advance(from, cur_len); 360938c8a9a5SSteve French fpos += cur_len; 361038c8a9a5SSteve French len -= cur_len; 361138c8a9a5SSteve French } while (len > 0); 361238c8a9a5SSteve French 361338c8a9a5SSteve French free_xid(xid); 361438c8a9a5SSteve French return rc; 361538c8a9a5SSteve French } 361638c8a9a5SSteve French 361738c8a9a5SSteve French static void collect_uncached_write_data(struct cifs_aio_ctx *ctx) 361838c8a9a5SSteve French { 361938c8a9a5SSteve French struct cifs_writedata *wdata, 
*tmp; 362038c8a9a5SSteve French struct cifs_tcon *tcon; 362138c8a9a5SSteve French struct cifs_sb_info *cifs_sb; 362238c8a9a5SSteve French struct dentry *dentry = ctx->cfile->dentry; 362338c8a9a5SSteve French ssize_t rc; 362438c8a9a5SSteve French 362538c8a9a5SSteve French tcon = tlink_tcon(ctx->cfile->tlink); 362638c8a9a5SSteve French cifs_sb = CIFS_SB(dentry->d_sb); 362738c8a9a5SSteve French 362838c8a9a5SSteve French mutex_lock(&ctx->aio_mutex); 362938c8a9a5SSteve French 363038c8a9a5SSteve French if (list_empty(&ctx->list)) { 363138c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex); 363238c8a9a5SSteve French return; 363338c8a9a5SSteve French } 363438c8a9a5SSteve French 363538c8a9a5SSteve French rc = ctx->rc; 363638c8a9a5SSteve French /* 363738c8a9a5SSteve French * Wait for and collect replies for any successful sends in order of 363838c8a9a5SSteve French * increasing offset. Once an error is hit, then return without waiting 363938c8a9a5SSteve French * for any more replies. 364038c8a9a5SSteve French */ 364138c8a9a5SSteve French restart_loop: 364238c8a9a5SSteve French list_for_each_entry_safe(wdata, tmp, &ctx->list, list) { 364338c8a9a5SSteve French if (!rc) { 364438c8a9a5SSteve French if (!try_wait_for_completion(&wdata->done)) { 364538c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex); 364638c8a9a5SSteve French return; 364738c8a9a5SSteve French } 364838c8a9a5SSteve French 364938c8a9a5SSteve French if (wdata->result) 365038c8a9a5SSteve French rc = wdata->result; 365138c8a9a5SSteve French else 365238c8a9a5SSteve French ctx->total_len += wdata->bytes; 365338c8a9a5SSteve French 365438c8a9a5SSteve French /* resend call if it's a retryable error */ 365538c8a9a5SSteve French if (rc == -EAGAIN) { 365638c8a9a5SSteve French struct list_head tmp_list; 365738c8a9a5SSteve French struct iov_iter tmp_from = ctx->iter; 365838c8a9a5SSteve French 365938c8a9a5SSteve French INIT_LIST_HEAD(&tmp_list); 366038c8a9a5SSteve French list_del_init(&wdata->list); 366138c8a9a5SSteve French 
366238c8a9a5SSteve French if (ctx->direct_io) 366338c8a9a5SSteve French rc = cifs_resend_wdata( 366438c8a9a5SSteve French wdata, &tmp_list, ctx); 366538c8a9a5SSteve French else { 366638c8a9a5SSteve French iov_iter_advance(&tmp_from, 366738c8a9a5SSteve French wdata->offset - ctx->pos); 366838c8a9a5SSteve French 366938c8a9a5SSteve French rc = cifs_write_from_iter(wdata->offset, 367038c8a9a5SSteve French wdata->bytes, &tmp_from, 367138c8a9a5SSteve French ctx->cfile, cifs_sb, &tmp_list, 367238c8a9a5SSteve French ctx); 367338c8a9a5SSteve French 367438c8a9a5SSteve French kref_put(&wdata->refcount, 367538c8a9a5SSteve French cifs_uncached_writedata_release); 367638c8a9a5SSteve French } 367738c8a9a5SSteve French 367838c8a9a5SSteve French list_splice(&tmp_list, &ctx->list); 367938c8a9a5SSteve French goto restart_loop; 368038c8a9a5SSteve French } 368138c8a9a5SSteve French } 368238c8a9a5SSteve French list_del_init(&wdata->list); 368338c8a9a5SSteve French kref_put(&wdata->refcount, cifs_uncached_writedata_release); 368438c8a9a5SSteve French } 368538c8a9a5SSteve French 368638c8a9a5SSteve French cifs_stats_bytes_written(tcon, ctx->total_len); 368738c8a9a5SSteve French set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags); 368838c8a9a5SSteve French 368938c8a9a5SSteve French ctx->rc = (rc == 0) ? 
ctx->total_len : rc; 369038c8a9a5SSteve French 369138c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex); 369238c8a9a5SSteve French 369338c8a9a5SSteve French if (ctx->iocb && ctx->iocb->ki_complete) 369438c8a9a5SSteve French ctx->iocb->ki_complete(ctx->iocb, ctx->rc); 369538c8a9a5SSteve French else 369638c8a9a5SSteve French complete(&ctx->done); 369738c8a9a5SSteve French } 369838c8a9a5SSteve French 369938c8a9a5SSteve French static ssize_t __cifs_writev( 370038c8a9a5SSteve French struct kiocb *iocb, struct iov_iter *from, bool direct) 370138c8a9a5SSteve French { 370238c8a9a5SSteve French struct file *file = iocb->ki_filp; 370338c8a9a5SSteve French ssize_t total_written = 0; 370438c8a9a5SSteve French struct cifsFileInfo *cfile; 370538c8a9a5SSteve French struct cifs_tcon *tcon; 370638c8a9a5SSteve French struct cifs_sb_info *cifs_sb; 370738c8a9a5SSteve French struct cifs_aio_ctx *ctx; 370838c8a9a5SSteve French int rc; 370938c8a9a5SSteve French 371038c8a9a5SSteve French rc = generic_write_checks(iocb, from); 371138c8a9a5SSteve French if (rc <= 0) 371238c8a9a5SSteve French return rc; 371338c8a9a5SSteve French 371438c8a9a5SSteve French cifs_sb = CIFS_FILE_SB(file); 371538c8a9a5SSteve French cfile = file->private_data; 371638c8a9a5SSteve French tcon = tlink_tcon(cfile->tlink); 371738c8a9a5SSteve French 371838c8a9a5SSteve French if (!tcon->ses->server->ops->async_writev) 371938c8a9a5SSteve French return -ENOSYS; 372038c8a9a5SSteve French 372138c8a9a5SSteve French ctx = cifs_aio_ctx_alloc(); 372238c8a9a5SSteve French if (!ctx) 372338c8a9a5SSteve French return -ENOMEM; 372438c8a9a5SSteve French 372538c8a9a5SSteve French ctx->cfile = cifsFileInfo_get(cfile); 372638c8a9a5SSteve French 372738c8a9a5SSteve French if (!is_sync_kiocb(iocb)) 372838c8a9a5SSteve French ctx->iocb = iocb; 372938c8a9a5SSteve French 373038c8a9a5SSteve French ctx->pos = iocb->ki_pos; 373138c8a9a5SSteve French ctx->direct_io = direct; 373238c8a9a5SSteve French ctx->nr_pinned_pages = 0; 373338c8a9a5SSteve 
French 373438c8a9a5SSteve French if (user_backed_iter(from)) { 373538c8a9a5SSteve French /* 373638c8a9a5SSteve French * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as 373738c8a9a5SSteve French * they contain references to the calling process's virtual 373838c8a9a5SSteve French * memory layout which won't be available in an async worker 373938c8a9a5SSteve French * thread. This also takes a pin on every folio involved. 374038c8a9a5SSteve French */ 374138c8a9a5SSteve French rc = netfs_extract_user_iter(from, iov_iter_count(from), 374238c8a9a5SSteve French &ctx->iter, 0); 374338c8a9a5SSteve French if (rc < 0) { 374438c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release); 374538c8a9a5SSteve French return rc; 374638c8a9a5SSteve French } 374738c8a9a5SSteve French 374838c8a9a5SSteve French ctx->nr_pinned_pages = rc; 374938c8a9a5SSteve French ctx->bv = (void *)ctx->iter.bvec; 375038c8a9a5SSteve French ctx->bv_need_unpin = iov_iter_extract_will_pin(from); 375138c8a9a5SSteve French } else if ((iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) && 375238c8a9a5SSteve French !is_sync_kiocb(iocb)) { 375338c8a9a5SSteve French /* 375438c8a9a5SSteve French * If the op is asynchronous, we need to copy the list attached 375538c8a9a5SSteve French * to a BVEC/KVEC-type iterator, but we assume that the storage 375638c8a9a5SSteve French * will be pinned by the caller; in any case, we may or may not 375738c8a9a5SSteve French * be able to pin the pages, so we don't try. 
375838c8a9a5SSteve French */ 375938c8a9a5SSteve French ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL); 376038c8a9a5SSteve French if (!ctx->bv) { 376138c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release); 376238c8a9a5SSteve French return -ENOMEM; 376338c8a9a5SSteve French } 376438c8a9a5SSteve French } else { 376538c8a9a5SSteve French /* 376638c8a9a5SSteve French * Otherwise, we just pass the iterator down as-is and rely on 376738c8a9a5SSteve French * the caller to make sure the pages referred to by the 376838c8a9a5SSteve French * iterator don't evaporate. 376938c8a9a5SSteve French */ 377038c8a9a5SSteve French ctx->iter = *from; 377138c8a9a5SSteve French } 377238c8a9a5SSteve French 377338c8a9a5SSteve French ctx->len = iov_iter_count(&ctx->iter); 377438c8a9a5SSteve French 377538c8a9a5SSteve French /* grab a lock here due to read response handlers can access ctx */ 377638c8a9a5SSteve French mutex_lock(&ctx->aio_mutex); 377738c8a9a5SSteve French 377838c8a9a5SSteve French rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &ctx->iter, 377938c8a9a5SSteve French cfile, cifs_sb, &ctx->list, ctx); 378038c8a9a5SSteve French 378138c8a9a5SSteve French /* 378238c8a9a5SSteve French * If at least one write was successfully sent, then discard any rc 378338c8a9a5SSteve French * value from the later writes. If the other write succeeds, then 378438c8a9a5SSteve French * we'll end up returning whatever was written. If it fails, then 378538c8a9a5SSteve French * we'll get a new rc value from that. 
378638c8a9a5SSteve French */ 378738c8a9a5SSteve French if (!list_empty(&ctx->list)) 378838c8a9a5SSteve French rc = 0; 378938c8a9a5SSteve French 379038c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex); 379138c8a9a5SSteve French 379238c8a9a5SSteve French if (rc) { 379338c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release); 379438c8a9a5SSteve French return rc; 379538c8a9a5SSteve French } 379638c8a9a5SSteve French 379738c8a9a5SSteve French if (!is_sync_kiocb(iocb)) { 379838c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release); 379938c8a9a5SSteve French return -EIOCBQUEUED; 380038c8a9a5SSteve French } 380138c8a9a5SSteve French 380238c8a9a5SSteve French rc = wait_for_completion_killable(&ctx->done); 380338c8a9a5SSteve French if (rc) { 380438c8a9a5SSteve French mutex_lock(&ctx->aio_mutex); 380538c8a9a5SSteve French ctx->rc = rc = -EINTR; 380638c8a9a5SSteve French total_written = ctx->total_len; 380738c8a9a5SSteve French mutex_unlock(&ctx->aio_mutex); 380838c8a9a5SSteve French } else { 380938c8a9a5SSteve French rc = ctx->rc; 381038c8a9a5SSteve French total_written = ctx->total_len; 381138c8a9a5SSteve French } 381238c8a9a5SSteve French 381338c8a9a5SSteve French kref_put(&ctx->refcount, cifs_aio_ctx_release); 381438c8a9a5SSteve French 381538c8a9a5SSteve French if (unlikely(!total_written)) 381638c8a9a5SSteve French return rc; 381738c8a9a5SSteve French 381838c8a9a5SSteve French iocb->ki_pos += total_written; 381938c8a9a5SSteve French return total_written; 382038c8a9a5SSteve French } 382138c8a9a5SSteve French 382238c8a9a5SSteve French ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) 382338c8a9a5SSteve French { 382438c8a9a5SSteve French struct file *file = iocb->ki_filp; 382538c8a9a5SSteve French 382638c8a9a5SSteve French cifs_revalidate_mapping(file->f_inode); 382738c8a9a5SSteve French return __cifs_writev(iocb, from, true); 382838c8a9a5SSteve French } 382938c8a9a5SSteve French 383038c8a9a5SSteve French ssize_t 
/*
 * Entry point for uncached (non-direct) writes: defer to the common
 * write helper with direct == false.
 */
ssize_t
cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}

/*
 * Cached write path used when we hold a write-caching oplock/lease but
 * mandatory byte-range locks may be present.  Writes through the generic
 * pagecache path only if no conflicting brlock covers the target range;
 * otherwise fails with -EACCES.
 *
 * Locking: takes inode_lock, then cinode->lock_sem for read — the sem
 * must be held across the conflict check AND the write so nobody can
 * insert a conflicting brlock in between.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	/* Validates/clamps ki_pos and the iter; 0 means nothing to write. */
	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	/* O_SYNC/O_DSYNC semantics: flush what was just written, if any. */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
/*
 * Write entry point for strict cache mode.  Chooses between the generic
 * cached path, the brlock-aware cached path, and the uncached path
 * depending on oplock/lease state and POSIX-lock capability.
 *
 * cifs_get_writer()/cifs_put_writer() bracket the write so that oplock
 * break handling can wait for in-flight writers.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* Non-zero means an oplock break is in progress; bail out with it. */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * With a write-caching lease: if the server does POSIX
		 * (advisory) locks we can use the plain generic path;
		 * otherwise go through cifs_writev() which checks for
		 * conflicting mandatory brlocks first.
		 */
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP &
		     le64_to_cpu(tcon->fsUnixInfo.Capability))
		    && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

/*
 * Allocate a zeroed, kref-initialised read descriptor whose work item
 * runs @complete when the async read finishes.  Returns NULL on OOM.
 */
static struct cifs_readdata *cifs_readdata_alloc(work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

/*
 * kref release callback for a cifs_readdata: drop the aio-context ref,
 * deregister any SMB-Direct memory registration, release the file
 * reference and free the descriptor.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->ctx)
		kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}
/* Defined below; needed by the completion handler. */
static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);

/*
 * Work-queue handler run when an uncached async read completes.
 * Signals completion, lets the aio context gather any finished reads,
 * then drops this rdata's reference.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_readdata_release);
}

/*
 * Resend a whole rdata (direct I/O path) after a retryable failure.
 * On success the rdata is queued on @rdata_list; on failure its
 * reference is dropped.  Returns 0 or a negative errno.
 */
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			struct list_head *rdata_list,
			struct cifs_aio_ctx *ctx)
{
	unsigned int rsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server;

	/* XXX: should we pick a new channel here? */
	server = rdata->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this rdata.
		 * Note: we are attempting to resend the whole rdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						&rsize, &credits);

			if (rc)
				goto fail;

			/*
			 * Not enough credits for the full rdata yet: return
			 * them and back off before trying again.
			 */
			if (rsize < rdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (rsize < rdata->bytes);
		rdata->credits = credits;

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else {
#ifdef CONFIG_CIFS_SMB_DIRECT
				/*
				 * Re-registering requires the old MR to be
				 * invalidated and torn down first.
				 */
				if (rdata->mr) {
					rdata->mr->need_invalidate = true;
					smbd_deregister_mr(rdata->mr);
					rdata->mr = NULL;
				}
#endif
				rc = server->ops->async_readv(rdata);
			}
		}

		/* If the read was successfully sent, we are done */
		if (!rc) {
			/* Add to aio pending list */
			list_add_tail(&rdata->list, rdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &rdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&rdata->refcount, cifs_readdata_release);
	return rc;
}
/*
 * Slice the region [fpos, fpos+len) into rsize-bounded chunks and issue
 * an async read for each.  Every successfully sent rdata is appended to
 * @rdata_list for collect_uncached_read_data() to reap; credits taken
 * for a chunk that fails to send are rolled back.
 *
 * Returns 0, or the error from the first chunk that could not be sent
 * (earlier chunks may still be in flight — the caller masks rc when the
 * list is non-empty).
 */
static int
cifs_send_async_read(loff_t fpos, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int rsize, nsegs, max_segs = INT_MAX;
	struct cifs_credits credits_on_stack;
	struct cifs_credits *credits = &credits_on_stack;
	size_t cur_len, max_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* RDMA fast-registration limits how many segments one read can span. */
	if (server->smbd_conn)
		max_segs = server->smbd_conn->max_frmr_depth;
#endif

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/* Lazily (re)negotiate rsize if it was reset (e.g. reconnect). */
		if (cifs_sb->ctx->rsize == 0)
			cifs_sb->ctx->rsize =
				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
							     cifs_sb->ctx);

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
						   &rsize, credits);
		if (rc)
			break;

		max_len = min_t(size_t, len, rsize);

		/* Clamp further so the chunk fits in max_segs iov segments. */
		cur_len = cifs_limit_bvec_subset(&ctx->iter, max_len,
						 max_segs, &nsegs);
		cifs_dbg(FYI, "read-to-iter len=%zx/%zx nsegs=%u/%lu/%u\n",
			 cur_len, max_len, nsegs, ctx->iter.nr_segs, max_segs);
		if (cur_len == 0) {
			rc = -EIO;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(cifs_uncached_readv_complete);
		if (!rdata) {
			/* couldn't send the request, give the credits back */
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rdata->server = server;
		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->offset = fpos;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->credits = credits_on_stack;
		rdata->ctx = ctx;
		/* rdata holds a ref on ctx, dropped in cifs_readdata_release() */
		kref_get(&ctx->refcount);

		/* Give the rdata a view of just this chunk of the buffer. */
		rdata->iter = ctx->iter;
		iov_iter_truncate(&rdata->iter, cur_len);

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			add_credits_and_wake_if(server, &rdata->credits, 0);
			kref_put(&rdata->refcount, cifs_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		iov_iter_advance(&ctx->iter, cur_len);
		fpos += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
/*
 * Reap completed rdatas for an uncached read: accumulate bytes read,
 * resend retryable (-EAGAIN) chunks, and — once every rdata on the list
 * is done — publish the final result in ctx->rc and complete the iocb.
 *
 * Called from each rdata's completion work item; the whole walk runs
 * under ctx->aio_mutex, and returns early (without completing the iocb)
 * if any rdata is still in flight.
 */
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata, *tmp;
	struct cifs_sb_info *cifs_sb;
	int rc;

	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* Still running?  Leave everything for a later call. */
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				if (ctx->direct_io) {
					/*
					 * Re-use rdata as this is a
					 * direct I/O
					 */
					rc = cifs_resend_rdata(
						rdata,
						&tmp_list, ctx);
				} else {
					/* Resend only the unreceived remainder. */
					rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

					kref_put(&rdata->refcount,
						cifs_readdata_release);
				}

				list_splice(&tmp_list, &ctx->list);

				/* List changed under us — restart the walk. */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;

			ctx->total_len += rdata->got_bytes;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	/* Async iocb: report completion; sync caller waits on ctx->done. */
	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
	else
		complete(&ctx->done);
}
/*
 * Common implementation for uncached and direct reads.  Builds a
 * cifs_aio_ctx describing the destination buffer, fans the request out
 * via cifs_send_async_read(), then either returns -EIOCBQUEUED (async
 * iocb) or waits for completion and returns the byte count / error.
 *
 * Buffer handling: user-backed iterators are extracted to a pinned
 * BVEC so worker threads can access them without the caller's mm;
 * async kernel-backed iterators are duplicated; sync kernel-backed
 * iterators are used in place.
 */
static ssize_t __cifs_readv(
	struct kiocb *iocb, struct iov_iter *to, bool direct)
{
	size_t len;
	struct file *file = iocb->ki_filp;
	struct cifs_sb_info *cifs_sb;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	ssize_t rc, total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_aio_ctx *ctx;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->pos = offset;
	ctx->direct_io = direct;
	ctx->len = len;
	ctx->cfile = cifsFileInfo_get(cfile);
	ctx->nr_pinned_pages = 0;

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	if (user_backed_iter(to)) {
		/*
		 * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
		 * they contain references to the calling process's virtual
		 * memory layout which won't be available in an async worker
		 * thread. This also takes a pin on every folio involved.
		 */
		rc = netfs_extract_user_iter(to, iov_iter_count(to),
					     &ctx->iter, 0);
		if (rc < 0) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}

		ctx->nr_pinned_pages = rc;
		ctx->bv = (void *)ctx->iter.bvec;
		ctx->bv_need_unpin = iov_iter_extract_will_pin(to);
		/* Pages receive read data, so mark them dirty on release. */
		ctx->should_dirty = true;
	} else if ((iov_iter_is_bvec(to) || iov_iter_is_kvec(to)) &&
		   !is_sync_kiocb(iocb)) {
		/*
		 * If the op is asynchronous, we need to copy the list attached
		 * to a BVEC/KVEC-type iterator, but we assume that the storage
		 * will be retained by the caller; in any case, we may or may
		 * not be able to pin the pages, so we don't try.
		 */
		ctx->bv = (void *)dup_iter(&ctx->iter, to, GFP_KERNEL);
		if (!ctx->bv) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return -ENOMEM;
		}
	} else {
		/*
		 * Otherwise, we just pass the iterator down as-is and rely on
		 * the caller to make sure the pages referred to by the
		 * iterator don't evaporate.
		 */
		ctx->iter = *to;
	}

	if (direct) {
		/* Flush cached dirty data so the direct read sees it. */
		rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
						  offset, offset + len - 1);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return -EAGAIN;
		}
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		/* Completion is reported via iocb->ki_complete. */
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* Killed while waiting; report whatever already arrived. */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
/* Entry point for direct (uncached, no pagecache) reads. */
ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
{
	return __cifs_readv(iocb, to, true);
}

/* Entry point for uncached reads that still coordinate with the pagecache. */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	return __cifs_readv(iocb, to, false);
}

/*
 * Read entry point for strict cache mode.  Reads through the pagecache
 * only when we hold a read-caching oplock/lease; otherwise goes to the
 * server.  With mandatory brlocks, the conflict check and the cached
 * read must both happen under cinode->lock_sem.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX (advisory) locks: the generic cached path is safe. */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     0, NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Legacy synchronous read: fetch up to @read_size bytes at *@offset
 * into @read_data via the server's sync_read op, in rsize-bounded
 * chunks, advancing *@offset as data arrives.
 *
 * Returns the number of bytes read once anything has been transferred,
 * or a negative errno if the very first chunk fails.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms = {0};
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = cifs_pick_channel(tcon->ses);

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* Inner loop retries a single chunk on -EAGAIN (reconnect). */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if (!(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			io_parms.server = server;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* Partial success still returns the bytes so far. */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): stats are bumped by total_read (the
			 * running total before this chunk), not bytes_read —
			 * looks odd but matches long-standing behavior;
			 * confirm before changing.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
mmap'ed into a process' page tables, then we need to make 450038c8a9a5SSteve French * sure that it doesn't change while being written back. 450138c8a9a5SSteve French */ 450238c8a9a5SSteve French static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) 450338c8a9a5SSteve French { 450438c8a9a5SSteve French struct folio *folio = page_folio(vmf->page); 450538c8a9a5SSteve French 450638c8a9a5SSteve French /* Wait for the folio to be written to the cache before we allow it to 450738c8a9a5SSteve French * be modified. We then assume the entire folio will need writing back. 450838c8a9a5SSteve French */ 450938c8a9a5SSteve French #ifdef CONFIG_CIFS_FSCACHE 451038c8a9a5SSteve French if (folio_test_fscache(folio) && 451138c8a9a5SSteve French folio_wait_fscache_killable(folio) < 0) 451238c8a9a5SSteve French return VM_FAULT_RETRY; 451338c8a9a5SSteve French #endif 451438c8a9a5SSteve French 451538c8a9a5SSteve French folio_wait_writeback(folio); 451638c8a9a5SSteve French 451738c8a9a5SSteve French if (folio_lock_killable(folio) < 0) 451838c8a9a5SSteve French return VM_FAULT_RETRY; 451938c8a9a5SSteve French return VM_FAULT_LOCKED; 452038c8a9a5SSteve French } 452138c8a9a5SSteve French 452238c8a9a5SSteve French static const struct vm_operations_struct cifs_file_vm_ops = { 452338c8a9a5SSteve French .fault = filemap_fault, 452438c8a9a5SSteve French .map_pages = filemap_map_pages, 452538c8a9a5SSteve French .page_mkwrite = cifs_page_mkwrite, 452638c8a9a5SSteve French }; 452738c8a9a5SSteve French 452838c8a9a5SSteve French int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) 452938c8a9a5SSteve French { 453038c8a9a5SSteve French int xid, rc = 0; 453138c8a9a5SSteve French struct inode *inode = file_inode(file); 453238c8a9a5SSteve French 453338c8a9a5SSteve French xid = get_xid(); 453438c8a9a5SSteve French 453538c8a9a5SSteve French if (!CIFS_CACHE_READ(CIFS_I(inode))) 453638c8a9a5SSteve French rc = cifs_zap_mapping(inode); 453738c8a9a5SSteve French if (!rc) 453838c8a9a5SSteve 
French rc = generic_file_mmap(file, vma); 453938c8a9a5SSteve French if (!rc) 454038c8a9a5SSteve French vma->vm_ops = &cifs_file_vm_ops; 454138c8a9a5SSteve French 454238c8a9a5SSteve French free_xid(xid); 454338c8a9a5SSteve French return rc; 454438c8a9a5SSteve French } 454538c8a9a5SSteve French 454638c8a9a5SSteve French int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) 454738c8a9a5SSteve French { 454838c8a9a5SSteve French int rc, xid; 454938c8a9a5SSteve French 455038c8a9a5SSteve French xid = get_xid(); 455138c8a9a5SSteve French 455238c8a9a5SSteve French rc = cifs_revalidate_file(file); 455338c8a9a5SSteve French if (rc) 455438c8a9a5SSteve French cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", 455538c8a9a5SSteve French rc); 455638c8a9a5SSteve French if (!rc) 455738c8a9a5SSteve French rc = generic_file_mmap(file, vma); 455838c8a9a5SSteve French if (!rc) 455938c8a9a5SSteve French vma->vm_ops = &cifs_file_vm_ops; 456038c8a9a5SSteve French 456138c8a9a5SSteve French free_xid(xid); 456238c8a9a5SSteve French return rc; 456338c8a9a5SSteve French } 456438c8a9a5SSteve French 456538c8a9a5SSteve French /* 456638c8a9a5SSteve French * Unlock a bunch of folios in the pagecache. 
 */
static void cifs_unlock_folios(struct address_space *mapping, pgoff_t first, pgoff_t last)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, first);

	/* Walk the pagecache xarray under RCU and unlock each folio found. */
	rcu_read_lock();
	xas_for_each(&xas, folio, last) {
		folio_unlock(folio);
	}
	rcu_read_unlock();
}

/*
 * Work item run on completion of an async readahead RPC: write the data to
 * the local cache (if any), zero any shortfall, mark the folios uptodate on
 * success, unlock them, and drop the request's final reference.
 */
static void cifs_readahead_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						   struct cifs_readdata, work);
	struct folio *folio;
	pgoff_t last;
	/* A short -EAGAIN result still delivered some bytes; treat as good. */
	bool good = rdata->result == 0 || (rdata->result == -EAGAIN && rdata->got_bytes);

	XA_STATE(xas, &rdata->mapping->i_pages, rdata->offset / PAGE_SIZE);

	if (good)
		cifs_readahead_to_fscache(rdata->mapping->host,
					  rdata->offset, rdata->bytes);

	/* Zero whatever part of the request the server did not fill. */
	if (iov_iter_count(&rdata->iter) > 0)
		iov_iter_zero(iov_iter_count(&rdata->iter), &rdata->iter);

	last = (rdata->offset + rdata->bytes - 1) / PAGE_SIZE;

	rcu_read_lock();
	xas_for_each(&xas, folio, last) {
		if (good) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}
		/* Unlock unconditionally; the VFS retries !uptodate folios. */
		folio_unlock(folio);
	}
	rcu_read_unlock();

	kref_put(&rdata->refcount, cifs_readdata_release);
}

/*
 * ->readahead() implementation: satisfy as much of the request as possible
 * from the local cache, then chop the remainder into rsize-sized async
 * reads from the server.  On any failure the remaining folios are unlocked
 * and left !uptodate so the VFS falls back to read_folio.
 */
static void cifs_readahead(struct readahead_control *ractl)
{
	struct cifsFileInfo *open_file = ractl->file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
	struct TCP_Server_Info *server;
	unsigned int xid, nr_pages, cache_nr_pages = 0;
	unsigned int ra_pages;
	pgoff_t next_cached = ULONG_MAX, ra_index;
	bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
		cifs_inode_cookie(ractl->mapping->host)->cache_priv;
	bool check_cache = caching;
	pid_t pid;
	int rc = 0;

	/* Note that readahead_count() lags behind our dequeuing of pages from
	 * the ractl, so we have to keep track for ourselves.
	 */
	ra_pages = readahead_count(ractl);
	ra_index = readahead_index(ractl);

	xid = get_xid();

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, ractl->file, ractl->mapping, ra_pages);

	/*
	 * Chop the readahead request up into rsize-sized read requests.
	 */
	while ((nr_pages = ra_pages)) {
		unsigned int i, rsize;
		struct cifs_readdata *rdata;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		struct folio *folio;
		pgoff_t fsize;

		/*
		 * Find out if we have anything cached in the range of
		 * interest, and if so, where the next chunk of cached data is.
		 */
		if (caching) {
			if (check_cache) {
				rc = cifs_fscache_query_occupancy(
					ractl->mapping->host, ra_index, nr_pages,
					&next_cached, &cache_nr_pages);
				if (rc < 0)
					caching = false;
				check_cache = false;
			}

			if (ra_index == next_cached) {
				/*
				 * TODO: Send a whole batch of pages to be read
				 * by the cache.
				 */
				folio = readahead_folio(ractl);
				fsize = folio_nr_pages(folio);
				ra_pages -= fsize;
				ra_index += fsize;
				if (cifs_readpage_from_fscache(ractl->mapping->host,
							       &folio->page) < 0) {
					/*
					 * TODO: Deal with cache read failure
					 * here, but for the moment, delegate
					 * that to readpage.
					 */
					caching = false;
				}
				folio_unlock(folio);
				next_cached += fsize;
				cache_nr_pages -= fsize;
				if (cache_nr_pages == 0)
					check_cache = true;
				continue;
			}
		}

		/* Stale handle (e.g. after reconnect): reopen before reading. */
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc) {
				if (rc == -EAGAIN)
					continue;
				break;
			}
		}

		if (cifs_sb->ctx->rsize == 0)
			cifs_sb->ctx->rsize =
				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
							     cifs_sb->ctx);

		/* May block until the server grants enough credits. */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
						   &rsize, credits);
		if (rc)
			break;
		nr_pages = min_t(size_t, rsize / PAGE_SIZE, ra_pages);
		/* Don't read past the start of the next cached chunk. */
		if (next_cached != ULONG_MAX)
			nr_pages = min_t(size_t, nr_pages, next_cached - ra_index);

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(!nr_pages)) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(cifs_readahead_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->offset	= ra_index * PAGE_SIZE;
		rdata->bytes	= nr_pages * PAGE_SIZE;
		rdata->cfile	= cifsFileInfo_get(open_file);
		rdata->server	= server;
		rdata->mapping	= ractl->mapping;
		rdata->pid	= pid;
		rdata->credits	= credits_on_stack;

		/* Consume the folios from the ractl; they stay locked until
		 * cifs_readahead_complete() (or the error path) unlocks them.
		 */
		for (i = 0; i < nr_pages; i++) {
			if (!readahead_folio(ractl))
				WARN_ON(1);
		}
		ra_pages -= nr_pages;
		ra_index += nr_pages;

		iov_iter_xarray(&rdata->iter, ITER_DEST, &rdata->mapping->i_pages,
				rdata->offset, rdata->bytes);

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			add_credits_and_wake_if(server, &rdata->credits, 0);
			cifs_unlock_folios(rdata->mapping,
					   rdata->offset / PAGE_SIZE,
					   (rdata->offset + rdata->bytes - 1) / PAGE_SIZE);
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* Drop our ref; the async completion holds its own. */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	free_xid(xid);
}

/*
 * cifs_readpage_worker must be called with the page pinned; the page is
 * unlocked before returning on all paths.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	struct inode *inode = file_inode(file);
	struct timespec64 atime, mtime;
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(inode, page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));

	/* Zero the tail of the page beyond what was read from the server. */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);

read_complete:
	unlock_page(page);
	return rc;
}

/*
 * ->read_folio() implementation: synchronous single-page read, delegating
 * the actual work to cifs_readpage_worker().
 */
static int cifs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	loff_t offset = page_file_offset(page);
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}

/*
 * Return 1 if any open file on this inode was opened with write access,
 * else 0.  Scans the inode's open-file list under open_file_lock.
 */
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_inode->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
			    bool from_readdir)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode) ||
		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			we can change size safely */
			return true;
		}

		/* Growing the file is always safe; shrinking may race. */
		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

/*
 * ->write_begin() implementation: return a locked, pinned page in *pagep
 * covering pos.  Tries hard to avoid a read from the server when the
 * write will make the page uptodate anyway.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	int oncethru = 0;	/* guards the single read-retry below */
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

/*
 * ->release_folio(): refuse release while private data is attached or,
 * in some contexts, while the folio is being written to the cache.
 */
static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_private(folio))
		return 0;
	if (folio_test_fscache(folio)) {
		/* Cannot sleep in kswapd/non-FS reclaim contexts. */
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
	fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
	return true;
}

/* ->invalidate_folio(): just wait for any cache write-out to finish. */
static void cifs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	folio_wait_fscache(folio);
}

/*
 * ->launder_folio(): synchronously write back a dirty locked folio before
 * it is invalidated.
 */
static int cifs_launder_folio(struct folio *folio)
{
	int rc = 0;
	loff_t range_start = folio_pos(folio);
	loff_t range_end = range_start + folio_size(folio);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %lu\n", folio->index);

	if (folio_clear_dirty_for_io(folio))
		rc = cifs_writepage_locked(&folio->page, &wbc);

	folio_wait_fscache(folio);
	return rc;
}

/*
 * Work item handling a server-initiated oplock/lease break: downgrade the
 * cached state, flush/zap the pagecache as required, push byte-range locks
 * to the server, and finally acknowledge the break (unless the file has
 * already been closed).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/* Let in-flight writers finish before downgrading the oplock. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* Snapshot the fids before dropping our reference to cfile. */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid, cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
510538c8a9a5SSteve French * 510638c8a9a5SSteve French * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests 510738c8a9a5SSteve French * so this method should never be called. 510838c8a9a5SSteve French * 510938c8a9a5SSteve French * Direct IO is not yet supported in the cached mode. 511038c8a9a5SSteve French */ 511138c8a9a5SSteve French static ssize_t 511238c8a9a5SSteve French cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter) 511338c8a9a5SSteve French { 511438c8a9a5SSteve French /* 511538c8a9a5SSteve French * FIXME 511638c8a9a5SSteve French * Eventually need to support direct IO for non forcedirectio mounts 511738c8a9a5SSteve French */ 511838c8a9a5SSteve French return -EINVAL; 511938c8a9a5SSteve French } 512038c8a9a5SSteve French 512138c8a9a5SSteve French static int cifs_swap_activate(struct swap_info_struct *sis, 512238c8a9a5SSteve French struct file *swap_file, sector_t *span) 512338c8a9a5SSteve French { 512438c8a9a5SSteve French struct cifsFileInfo *cfile = swap_file->private_data; 512538c8a9a5SSteve French struct inode *inode = swap_file->f_mapping->host; 512638c8a9a5SSteve French unsigned long blocks; 512738c8a9a5SSteve French long long isize; 512838c8a9a5SSteve French 512938c8a9a5SSteve French cifs_dbg(FYI, "swap activate\n"); 513038c8a9a5SSteve French 513138c8a9a5SSteve French if (!swap_file->f_mapping->a_ops->swap_rw) 513238c8a9a5SSteve French /* Cannot support swap */ 513338c8a9a5SSteve French return -EINVAL; 513438c8a9a5SSteve French 513538c8a9a5SSteve French spin_lock(&inode->i_lock); 513638c8a9a5SSteve French blocks = inode->i_blocks; 513738c8a9a5SSteve French isize = inode->i_size; 513838c8a9a5SSteve French spin_unlock(&inode->i_lock); 513938c8a9a5SSteve French if (blocks*512 < isize) { 514038c8a9a5SSteve French pr_warn("swap activate: swapfile has holes\n"); 514138c8a9a5SSteve French return -EINVAL; 514238c8a9a5SSteve French } 514338c8a9a5SSteve French *span = sis->pages; 514438c8a9a5SSteve French 
514538c8a9a5SSteve French pr_warn_once("Swap support over SMB3 is experimental\n"); 514638c8a9a5SSteve French 514738c8a9a5SSteve French /* 514838c8a9a5SSteve French * TODO: consider adding ACL (or documenting how) to prevent other 514938c8a9a5SSteve French * users (on this or other systems) from reading it 515038c8a9a5SSteve French */ 515138c8a9a5SSteve French 515238c8a9a5SSteve French 515338c8a9a5SSteve French /* TODO: add sk_set_memalloc(inet) or similar */ 515438c8a9a5SSteve French 515538c8a9a5SSteve French if (cfile) 515638c8a9a5SSteve French cfile->swapfile = true; 515738c8a9a5SSteve French /* 515838c8a9a5SSteve French * TODO: Since file already open, we can't open with DENY_ALL here 515938c8a9a5SSteve French * but we could add call to grab a byte range lock to prevent others 516038c8a9a5SSteve French * from reading or writing the file 516138c8a9a5SSteve French */ 516238c8a9a5SSteve French 516338c8a9a5SSteve French sis->flags |= SWP_FS_OPS; 516438c8a9a5SSteve French return add_swap_extent(sis, 0, sis->max, 0); 516538c8a9a5SSteve French } 516638c8a9a5SSteve French 516738c8a9a5SSteve French static void cifs_swap_deactivate(struct file *file) 516838c8a9a5SSteve French { 516938c8a9a5SSteve French struct cifsFileInfo *cfile = file->private_data; 517038c8a9a5SSteve French 517138c8a9a5SSteve French cifs_dbg(FYI, "swap deactivate\n"); 517238c8a9a5SSteve French 517338c8a9a5SSteve French /* TODO: undo sk_set_memalloc(inet) will eventually be needed */ 517438c8a9a5SSteve French 517538c8a9a5SSteve French if (cfile) 517638c8a9a5SSteve French cfile->swapfile = false; 517738c8a9a5SSteve French 517838c8a9a5SSteve French /* do we need to unpin (or unlock) the file */ 517938c8a9a5SSteve French } 518038c8a9a5SSteve French 518138c8a9a5SSteve French /* 518238c8a9a5SSteve French * Mark a page as having been made dirty and thus needing writeback. We also 518338c8a9a5SSteve French * need to pin the cache object to write back to. 
518438c8a9a5SSteve French */ 518538c8a9a5SSteve French #ifdef CONFIG_CIFS_FSCACHE 518638c8a9a5SSteve French static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio) 518738c8a9a5SSteve French { 518838c8a9a5SSteve French return fscache_dirty_folio(mapping, folio, 518938c8a9a5SSteve French cifs_inode_cookie(mapping->host)); 519038c8a9a5SSteve French } 519138c8a9a5SSteve French #else 519238c8a9a5SSteve French #define cifs_dirty_folio filemap_dirty_folio 519338c8a9a5SSteve French #endif 519438c8a9a5SSteve French 519538c8a9a5SSteve French const struct address_space_operations cifs_addr_ops = { 519638c8a9a5SSteve French .read_folio = cifs_read_folio, 519738c8a9a5SSteve French .readahead = cifs_readahead, 519838c8a9a5SSteve French .writepages = cifs_writepages, 519938c8a9a5SSteve French .write_begin = cifs_write_begin, 520038c8a9a5SSteve French .write_end = cifs_write_end, 520138c8a9a5SSteve French .dirty_folio = cifs_dirty_folio, 520238c8a9a5SSteve French .release_folio = cifs_release_folio, 520338c8a9a5SSteve French .direct_IO = cifs_direct_io, 520438c8a9a5SSteve French .invalidate_folio = cifs_invalidate_folio, 520538c8a9a5SSteve French .launder_folio = cifs_launder_folio, 520638c8a9a5SSteve French .migrate_folio = filemap_migrate_folio, 520738c8a9a5SSteve French /* 520838c8a9a5SSteve French * TODO: investigate and if useful we could add an is_dirty_writeback 520938c8a9a5SSteve French * helper if needed 521038c8a9a5SSteve French */ 521138c8a9a5SSteve French .swap_activate = cifs_swap_activate, 521238c8a9a5SSteve French .swap_deactivate = cifs_swap_deactivate, 521338c8a9a5SSteve French }; 521438c8a9a5SSteve French 521538c8a9a5SSteve French /* 521638c8a9a5SSteve French * cifs_readahead requires the server to support a buffer large enough to 521738c8a9a5SSteve French * contain the header plus one complete page of data. Otherwise, we need 521838c8a9a5SSteve French * to leave cifs_readahead out of the address space operations. 
521938c8a9a5SSteve French */ 522038c8a9a5SSteve French const struct address_space_operations cifs_addr_ops_smallbuf = { 522138c8a9a5SSteve French .read_folio = cifs_read_folio, 522238c8a9a5SSteve French .writepages = cifs_writepages, 522338c8a9a5SSteve French .write_begin = cifs_write_begin, 522438c8a9a5SSteve French .write_end = cifs_write_end, 522538c8a9a5SSteve French .dirty_folio = cifs_dirty_folio, 522638c8a9a5SSteve French .release_folio = cifs_release_folio, 522738c8a9a5SSteve French .invalidate_folio = cifs_invalidate_folio, 522838c8a9a5SSteve French .launder_folio = cifs_launder_folio, 522938c8a9a5SSteve French .migrate_folio = filemap_migrate_folio, 523038c8a9a5SSteve French }; 5231