/*
 *  linux/fs/9p/vfs_addr.c
 *
 *  This file contains VFS address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/aio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_fid_readpage - read an entire page in from 9P
 *
 * @fid: fid being read
 * @page: page to read into
 *
 */
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
        int retval;
        loff_t offset;
        char *buffer;
        struct inode *inode;

        inode = page->mapping->host;
        p9_debug(P9_DEBUG_VFS, "\n");

        BUG_ON(!PageLocked(page));

        retval = v9fs_readpage_from_fscache(inode, page);
        if (retval == 0)
                return retval;

        buffer = kmap(page);
        offset = page_offset(page);

        retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
        if (retval < 0) {
                v9fs_uncache_page(inode, page);
                goto done;
        }

        memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
        flush_dcache_page(page);
        SetPageUptodate(page);

        v9fs_readpage_to_fscache(inode, page);
        retval = 0;

done:
        kunmap(page);
        unlock_page(page);
        return retval;
}

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 *
 * @filp: file being read
 * @page: page to read into
 *
 */

static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
        return v9fs_fid_readpage(filp->private_data, page);
}

/**
 * v9fs_vfs_readpages - read a set of pages from 9P
 *
 * @filp: file being read
 * @mapping: the address space
 * @pages: list of pages to read
 * @nr_pages: count of pages to read
 *
 */

static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
                              struct list_head *pages, unsigned nr_pages)
{
        int ret = 0;
        struct inode *inode;

        inode = mapping->host;
        p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);

        ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
        if (ret == 0)
                return ret;

        ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
        p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
        return ret;
}

/**
 * v9fs_release_page - release the private state associated with a page
 *
 * Returns 1 if the page can be released, 0 otherwise.
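 * @page: page being released
 * @gfp: allocation flags passed in by the caller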
 */

static int v9fs_release_page(struct page *page, gfp_t gfp)
{
        if (PagePrivate(page))
                return 0;
        return v9fs_fscache_release_page(page, gfp);
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 *
 * @page: page to invalidate
 * @offset: offset in the page of the start of the invalidated region
 * @length: length of the invalidated region
 */

static void v9fs_invalidate_page(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        /*
         * If the whole page is being invalidated, release the private
         * (fscache) state associated with it.
         */
        if (offset == 0 && length == PAGE_CACHE_SIZE)
                v9fs_fscache_invalidate_page(page);
}

static int v9fs_vfs_writepage_locked(struct page *page)
{
        char *buffer;
        int retval, len;
        loff_t offset, size;
        mm_segment_t old_fs;
        struct v9fs_inode *v9inode;
        struct inode *inode = page->mapping->host;

        v9inode = V9FS_I(inode);
        size = i_size_read(inode);
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;

        set_page_writeback(page);

        buffer = kmap(page);
        offset = page_offset(page);

        old_fs = get_fs();
        set_fs(get_ds());
        /* We should have writeback_fid always set */
        BUG_ON(!v9inode->writeback_fid);

        retval = v9fs_file_write_internal(inode,
                                          v9inode->writeback_fid,
                                          (__force const char __user *)buffer,
                                          len, &offset, 0);
        if (retval > 0)
                retval = 0;

        set_fs(old_fs);
        kunmap(page);
        end_page_writeback(page);
        return retval;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int retval;

        retval = v9fs_vfs_writepage_locked(page);
        if (retval < 0) {
                if (retval == -EAGAIN) {
                        redirty_page_for_writepage(wbc, page);
                        retval = 0;
                } else {
                        SetPageError(page);
                        mapping_set_error(page->mapping, retval);
                }
        } else
                retval = 0;

        unlock_page(page);
        return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 *
 * Returns 0 on success.
 */

static int v9fs_launder_page(struct page *page)
{
        int retval;
        struct inode *inode = page->mapping->host;

        v9fs_fscache_wait_on_page_write(inode, page);
        if (clear_page_dirty_for_io(page)) {
                retval = v9fs_vfs_writepage_locked(page);
                if (retval)
                        return retval;
        }
        return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write
 * fails with an error.
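 *
 * For example, a read(2) on an O_DIRECT descriptor from a cached
 * (e.g. cache=loose) mount reaches this stub through
 * generic_file_aio_read() and returns -EINVAL to the caller.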
257 * 258 */ 259 static ssize_t 260 v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 261 loff_t pos, unsigned long nr_segs) 262 { 263 /* 264 * FIXME 265 * Now that we do caching with cache mode enabled, We need 266 * to support direct IO 267 */ 268 p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n", 269 iocb->ki_filp->f_path.dentry->d_name.name, 270 (long long)pos, nr_segs); 271 272 return -EINVAL; 273 } 274 275 static int v9fs_write_begin(struct file *filp, struct address_space *mapping, 276 loff_t pos, unsigned len, unsigned flags, 277 struct page **pagep, void **fsdata) 278 { 279 int retval = 0; 280 struct page *page; 281 struct v9fs_inode *v9inode; 282 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 283 struct inode *inode = mapping->host; 284 285 v9inode = V9FS_I(inode); 286 start: 287 page = grab_cache_page_write_begin(mapping, index, flags); 288 if (!page) { 289 retval = -ENOMEM; 290 goto out; 291 } 292 BUG_ON(!v9inode->writeback_fid); 293 if (PageUptodate(page)) 294 goto out; 295 296 if (len == PAGE_CACHE_SIZE) 297 goto out; 298 299 retval = v9fs_fid_readpage(v9inode->writeback_fid, page); 300 page_cache_release(page); 301 if (!retval) 302 goto start; 303 out: 304 *pagep = page; 305 return retval; 306 } 307 308 static int v9fs_write_end(struct file *filp, struct address_space *mapping, 309 loff_t pos, unsigned len, unsigned copied, 310 struct page *page, void *fsdata) 311 { 312 loff_t last_pos = pos + copied; 313 struct inode *inode = page->mapping->host; 314 315 if (unlikely(copied < len)) { 316 /* 317 * zero out the rest of the area 318 */ 319 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 320 321 zero_user(page, from + copied, len - copied); 322 flush_dcache_page(page); 323 } 324 325 if (!PageUptodate(page)) 326 SetPageUptodate(page); 327 /* 328 * No need to use i_size_read() here, the i_size 329 * cannot change under us because we hold the i_mutex. 330 */ 331 if (last_pos > inode->i_size) { 332 inode_add_bytes(inode, last_pos - inode->i_size); 333 i_size_write(inode, last_pos); 334 } 335 set_page_dirty(page); 336 unlock_page(page); 337 page_cache_release(page); 338 339 return copied; 340 } 341 342 343 const struct address_space_operations v9fs_addr_operations = { 344 .readpage = v9fs_vfs_readpage, 345 .readpages = v9fs_vfs_readpages, 346 .set_page_dirty = __set_page_dirty_nobuffers, 347 .writepage = v9fs_vfs_writepage, 348 .write_begin = v9fs_write_begin, 349 .write_end = v9fs_write_end, 350 .releasepage = v9fs_release_page, 351 .invalidatepage = v9fs_invalidate_page, 352 .launder_page = v9fs_launder_page, 353 .direct_IO = v9fs_direct_IO, 354 }; 355