--- write.c (accba5f3965d6a9d1bf7c1e1a7995d17e9d521b6)
+++ write.c (15b4650e55e06d2cc05115767551cd3ace875431)
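The hunks below show fs/afs/write.c being converted from the old ->prepare_write()/->commit_write() address_space operations to the newer ->write_begin()/->write_end() pair. For orientation, the matching change to the AFS address_space_operations table is sketched here; the table itself is not part of the hunks shown (if memory serves it lives in fs/afs/file.c in this kernel generation), so treat the surrounding fields as assumptions.

```c
/* Assumed aops wiring change; illustrative sketch, not part of this diff. */
const struct address_space_operations afs_fs_aops = {
        .readpage       = afs_readpage,
        /* ... other methods unchanged ... */
        .write_begin    = afs_write_begin,  /* was: .prepare_write = afs_prepare_write */
        .write_end      = afs_write_end,    /* was: .commit_write  = afs_commit_write  */
        .writepage      = afs_writepage,
        .writepages     = afs_writepages,
};
```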
 /* handling of writes to regular files and writing back to the server
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
[... 70 unchanged lines hidden ...]
         if (wb)
                 afs_free_writeback(wb);
 }
 
 /*
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-                         unsigned start, unsigned len, struct page *page)
+                         loff_t pos, unsigned len, struct page *page)
 {
+        loff_t i_size;
+        unsigned eof;
         int ret;
 
-        _enter(",,%u,%u", start, len);
+        _enter(",,%llu,%u", (unsigned long long)pos, len);
 
-        ASSERTCMP(start + len, <=, PAGE_SIZE);
+        ASSERTCMP(len, <=, PAGE_CACHE_SIZE);
 
-        ret = afs_vnode_fetch_data(vnode, key, start, len, page);
+        i_size = i_size_read(&vnode->vfs_inode);
+        if (pos + len > i_size)
+                eof = i_size;
+        else
+                eof = PAGE_CACHE_SIZE;
+
+        ret = afs_vnode_fetch_data(vnode, key, 0, eof, page);
         if (ret < 0) {
                 if (ret == -ENOENT) {
                         _debug("got NOENT from server"
                                " - marking file deleted and stale");
                         set_bit(AFS_VNODE_DELETED, &vnode->flags);
                         ret = -ESTALE;
                 }
         }
 
         _leave(" = %d", ret);
         return ret;
 }
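The old afs_fill_page() took an offset and length within the page; the new one takes the absolute file position, because that is what ->write_begin() receives, and derives for itself how much of the page to fetch from the server. A standalone illustration of the arithmetic involved follows (plain userspace C, assuming 4 KiB pages; the input values are made up for the example):

```c
/* Standalone sketch of the index/from/eof arithmetic used by the new
 * afs_write_begin()/afs_fill_page() pair; not kernel code. */
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
        unsigned long long pos = 5000;    /* absolute file position of the write */
        unsigned len = 200;               /* number of bytes being written */
        unsigned long long i_size = 6000; /* current file size */

        unsigned long index = pos >> PAGE_CACHE_SHIFT;    /* page index: 1 */
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);      /* offset in page: 904 */
        unsigned to = from + len;                         /* end offset: 1104 */

        /* the new afs_fill_page() fetches bytes [0, eof) of the page */
        unsigned eof = (pos + len > i_size) ? (unsigned)i_size : PAGE_CACHE_SIZE;

        printf("index=%lu from=%u to=%u eof=%u\n", index, from, to, eof);
        return 0;
}
```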
 
 /*
- * prepare a page for being written to
- */
-static int afs_prepare_page(struct afs_vnode *vnode, struct page *page,
-                            struct key *key, unsigned offset, unsigned to)
-{
-        unsigned eof, tail, start, stop, len;
-        loff_t i_size, pos;
-        void *p;
-        int ret;
-
-        _enter("");
-
-        if (offset == 0 && to == PAGE_SIZE)
-                return 0;
-
-        p = kmap_atomic(page, KM_USER0);
-
-        i_size = i_size_read(&vnode->vfs_inode);
-        pos = (loff_t) page->index << PAGE_SHIFT;
-        if (pos >= i_size) {
-                /* partial write, page beyond EOF */
-                _debug("beyond");
-                if (offset > 0)
-                        memset(p, 0, offset);
-                if (to < PAGE_SIZE)
-                        memset(p + to, 0, PAGE_SIZE - to);
-                kunmap_atomic(p, KM_USER0);
-                return 0;
-        }
-
-        if (i_size - pos >= PAGE_SIZE) {
-                /* partial write, page entirely before EOF */
-                _debug("before");
-                tail = eof = PAGE_SIZE;
-        } else {
-                /* partial write, page overlaps EOF */
-                eof = i_size - pos;
-                _debug("overlap %u", eof);
-                tail = max(eof, to);
-                if (tail < PAGE_SIZE)
-                        memset(p + tail, 0, PAGE_SIZE - tail);
-                if (offset > eof)
-                        memset(p + eof, 0, PAGE_SIZE - eof);
-        }
-
-        kunmap_atomic(p, KM_USER0);
-
-        ret = 0;
-        if (offset > 0 || eof > to) {
-                /* need to fill one or two bits that aren't going to be written
-                 * (cover both fillers in one read if there are two) */
-                start = (offset > 0) ? 0 : to;
-                stop = (eof > to) ? eof : offset;
-                len = stop - start;
-                _debug("wr=%u-%u av=0-%u rd=%u@%u",
-                       offset, to, eof, start, len);
-                ret = afs_fill_page(vnode, key, start, len, page);
-        }
-
-        _leave(" = %d", ret);
-        return ret;
-}
-
-/*
  * prepare to perform part of a write to a page
- * - the caller holds the page locked, preventing it from being written out or
- *   modified by anyone else
  */
-int afs_prepare_write(struct file *file, struct page *page,
-                      unsigned offset, unsigned to)
+int afs_write_begin(struct file *file, struct address_space *mapping,
+                    loff_t pos, unsigned len, unsigned flags,
+                    struct page **pagep, void **fsdata)
 {
         struct afs_writeback *candidate, *wb;
         struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+        struct page *page;
         struct key *key = file->private_data;
-        pgoff_t index;
+        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+        unsigned to = from + len;
+        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
         int ret;
 
         _enter("{%x:%u},{%lx},%u,%u",
-               vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
+               vnode->fid.vid, vnode->fid.vnode, index, from, to);
 
         candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
         if (!candidate)
                 return -ENOMEM;
         candidate->vnode = vnode;
-        candidate->first = candidate->last = page->index;
-        candidate->offset_first = offset;
+        candidate->first = candidate->last = index;
+        candidate->offset_first = from;
         candidate->to_last = to;
         candidate->usage = 1;
         candidate->state = AFS_WBACK_PENDING;
         init_waitqueue_head(&candidate->waitq);
 
+        page = __grab_cache_page(mapping, index);
+        if (!page) {
+                kfree(candidate);
+                return -ENOMEM;
+        }
+        *pagep = page;
+        /* page won't leak in error case: it eventually gets cleaned off LRU */
+
         if (!PageUptodate(page)) {
                 _debug("not up to date");
-                ret = afs_prepare_page(vnode, page, key, offset, to);
+                ret = afs_fill_page(vnode, key, pos, len, page);
                 if (ret < 0) {
                         kfree(candidate);
                         _leave(" = %d [prep]", ret);
                         return ret;
                 }
+                SetPageUptodate(page);
         }
 
 try_again:
-        index = page->index;
         spin_lock(&vnode->writeback_lock);
 
         /* see if this page is already pending a writeback under a suitable key
          * - if so we can just join onto that one */
         wb = (struct afs_writeback *) page_private(page);
         if (wb) {
                 if (wb->key == key && wb->state == AFS_WBACK_PENDING)
                         goto subsume_in_current_wb;
[... 16 unchanged lines hidden ...]
         SetPagePrivate(page);
         set_page_private(page, (unsigned long) candidate);
         _leave(" = 0 [new]");
         return 0;
 
 subsume_in_current_wb:
         _debug("subsume");
         ASSERTRANGE(wb->first, <=, index, <=, wb->last);
-        if (index == wb->first && offset < wb->offset_first)
-                wb->offset_first = offset;
+        if (index == wb->first && from < wb->offset_first)
+                wb->offset_first = from;
         if (index == wb->last && to > wb->to_last)
                 wb->to_last = to;
         spin_unlock(&vnode->writeback_lock);
         kfree(candidate);
         _leave(" = 0 [sub]");
         return 0;
 
 append_to_previous_wb:
[... 29 unchanged lines hidden ...]
         set_page_private(page, 0);
         ClearPagePrivate(page);
         goto try_again;
 }
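afs_write_begin() keeps the old bookkeeping: each page carries, via page_private(), an afs_writeback record describing the byte range still owed to the server, and a second write that lands on a page already inside a pending record only widens that record (the "subsume" path above). A toy, userspace-only model of that widening step is shown below; the field names follow the diff, everything else is illustrative.

```c
/* Toy model of the "subsume" step; not kernel code. */
#include <stdio.h>

struct writeback_range {
        unsigned long first, last;      /* page indices covered */
        unsigned offset_first;          /* start offset within the first page */
        unsigned to_last;               /* end offset within the last page */
};

static void subsume(struct writeback_range *wb, unsigned long index,
                    unsigned from, unsigned to)
{
        /* widen the record at either end, exactly as the diff above does */
        if (index == wb->first && from < wb->offset_first)
                wb->offset_first = from;
        if (index == wb->last && to > wb->to_last)
                wb->to_last = to;
}

int main(void)
{
        struct writeback_range wb = { 3, 5, 1024, 2048 };

        subsume(&wb, 3, 512, 4096);     /* earlier start within page 3 */
        subsume(&wb, 5, 0, 3000);       /* later end within page 5 */

        printf("first=%lu last=%lu offset_first=%u to_last=%u\n",
               wb.first, wb.last, wb.offset_first, wb.to_last);
        return 0;
}
```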
 
 /*
  * finalise part of a write to a page
  */
-int afs_commit_write(struct file *file, struct page *page,
-                     unsigned offset, unsigned to)
+int afs_write_end(struct file *file, struct address_space *mapping,
+                  loff_t pos, unsigned len, unsigned copied,
+                  struct page *page, void *fsdata)
 {
         struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
         loff_t i_size, maybe_i_size;
 
-        _enter("{%x:%u},{%lx},%u,%u",
-               vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
+        _enter("{%x:%u},{%lx}",
+               vnode->fid.vid, vnode->fid.vnode, page->index);
 
-        maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
-        maybe_i_size += to;
+        maybe_i_size = pos + copied;
 
         i_size = i_size_read(&vnode->vfs_inode);
         if (maybe_i_size > i_size) {
                 spin_lock(&vnode->writeback_lock);
                 i_size = i_size_read(&vnode->vfs_inode);
                 if (maybe_i_size > i_size)
                         i_size_write(&vnode->vfs_inode, maybe_i_size);
                 spin_unlock(&vnode->writeback_lock);
         }
 
-        SetPageUptodate(page);
         set_page_dirty(page);
         if (PageDirty(page))
                 _debug("dirtied");
+        unlock_page(page);
+        page_cache_release(page);
 
-        return 0;
+        return copied;
 }
 
 /*
  * kill all the pages in the given range
  */
 static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                            pgoff_t first, pgoff_t last)
 {
[... 501 unchanged lines hidden ...]
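The reason afs_write_end() now unlocks and releases the page and returns the byte count is the contract of the new API: the generic buffered-write loop asks the filesystem for a prepared, locked page, copies the user data into it, then hands the page straight back. A runnable userspace mock of that contract follows; all names are illustrative, only the call ordering mirrors the kernel API.

```c
/* Mock of the ->write_begin()/->write_end() calling convention; not kernel code. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static char pagecache[4][PAGE_SIZE];            /* pretend page cache */

static int mock_write_begin(long long pos, unsigned len, char **pagep)
{
        *pagep = pagecache[pos / PAGE_SIZE];    /* "grab" and "prepare" the page */
        return 0;
}

static unsigned mock_write_end(long long pos, unsigned len, unsigned copied)
{
        /* the kernel version would dirty, unlock and release the page here */
        return copied;
}

int main(void)
{
        const char *data = "hello, afs";
        long long pos = 5000;
        unsigned len = strlen(data);
        char *page;

        if (mock_write_begin(pos, len, &page) == 0) {
                unsigned offset = pos % PAGE_SIZE;
                memcpy(page + offset, data, len);       /* caller copies the data */
                unsigned copied = mock_write_end(pos, len, len);
                printf("wrote %u bytes at offset %u of page %lld\n",
                       copied, offset, pos / PAGE_SIZE);
        }
        return 0;
}
```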