/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */
151da177e4SLinus Torvalds
161da177e4SLinus Torvalds #include <linux/kernel.h>
171da177e4SLinus Torvalds #include <linux/module.h>
181da177e4SLinus Torvalds #include <linux/init.h>
1908e0e7c8SDavid Howells #include <linux/circ_buf.h>
20e8edc6e0SAlexey Dobriyan #include <linux/sched.h>
211da177e4SLinus Torvalds #include "internal.h"
2208e0e7c8SDavid Howells
23c435ee34SDavid Howells /*
246e0e99d5SDavid Howells * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring
256e0e99d5SDavid Howells * to the pages in this file's pagecache, forcing the kernel to go through
266e0e99d5SDavid Howells * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
276e0e99d5SDavid Howells * more fully.
286e0e99d5SDavid Howells */
afs_invalidate_mmap_work(struct work_struct * work)296e0e99d5SDavid Howells void afs_invalidate_mmap_work(struct work_struct *work)
306e0e99d5SDavid Howells {
316e0e99d5SDavid Howells struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
326e0e99d5SDavid Howells
33874c8ca1SDavid Howells unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
346e0e99d5SDavid Howells }
356e0e99d5SDavid Howells
afs_server_init_callback_work(struct work_struct * work)366e0e99d5SDavid Howells void afs_server_init_callback_work(struct work_struct *work)
376e0e99d5SDavid Howells {
386e0e99d5SDavid Howells struct afs_server *server = container_of(work, struct afs_server, initcb_work);
396e0e99d5SDavid Howells struct afs_vnode *vnode;
406e0e99d5SDavid Howells struct afs_cell *cell = server->cell;
416e0e99d5SDavid Howells
426e0e99d5SDavid Howells down_read(&cell->fs_open_mmaps_lock);
436e0e99d5SDavid Howells
446e0e99d5SDavid Howells list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
456e0e99d5SDavid Howells if (vnode->cb_server == server) {
466e0e99d5SDavid Howells clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
476e0e99d5SDavid Howells queue_work(system_unbound_wq, &vnode->cb_work);
486e0e99d5SDavid Howells }
496e0e99d5SDavid Howells }
506e0e99d5SDavid Howells
516e0e99d5SDavid Howells up_read(&cell->fs_open_mmaps_lock);
526e0e99d5SDavid Howells }
536e0e99d5SDavid Howells
/*
 * Allow the fileserver to request callback state (re-)initialisation.
 * Unfortunately, UUIDs are not guaranteed unique.
 */
void afs_init_callback_state(struct afs_server *server)
{
	rcu_read_lock();
	do {
		/* Bump the per-server break counter; promises stamped with an
		 * older value are thereby invalidated.
		 */
		server->cb_s_break++;
		atomic_inc(&server->cell->fs_s_break);
		/* Only queue the mmap-invalidation work if anything is
		 * actually mmap'd in this cell.
		 */
		if (!list_empty(&server->cell->fs_open_mmaps))
			queue_work(system_unbound_wq, &server->initcb_work);

		/* Walk every server record sharing this UUID, since the
		 * request can't be attributed to exactly one of them.
		 * Note: the first record is dereferenced without a NULL
		 * check - the caller must pass a valid server.
		 */
	} while ((server = rcu_dereference(server->uuid_next)));
	rcu_read_unlock();
}
7008e0e7c8SDavid Howells
/*
 * Actually break a callback: discard the promise the fileserver gave us for
 * this vnode, clear cached permits and kick anything waiting on the callback.
 * The caller must hold vnode->cb_lock for writing (see afs_break_callback()).
 */
void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	_enter("");

	clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
		/* Only count a break if there was actually a promise to
		 * break; cb_break invalidates stamped promises elsewhere.
		 */
		vnode->cb_break++;
		/* Bring the vnode's copy of the volume-wide break counter up
		 * to date.
		 */
		vnode->cb_v_break = vnode->volume->cb_v_break;
		afs_clear_permits(vnode);

		if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
			afs_lock_may_be_available(vnode);

		/* If the file is mmap'd, queue work to zap its PTEs so that
		 * the next access goes back through ->fault() - unless the
		 * file was deleted, in which case there's nothing to reread.
		 */
		if (reason != afs_cb_break_for_deleted &&
		    vnode->status.type == AFS_FTYPE_FILE &&
		    atomic_read(&vnode->cb_nr_mmap))
			queue_work(system_unbound_wq, &vnode->cb_work);

		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
	} else {
		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
	}
}
97c435ee34SDavid Howells
/*
 * Break a callback promise on a vnode, taking the callback seqlock for
 * writing to exclude concurrent readers of the callback state.
 */
void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	write_seqlock(&vnode->cb_lock);
	__afs_break_callback(vnode, reason);
	write_sequnlock(&vnode->cb_lock);
}
10408e0e7c8SDavid Howells
/*
 * Look up a volume by volume ID under RCU conditions.
 *
 * Returns the volume record or NULL if not found.  No reference is taken
 * here - the caller relies on being inside an RCU read-side section.
 */
static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
						afs_volid_t vid)
{
	struct afs_volume *volume = NULL;
	struct rb_node *p;
	int seq = 1;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		seq++; /* 2 on the 1st/lockless path, otherwise odd */
		read_seqbegin_or_lock(&cell->volume_lock, &seq);

		p = rcu_dereference_raw(cell->volumes.rb_node);
		while (p) {
			volume = rb_entry(p, struct afs_volume, cell_node);

			if (volume->vid < vid)
				p = rcu_dereference_raw(p->rb_left);
			else if (volume->vid > vid)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			/* Reset so a failed search yields NULL, not the last
			 * node visited.
			 */
			volume = NULL;
		}

	} while (need_seqretry(&cell->volume_lock, seq));

	done_seqretry(&cell->volume_lock, seq);
	return volume;
}
1418230fd82SDavid Howells
1428230fd82SDavid Howells /*
14308e0e7c8SDavid Howells * allow the fileserver to explicitly break one callback
14408e0e7c8SDavid Howells * - happens when
14508e0e7c8SDavid Howells * - the backing file is changed
14608e0e7c8SDavid Howells * - a lock is released
14708e0e7c8SDavid Howells */
afs_break_one_callback(struct afs_volume * volume,struct afs_fid * fid)14820325960SDavid Howells static void afs_break_one_callback(struct afs_volume *volume,
14920325960SDavid Howells struct afs_fid *fid)
15008e0e7c8SDavid Howells {
15120325960SDavid Howells struct super_block *sb;
15208e0e7c8SDavid Howells struct afs_vnode *vnode;
153c435ee34SDavid Howells struct inode *inode;
15408e0e7c8SDavid Howells
15568251f0aSDavid Howells if (fid->vnode == 0 && fid->unique == 0) {
15668251f0aSDavid Howells /* The callback break applies to an entire volume. */
15790fa9b64SDavid Howells write_lock(&volume->cb_v_break_lock);
15868251f0aSDavid Howells volume->cb_v_break++;
159051d2525SDavid Howells trace_afs_cb_break(fid, volume->cb_v_break,
160051d2525SDavid Howells afs_cb_break_for_volume_callback, false);
16190fa9b64SDavid Howells write_unlock(&volume->cb_v_break_lock);
16220325960SDavid Howells return;
16320325960SDavid Howells }
16420325960SDavid Howells
16520325960SDavid Howells /* See if we can find a matching inode - even an I_NEW inode needs to
16620325960SDavid Howells * be marked as it can have its callback broken before we finish
16720325960SDavid Howells * setting up the local inode.
1683f19b2abSDavid Howells */
16920325960SDavid Howells sb = rcu_dereference(volume->sb);
17020325960SDavid Howells if (!sb)
17120325960SDavid Howells return;
17220325960SDavid Howells
17320325960SDavid Howells inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
174c435ee34SDavid Howells if (inode) {
175c435ee34SDavid Howells vnode = AFS_FS_I(inode);
176051d2525SDavid Howells afs_break_callback(vnode, afs_cb_break_for_callback);
177051d2525SDavid Howells } else {
178051d2525SDavid Howells trace_afs_cb_miss(fid, afs_cb_break_for_callback);
179c435ee34SDavid Howells }
18008e0e7c8SDavid Howells }
18108e0e7c8SDavid Howells
/*
 * Break every callback in the array that belongs to the same volume as the
 * first entry, compacting the remaining entries towards the front of the
 * array and updating *_count to the number left for a subsequent pass.
 */
static void afs_break_some_callbacks(struct afs_server *server,
				     struct afs_callback_break *cbb,
				     size_t *_count)
{
	struct afs_callback_break *keep = cbb;
	struct afs_volume *volume;
	afs_volid_t vid = cbb->fid.vid;
	size_t i, n = *_count, remaining = 0;

	volume = afs_lookup_volume_rcu(server->cell, vid);

	/* TODO: Find all matching volumes if we couldn't match the server and
	 * break them anyway.
	 */

	for (i = 0; i < n; i++, cbb++) {
		if (cbb->fid.vid != vid) {
			/* Different volume: keep it for a later pass. */
			*keep++ = *cbb;
			remaining++;
			continue;
		}
		_debug("- Fid { vl=%08llx n=%llu u=%u }",
		       cbb->fid.vid,
		       cbb->fid.vnode,
		       cbb->fid.unique);
		if (volume)
			afs_break_one_callback(volume, &cbb->fid);
	}

	*_count = remaining;
}
2111da177e4SLinus Torvalds
2121da177e4SLinus Torvalds /*
2131da177e4SLinus Torvalds * allow the fileserver to break callback promises
2141da177e4SLinus Torvalds */
afs_break_callbacks(struct afs_server * server,size_t count,struct afs_callback_break * callbacks)21508e0e7c8SDavid Howells void afs_break_callbacks(struct afs_server *server, size_t count,
2165cf9dd55SDavid Howells struct afs_callback_break *callbacks)
2171da177e4SLinus Torvalds {
21808e0e7c8SDavid Howells _enter("%p,%zu,", server, count);
21908e0e7c8SDavid Howells
22008e0e7c8SDavid Howells ASSERT(server != NULL);
2211da177e4SLinus Torvalds
2228230fd82SDavid Howells rcu_read_lock();
22368251f0aSDavid Howells
2248230fd82SDavid Howells while (count > 0)
2258230fd82SDavid Howells afs_break_some_callbacks(server, callbacks, &count);
2261da177e4SLinus Torvalds
2278230fd82SDavid Howells rcu_read_unlock();
22808e0e7c8SDavid Howells return;
229ec26815aSDavid Howells }
230