// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS fileserver list management.
 *
 * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "internal.h"

void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
{
	int i;

	if (slist && refcount_dec_and_test(&slist->usage)) {
		for (i = 0; i < slist->nr_servers; i++)
			afs_unuse_server(net, slist->servers[i].server,
					 afs_server_trace_put_slist);
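		/* The list may still be under inspection by RCU readers, so
		 * defer the actual free until after a grace period.
		 */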
		kfree_rcu(slist, rcu);
	}
}

/*
 * Build a server list from a VLDB record.
 */
struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
					      struct key *key,
					      struct afs_vldb_entry *vldb)
{
	struct afs_server_list *slist;
	struct afs_server *server;
	unsigned int type_mask = 1 << volume->type;
	int ret = -ENOMEM, nr_servers = 0, i, j;

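	/* Count the servers that host an instance of the volume type (RW, RO
	 * or backup) that we're interested in.
	 */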
	for (i = 0; i < vldb->nr_servers; i++)
		if (vldb->fs_mask[i] & type_mask)
			nr_servers++;

	slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
	if (!slist)
		goto error;

	refcount_set(&slist->usage, 1);
	rwlock_init(&slist->lock);

	/* Make sure a record exists for each server in the list. */
	for (i = 0; i < vldb->nr_servers; i++) {
		if (!(vldb->fs_mask[i] & type_mask))
			continue;

		server = afs_lookup_server(volume->cell, key, &vldb->fs_server[i],
					   vldb->addr_version[i]);
		if (IS_ERR(server)) {
			ret = PTR_ERR(server);
			if (ret == -ENOENT ||
			    ret == -ENOMEDIUM)
				continue;
			goto error_2;
		}

		/* Insertion-sort by UUID */
		for (j = 0; j < slist->nr_servers; j++)
			if (memcmp(&slist->servers[j].server->uuid,
				   &server->uuid,
				   sizeof(server->uuid)) >= 0)
				break;
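		/* The same server may appear more than once in the VLDB
		 * record; if we already hold this one, drop the extra ref and
		 * skip it, otherwise open a hole at the insertion point.
		 */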
		if (j < slist->nr_servers) {
			if (slist->servers[j].server == server) {
				afs_unuse_server(volume->cell->net, server,
						 afs_server_trace_put_slist_isort);
				continue;
			}

			memmove(slist->servers + j + 1,
				slist->servers + j,
				(slist->nr_servers - j) * sizeof(struct afs_server_entry));
		}

		slist->servers[j].server = server;
		slist->servers[j].volume = volume;
		slist->nr_servers++;
	}

	if (slist->nr_servers == 0) {
		ret = -EDESTADDRREQ;
		goto error_2;
	}

	return slist;

error_2:
	afs_put_serverlist(volume->cell->net, slist);
error:
	return ERR_PTR(ret);
}

/*
 * Copy the annotations from an old server list to its potential replacement.
 */
bool afs_annotate_server_list(struct afs_server_list *new,
			      struct afs_server_list *old)
{
	struct afs_server *cur;
	int i, j;

	if (old->nr_servers != new->nr_servers)
		goto changed;

	for (i = 0; i < old->nr_servers; i++)
		if (old->servers[i].server != new->servers[i].server)
			goto changed;

	return false;

changed:
	/* Maintain the same preferred server as before if possible. */
	cur = old->servers[old->preferred].server;
	for (j = 0; j < new->nr_servers; j++) {
		if (new->servers[j].server == cur) {
			new->preferred = j;
			break;
		}
	}

	return true;
}

/*
 * Attach a volume to the servers it is going to use.
 */
void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist)
{
	struct afs_server_entry *se, *pe;
	struct afs_server *server;
	struct list_head *p;
	unsigned int i;

	spin_lock(&volume->cell->vs_lock);

	for (i = 0; i < slist->nr_servers; i++) {
		se = &slist->servers[i];
		server = se->server;

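		/* Insert into the server's list of attached volumes, keeping
		 * the list sorted by ascending volume ID.
		 */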
		list_for_each(p, &server->volumes) {
			pe = list_entry(p, struct afs_server_entry, slink);
			if (volume->vid <= pe->volume->vid)
				break;
		}
		list_add_tail_rcu(&se->slink, p);
	}

	slist->attached = true;
	spin_unlock(&volume->cell->vs_lock);
}

/*
 * Reattach a volume to the servers it is going to use when the server list is
 * replaced.  We try to switch the attachment points to avoid rewalking the
 * lists.
 */
void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *new,
				    struct afs_server_list *old)
{
	unsigned int n = 0, o = 0;

	spin_lock(&volume->cell->vs_lock);

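	/* Both lists are sorted by server UUID (see afs_alloc_server_list()),
	 * so they can be merged in a single pass: entries for servers common
	 * to both lists are transplanted, new servers are inserted and
	 * defunct ones are removed.
	 */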
	while (n < new->nr_servers || o < old->nr_servers) {
		struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL;
		struct afs_server_entry *po = o < old->nr_servers ? &old->servers[o] : NULL;
		struct afs_server_entry *s;
		struct list_head *p;
		int diff;

		if (pn && po && pn->server == po->server) {
			list_replace_rcu(&po->slink, &pn->slink);
			n++;
			o++;
			continue;
		}

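		/* If only one list still has entries, the remainder must be
		 * either all new (insert) or all defunct (remove).
		 */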
		if (pn && po)
			diff = memcmp(&pn->server->uuid, &po->server->uuid,
				      sizeof(pn->server->uuid));
		else
			diff = pn ? -1 : 1;

		if (diff < 0) {
			list_for_each(p, &pn->server->volumes) {
				s = list_entry(p, struct afs_server_entry, slink);
				if (volume->vid <= s->volume->vid)
					break;
			}
			list_add_tail_rcu(&pn->slink, p);
			n++;
		} else {
			list_del_rcu(&po->slink);
			o++;
		}
	}

	spin_unlock(&volume->cell->vs_lock);
}

/*
 * Detach a volume from the servers it has been using.
 */
void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist)
{
	unsigned int i;

	if (!slist->attached)
		return;

	spin_lock(&volume->cell->vs_lock);

	for (i = 0; i < slist->nr_servers; i++)
		list_del_rcu(&slist->servers[i].slink);

	slist->attached = false;
	spin_unlock(&volume->cell->vs_lock);
}