// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

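/*
 * Look up a single register.  Each tree entry is an array of
 * unsigned long values covering a contiguous range of registers,
 * stored against the range's register numbers.
 */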
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

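/*
 * Update a register in place if it is already cached, otherwise
 * allocate a new entry, merging with any adjacent entries so that
 * contiguous registers share a single array.
 */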
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			map->alloc_flags);
	if (!entry)
		return -ENOMEM;

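	/* Stitch the lower data, the new value and the upper data together */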
	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

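/*
 * Remove the registers in [min, max] from the cache, trimming any
 * entries that only partially overlap the range by re-inserting the
 * surviving data.
 */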
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	/* initialized to work around false-positive -Wuninitialized warning */
	unsigned long lower_index = 0, lower_last = 0;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup(entry, ((min - mas.index) *
						sizeof(unsigned long)),
					map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup(&entry[max - mas.index + 1],
					((mas.last - max) *
					 sizeof(unsigned long)),
					map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

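/*
 * Write the cached values for registers [min, max) from a single
 * tree entry out to the device.
 */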
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

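	/*
	 * Pause the tree walk and drop the RCU read lock; the bus
	 * write below may sleep.
	 */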
	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}

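/*
 * Sync the cache to the device: walk the tree, batching runs of
 * registers that need to be written back into block writes.
 */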
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
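		/* Clamp to the requested window; an entry may extend past it */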
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

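/*
 * Free every cached entry and the tree itself.  Safe to call more
 * than once; subsequent calls are no-ops.
 */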
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* If we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

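/*
 * Store one contiguous block of register defaults, indexed first..last
 * in reg_defaults, as a single tree entry.
 */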
static int regcache_maple_insert_block(struct regmap *map, int first,
				       int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}

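/*
 * Allocate the tree and seed it from the register defaults, grouping
 * contiguous registers into shared entries.
 */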
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;
	int range_start;

	mt = kmalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!map->num_reg_defaults)
		return 0;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				goto err;

			range_start = i;
		}
	}

	/* Add the last block */
	ret = regcache_maple_insert_block(map, range_start,
					  map->num_reg_defaults - 1);
	if (ret != 0)
		goto err;

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};