// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

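/*
 * Each entry in the maple tree is a kmalloc()ed array of unsigned long
 * values covering a contiguous run of registers; the tree index range of
 * the entry gives the first and last register in the run, and a register
 * is stored at its offset from the start of the run.
 */
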
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_find(&mas, reg);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

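	/* If an existing block already covers reg, just update it in place. */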
	rcu_read_lock();

	entry = mas_find(&mas, reg);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

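	/*
	 * Allocate a new block spanning any lower neighbour, the new
	 * register and any upper neighbour, then copy the existing values
	 * in around the new one.
	 */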
	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);

	mas_unlock(&mas);

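	/*
	 * The store replaced any neighbouring blocks in the tree, so their
	 * old storage can be freed once it has succeeded.
	 */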
	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	unsigned long lower_index, lower_last;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

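	/*
	 * Remove every block overlapping [min, max], saving any values that
	 * fall outside the window so they can be re-inserted as new,
	 * smaller blocks.
	 */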
	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup(entry, ((min - mas.index) *
						sizeof(unsigned long)),
					GFP_KERNEL);
			if (!lower) {
				ret = -ENOMEM;
				/* The out label expects the lock held */
				mas_lock(&mas);
				goto out;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup(&entry[max - mas.index + 1],
					((mas.last - max) *
					 sizeof(unsigned long)),
					GFP_KERNEL);
			if (!upper) {
				ret = -ENOMEM;
				/* The out label expects the lock held */
				mas_lock(&mas);
				goto out;
			}
		}

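		/*
		 * Anything we need from the old block has been copied out,
		 * so free it and erase the entry from the tree.
		 */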
		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
	kfree(lower);
	kfree(upper);

	return ret;
}

static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r;
	int ret = 0;

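	/*
	 * Bypass the cache while syncing so the values are written to the
	 * hardware without being written back into the cache.
	 */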
	map->cache_bypass = true;

	rcu_read_lock();

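	/*
	 * Walk each block overlapping [min, max] and write out the values
	 * that fall inside the window.
	 */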
	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			ret = regcache_sync_val(map, r, entry[r - mas.index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned long *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

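	/* Free every value block under the lock, then tear down the tree. */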
	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;

	mt = kmalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

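	/* Seed the cache with any register defaults supplied for the map. */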
	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_maple_write(map,
					   map->reg_defaults[i].reg,
					   map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};