xref: /openbmc/linux/drivers/base/regmap/regcache.c (revision 1eb4c977)
/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	if (!map->reg_defaults_raw) {
		u32 cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache while we read the defaults back from the HW */
		map->cache_bypass = 1;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		ret = regmap_bulk_read(map, 0, tmp_buf,
				       map->num_reg_defaults_raw);
		map->cache_bypass = cache_bypass;
		if (ret < 0) {
			kfree(tmp_buf);
			return ret;
		}
		map->reg_defaults_raw = tmp_buf;
		map->cache_free = 1;
	}

	/* Count the non-volatile registers so we know how many defaults to allocate */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		if (regmap_volatile(map, i))
			continue;
		count++;
	}

	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
				      GFP_KERNEL);
	if (!map->reg_defaults) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* Fill reg_defaults with the non-volatile registers and their values */
	map->num_reg_defaults = count;
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		if (regmap_volatile(map, i))
			continue;
		map->reg_defaults[j].reg = i;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

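/*
 * Illustration of the crafting above (hypothetical values, not driver
 * API): for a map with num_reg_defaults_raw = 4 whose raw defaults read
 * back as { 0x00, 0x1a, 0xff, 0x03 } and where register 2 is volatile,
 * regcache_hw_init() produces num_reg_defaults = 3 and
 *
 *	reg_defaults = { { .reg = 0, .def = 0x00 },
 *			 { .reg = 1, .def = 0x1a },
 *			 { .reg = 3, .def = 0x03 } };
 *
 * volatile registers are simply skipped when the defaults table is built.
 */
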
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We need to ensure that the caller-supplied reg_defaults
	 * can't vanish from under us, so take our own copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults;
		 * cope with this by reading the registers back from the
		 * hardware and crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

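/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * normally enables caching purely through its struct regmap_config;
 * regcache_init() is then called for it by regmap_init().  The device
 * and register names below are hypothetical.
 *
 *	static const struct reg_default foo_reg_defaults[] = {
 *		{ .reg = 0x00, .def = 0x0000 },
 *		{ .reg = 0x01, .def = 0x1234 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x10,
 *		.reg_defaults = foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 */
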
void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to read from.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map->dev, reg, *value);

		return ret;
	}

	return -EINVAL;
}

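/*
 * Rough sketch of how the regmap core consults the cache on the read
 * path (simplified and paraphrased, not a verbatim copy of regmap.c):
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_read(map, reg, val);
 *		if (ret == 0)
 *			return 0;	(value served from the cache)
 *	}
 *
 *	if (map->cache_only)
 *		return -EBUSY;		(hardware is unreachable right now)
 *
 *	(otherwise fall through to a real bus read)
 */
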
/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to write to.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

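/*
 * The write path is the mirror image (again a simplified sketch rather
 * than a verbatim copy of regmap.c): the cache is updated first, and if
 * the map is in cache-only mode the hardware write is skipped and the
 * cache marked dirty for a later regcache_sync().
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_write(map, reg, val);
 *		if (ret != 0)
 *			return ret;
 *		if (map->cache_only) {
 *			map->cache_dirty = true;
 *			return 0;
 *		}
 *	}
 *
 *	(otherwise continue with the real bus write)
 */
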
/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to sync.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  Drivers that need different behaviour are free not to
 * use the provided syncing functionality.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops || !map->cache_ops->sync);

	mutex_lock(&map->lock);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	ret = map->cache_ops->sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	mutex_unlock(&map->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

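/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver whose device loses register state while powered off typically
 * resynchronises on resume.  struct foo_priv, foo_enable_supplies() and
 * the regmap pointer are made-up names.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_priv *foo = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = foo_enable_supplies(foo);
 *		if (ret)
 *			return ret;
 *
 *		regcache_cache_only(foo->regmap, false);
 *		return regcache_sync(foo->regmap);
 *	}
 *
 * Note that regcache_sync() only writes anything out if the cache has
 * been marked dirty, typically with regcache_mark_dirty() on suspend.
 */
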
/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops || !map->cache_ops->sync);

	mutex_lock(&map->lock);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map->dev, name, "start region");

	if (!map->cache_dirty)
		goto out;

	ret = map->cache_ops->sync(map, min, max);

out:
	trace_regcache_sync(map->dev, name, "stop region");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	mutex_unlock(&map->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

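/*
 * Usage sketch (hypothetical register names): a driver that has reset
 * only one block of its device can restore just that block instead of
 * the whole map:
 *
 *	regcache_sync_region(foo->regmap, FOO_DSP_BASE, FOO_DSP_END);
 *
 * FOO_DSP_BASE and FOO_DSP_END are made-up addresses bounding the block.
 */
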
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if writes should only update the cache rather than the
 *          hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	mutex_lock(&map->lock);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

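/*
 * Usage sketch (hypothetical driver code): the suspend-side counterpart
 * to the resume example above stops hardware writes and flags the cache
 * for a later sync before cutting power.  foo_disable_supplies() is a
 * made-up helper.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_priv *foo = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(foo->regmap, true);
 *		regcache_mark_dirty(foo->regmap);
 *		foo_disable_supplies(foo);
 *
 *		return 0;
 *	}
 */
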
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	mutex_lock(&map->lock);
	map->cache_dirty = true;
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if writes should bypass the cache and go straight to
 *          the hardware
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache.  This is useful when syncing the cache back to the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	mutex_lock(&map->lock);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

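/*
 * Usage sketch (hypothetical driver code): cache bypass is useful when a
 * burst of writes, such as a firmware or coefficient download, should go
 * straight to the hardware without being recorded in the cache.
 * FOO_FW_BASE and the fw buffer are made-up names.
 *
 *	regcache_cache_bypass(foo->regmap, true);
 *	ret = regmap_raw_write(foo->regmap, FOO_FW_BASE, fw->data, fw->size);
 *	regcache_cache_bypass(foo->regmap, false);
 */
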
/*
 * Store @val at index @idx in the raw cache @base, which holds entries
 * that are word_size bytes wide.  Returns true if the cache already
 * held @val (no update needed), false if the cache was updated.
 */
bool regcache_set_val(void *base, unsigned int idx,
		      unsigned int val, unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		u8 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	return false;
}

/*
 * Fetch the value stored at index @idx from the raw cache @base, which
 * holds entries that are word_size bytes wide.
 */
unsigned int regcache_get_val(const void *base, unsigned int idx,
			      unsigned int word_size)
{
	if (!base)
		return -EINVAL;

	switch (word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

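/*
 * Illustration (hypothetical contents, native endianness): with
 * val_bits = 16 the raw cache is treated as an array of u16, so for a
 * cache holding { 0x0000, 0x1234, 0xabcd }:
 *
 *	regcache_get_val(base, 2, 2) returns 0xabcd
 *	regcache_set_val(base, 1, 0x1234, 2) returns true (already set)
 */
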
static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

/*
 * Look up @reg in the sorted reg_defaults table.  Returns the index of
 * the matching entry, or -ENOENT if the register has no default.
 */
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}
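/*
 * Usage sketch: a cache backend can use regcache_lookup_reg() to decide
 * whether a cached value still matches the device default, for example
 * when working out if a register needs to be written during a sync
 * (illustrative, not a quote of any particular regcache backend):
 *
 *	ret = regcache_lookup_reg(map, reg);
 *	if (ret >= 0 && map->reg_defaults[ret].def == val)
 *		continue;	(still at its default, nothing to write)
 */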