xref: /openbmc/linux/drivers/base/regmap/regcache.c (revision 9d749629)
1 /*
2  * Register cache access API
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/device.h>
16 #include <trace/events/regmap.h>
17 #include <linux/bsearch.h>
18 #include <linux/sort.h>
19 
20 #include "internal.h"
21 
/* Available cache backends, tried in order when matching config->cache_type. */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};
27 
28 static int regcache_hw_init(struct regmap *map)
29 {
30 	int i, j;
31 	int ret;
32 	int count;
33 	unsigned int val;
34 	void *tmp_buf;
35 
36 	if (!map->num_reg_defaults_raw)
37 		return -EINVAL;
38 
39 	if (!map->reg_defaults_raw) {
40 		u32 cache_bypass = map->cache_bypass;
41 		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
42 
43 		/* Bypass the cache access till data read from HW*/
44 		map->cache_bypass = 1;
45 		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
46 		if (!tmp_buf)
47 			return -EINVAL;
48 		ret = regmap_bulk_read(map, 0, tmp_buf,
49 				       map->num_reg_defaults_raw);
50 		map->cache_bypass = cache_bypass;
51 		if (ret < 0) {
52 			kfree(tmp_buf);
53 			return ret;
54 		}
55 		map->reg_defaults_raw = tmp_buf;
56 		map->cache_free = 1;
57 	}
58 
59 	/* calculate the size of reg_defaults */
60 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
61 		val = regcache_get_val(map->reg_defaults_raw,
62 				       i, map->cache_word_size);
63 		if (regmap_volatile(map, i * map->reg_stride))
64 			continue;
65 		count++;
66 	}
67 
68 	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
69 				      GFP_KERNEL);
70 	if (!map->reg_defaults) {
71 		ret = -ENOMEM;
72 		goto err_free;
73 	}
74 
75 	/* fill the reg_defaults */
76 	map->num_reg_defaults = count;
77 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
78 		val = regcache_get_val(map->reg_defaults_raw,
79 				       i, map->cache_word_size);
80 		if (regmap_volatile(map, i * map->reg_stride))
81 			continue;
82 		map->reg_defaults[j].reg = i * map->reg_stride;
83 		map->reg_defaults[j].def = val;
84 		j++;
85 	}
86 
87 	return 0;
88 
89 err_free:
90 	if (map->cache_free)
91 		kfree(map->reg_defaults_raw);
92 
93 	return ret;
94 }
95 
/**
 * regcache_init: Initialise the register cache for a map.
 *
 * @map: map to configure.
 * @config: configuration supplied by the driver.
 *
 * Selects the cache backend matching config->cache_type, copies (or
 * crafts from hardware) the register defaults, and runs the backend's
 * init hook.
 *
 * Return a negative errno on failure, 0 on success.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* Every supplied default must be aligned to the register stride */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	/* No caching requested: route all accesses straight to hardware */
	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	/* Find the backend implementing the requested cache type */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	/* Each cached value occupies a whole number of bytes */
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory backend operations */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	/* Fall back to the raw defaults count as the register range */
	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	/* The raw defaults are only ours to free if read back from HW */
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
176 
/**
 * regcache_exit: Release the resources held by the register cache.
 *
 * @map: map whose cache is being torn down.
 */
void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	/* The raw defaults are only ours to free if we read them from HW */
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}
194 
195 /**
196  * regcache_read: Fetch the value of a given register from the cache.
197  *
198  * @map: map to configure.
199  * @reg: The register index.
200  * @value: The value to be returned.
201  *
202  * Return a negative value on failure, 0 on success.
203  */
204 int regcache_read(struct regmap *map,
205 		  unsigned int reg, unsigned int *value)
206 {
207 	int ret;
208 
209 	if (map->cache_type == REGCACHE_NONE)
210 		return -ENOSYS;
211 
212 	BUG_ON(!map->cache_ops);
213 
214 	if (!regmap_volatile(map, reg)) {
215 		ret = map->cache_ops->read(map, reg, value);
216 
217 		if (ret == 0)
218 			trace_regmap_reg_read_cache(map->dev, reg, *value);
219 
220 		return ret;
221 	}
222 
223 	return -EINVAL;
224 }
225 
226 /**
227  * regcache_write: Set the value of a given register in the cache.
228  *
229  * @map: map to configure.
230  * @reg: The register index.
231  * @value: The new register value.
232  *
233  * Return a negative value on failure, 0 on success.
234  */
235 int regcache_write(struct regmap *map,
236 		   unsigned int reg, unsigned int value)
237 {
238 	if (map->cache_type == REGCACHE_NONE)
239 		return 0;
240 
241 	BUG_ON(!map->cache_ops);
242 
243 	if (!regmap_writeable(map, reg))
244 		return -EIO;
245 
246 	if (!regmap_volatile(map, reg))
247 		return map->cache_ops->write(map, reg, value);
248 
249 	return 0;
250 }
251 
/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops || !map->cache_ops->sync);

	map->lock(map);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	/* A clean cache needs no writing back */
	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first; bypass so the patch writes go straight
	 * to hardware without updating the cache.
	 */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		if (map->patch[i].reg % map->reg_stride) {
			ret = -EINVAL;
			goto out;
		}
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	/* Let the backend write back the whole register range */
	ret = map->cache_ops->sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state (also covers the error paths above) */
	map->cache_bypass = bypass;
	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
313 
/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops || !map->cache_ops->sync);

	map->lock(map);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map->dev, name, "start region");

	/* A clean cache needs no writing back */
	if (!map->cache_dirty)
		goto out;

	ret = map->cache_ops->sync(map, min, max);

out:
	trace_regcache_sync(map->dev, name, "stop region");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
359 
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map);
	/* Cache-only and cache-bypass are mutually exclusive */
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
381 
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map);
	map->cache_dirty = true;
	map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
398 
/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map);
	/* Cache-bypass and cache-only are mutually exclusive */
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
419 
420 bool regcache_set_val(void *base, unsigned int idx,
421 		      unsigned int val, unsigned int word_size)
422 {
423 	switch (word_size) {
424 	case 1: {
425 		u8 *cache = base;
426 		if (cache[idx] == val)
427 			return true;
428 		cache[idx] = val;
429 		break;
430 	}
431 	case 2: {
432 		u16 *cache = base;
433 		if (cache[idx] == val)
434 			return true;
435 		cache[idx] = val;
436 		break;
437 	}
438 	case 4: {
439 		u32 *cache = base;
440 		if (cache[idx] == val)
441 			return true;
442 		cache[idx] = val;
443 		break;
444 	}
445 	default:
446 		BUG();
447 	}
448 	return false;
449 }
450 
451 unsigned int regcache_get_val(const void *base, unsigned int idx,
452 			      unsigned int word_size)
453 {
454 	if (!base)
455 		return -EINVAL;
456 
457 	switch (word_size) {
458 	case 1: {
459 		const u8 *cache = base;
460 		return cache[idx];
461 	}
462 	case 2: {
463 		const u16 *cache = base;
464 		return cache[idx];
465 	}
466 	case 4: {
467 		const u32 *cache = base;
468 		return cache[idx];
469 	}
470 	default:
471 		BUG();
472 	}
473 	/* unreachable */
474 	return -1;
475 }
476 
477 static int regcache_default_cmp(const void *a, const void *b)
478 {
479 	const struct reg_default *_a = a;
480 	const struct reg_default *_b = b;
481 
482 	return _a->reg - _b->reg;
483 }
484 
485 int regcache_lookup_reg(struct regmap *map, unsigned int reg)
486 {
487 	struct reg_default key;
488 	struct reg_default *r;
489 
490 	key.reg = reg;
491 	key.def = 0;
492 
493 	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
494 		    sizeof(struct reg_default), regcache_default_cmp);
495 
496 	if (r)
497 		return r - map->reg_defaults;
498 	else
499 		return -ENOENT;
500 }
501