xref: /openbmc/linux/drivers/base/regmap/regcache.c (revision ee8a99bd)
1 /*
2  * Register cache access API
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/device.h>
16 #include <trace/events/regmap.h>
17 #include <linux/bsearch.h>
18 #include <linux/sort.h>
19 
20 #include "internal.h"
21 
/* Cache backends tried in order when matching map->cache_type in
 * regcache_init(). */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};
27 
28 static int regcache_hw_init(struct regmap *map)
29 {
30 	int i, j;
31 	int ret;
32 	int count;
33 	unsigned int val;
34 	void *tmp_buf;
35 
36 	if (!map->num_reg_defaults_raw)
37 		return -EINVAL;
38 
39 	if (!map->reg_defaults_raw) {
40 		u32 cache_bypass = map->cache_bypass;
41 		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
42 
43 		/* Bypass the cache access till data read from HW*/
44 		map->cache_bypass = 1;
45 		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
46 		if (!tmp_buf)
47 			return -EINVAL;
48 		ret = regmap_raw_read(map, 0, tmp_buf,
49 				      map->num_reg_defaults_raw);
50 		map->cache_bypass = cache_bypass;
51 		if (ret < 0) {
52 			kfree(tmp_buf);
53 			return ret;
54 		}
55 		map->reg_defaults_raw = tmp_buf;
56 		map->cache_free = 1;
57 	}
58 
59 	/* calculate the size of reg_defaults */
60 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
61 		val = regcache_get_val(map, map->reg_defaults_raw, i);
62 		if (regmap_volatile(map, i * map->reg_stride))
63 			continue;
64 		count++;
65 	}
66 
67 	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
68 				      GFP_KERNEL);
69 	if (!map->reg_defaults) {
70 		ret = -ENOMEM;
71 		goto err_free;
72 	}
73 
74 	/* fill the reg_defaults */
75 	map->num_reg_defaults = count;
76 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
77 		val = regcache_get_val(map, map->reg_defaults_raw, i);
78 		if (regmap_volatile(map, i * map->reg_stride))
79 			continue;
80 		map->reg_defaults[j].reg = i * map->reg_stride;
81 		map->reg_defaults[j].def = val;
82 		j++;
83 	}
84 
85 	return 0;
86 
87 err_free:
88 	if (map->cache_free)
89 		kfree(map->reg_defaults_raw);
90 
91 	return ret;
92 }
93 
/*
 * regcache_init: Set up the register cache described by @config.
 *
 * Selects the cache implementation matching map->cache_type, copies
 * (or reads back from hardware) the register defaults, and invokes
 * the backend's init callback.
 *
 * Returns 0 on success or a negative errno.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* Every supplied default must lie on a reg_stride boundary */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	if (map->cache_type == REGCACHE_NONE) {
		/* No caching: route every access straight to hardware */
		map->cache_bypass = true;
		return 0;
	}

	/* Look up the backend implementing the requested cache type */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	/* cache_word_size: bytes needed to hold one register value */
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
	/* Presence bitmap is allocated lazily by regcache_set_reg_present() */
	map->cache_present = NULL;
	map->cache_present_nbits = 0;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory for every backend */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	/* Without an explicit max_register, the raw defaults define
	 * the addressable range */
	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	/* cache_free means regcache_hw_init() allocated the raw table */
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
176 
177 void regcache_exit(struct regmap *map)
178 {
179 	if (map->cache_type == REGCACHE_NONE)
180 		return;
181 
182 	BUG_ON(!map->cache_ops);
183 
184 	kfree(map->cache_present);
185 	kfree(map->reg_defaults);
186 	if (map->cache_free)
187 		kfree(map->reg_defaults_raw);
188 
189 	if (map->cache_ops->exit) {
190 		dev_dbg(map->dev, "Destroying %s cache\n",
191 			map->cache_ops->name);
192 		map->cache_ops->exit(map);
193 	}
194 }
195 
196 /**
197  * regcache_read: Fetch the value of a given register from the cache.
198  *
199  * @map: map to configure.
200  * @reg: The register index.
201  * @value: The value to be returned.
202  *
203  * Return a negative value on failure, 0 on success.
204  */
205 int regcache_read(struct regmap *map,
206 		  unsigned int reg, unsigned int *value)
207 {
208 	int ret;
209 
210 	if (map->cache_type == REGCACHE_NONE)
211 		return -ENOSYS;
212 
213 	BUG_ON(!map->cache_ops);
214 
215 	if (!regmap_volatile(map, reg)) {
216 		ret = map->cache_ops->read(map, reg, value);
217 
218 		if (ret == 0)
219 			trace_regmap_reg_read_cache(map->dev, reg, *value);
220 
221 		return ret;
222 	}
223 
224 	return -EINVAL;
225 }
226 
227 /**
228  * regcache_write: Set the value of a given register in the cache.
229  *
230  * @map: map to configure.
231  * @reg: The register index.
232  * @value: The new register value.
233  *
234  * Return a negative value on failure, 0 on success.
235  */
236 int regcache_write(struct regmap *map,
237 		   unsigned int reg, unsigned int value)
238 {
239 	if (map->cache_type == REGCACHE_NONE)
240 		return 0;
241 
242 	BUG_ON(!map->cache_ops);
243 
244 	if (!regmap_writeable(map, reg))
245 		return -EIO;
246 
247 	if (!regmap_volatile(map, reg))
248 		return map->cache_ops->write(map, reg, value);
249 
250 	return 0;
251 }
252 
253 static int regcache_default_sync(struct regmap *map, unsigned int min,
254 				 unsigned int max)
255 {
256 	unsigned int reg;
257 
258 	for (reg = min; reg <= max; reg++) {
259 		unsigned int val;
260 		int ret;
261 
262 		if (regmap_volatile(map, reg))
263 			continue;
264 
265 		ret = regcache_read(map, reg, &val);
266 		if (ret)
267 			return ret;
268 
269 		/* Is this the hardware default?  If so skip. */
270 		ret = regcache_lookup_reg(map, reg);
271 		if (ret >= 0 && val == map->reg_defaults[ret].def)
272 			continue;
273 
274 		map->cache_bypass = 1;
275 		ret = _regmap_write(map, reg, val);
276 		map->cache_bypass = 0;
277 		if (ret)
278 			return ret;
279 		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
280 	}
281 
282 	return 0;
283 }
284 
/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	/* A clean cache has nothing to write back */
	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first; bypass so patch writes reach the
	 * hardware without being cached. */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		/* Patch registers must respect the register stride */
		if (map->patch[i].reg % map->reg_stride) {
			ret = -EINVAL;
			goto out;
		}
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	/* Use the backend's sync when available, generic walk otherwise */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state; this also covers error exits from
	 * the patch loop, which jump here with cache_bypass still set */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
349 
350 /**
351  * regcache_sync_region: Sync part  of the register cache with the hardware.
352  *
353  * @map: map to sync.
354  * @min: first register to sync
355  * @max: last register to sync
356  *
357  * Write all non-default register values in the specified region to
358  * the hardware.
359  *
360  * Return a negative value on failure, 0 on success.
361  */
362 int regcache_sync_region(struct regmap *map, unsigned int min,
363 			 unsigned int max)
364 {
365 	int ret = 0;
366 	const char *name;
367 	unsigned int bypass;
368 
369 	BUG_ON(!map->cache_ops);
370 
371 	map->lock(map->lock_arg);
372 
373 	/* Remember the initial bypass state */
374 	bypass = map->cache_bypass;
375 
376 	name = map->cache_ops->name;
377 	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
378 
379 	trace_regcache_sync(map->dev, name, "start region");
380 
381 	if (!map->cache_dirty)
382 		goto out;
383 
384 	if (map->cache_ops->sync)
385 		ret = map->cache_ops->sync(map, min, max);
386 	else
387 		ret = regcache_default_sync(map, min, max);
388 
389 out:
390 	trace_regcache_sync(map->dev, name, "stop region");
391 	/* Restore the bypass state */
392 	map->cache_bypass = bypass;
393 	map->unlock(map->lock_arg);
394 
395 	return ret;
396 }
397 EXPORT_SYMBOL_GPL(regcache_sync_region);
398 
399 /**
400  * regcache_drop_region: Discard part of the register cache
401  *
402  * @map: map to operate on
403  * @min: first register to discard
404  * @max: last register to discard
405  *
406  * Discard part of the register cache.
407  *
408  * Return a negative value on failure, 0 on success.
409  */
410 int regcache_drop_region(struct regmap *map, unsigned int min,
411 			 unsigned int max)
412 {
413 	unsigned int reg;
414 	int ret = 0;
415 
416 	if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
417 		return -EINVAL;
418 
419 	map->lock(map->lock_arg);
420 
421 	trace_regcache_drop_region(map->dev, min, max);
422 
423 	if (map->cache_present)
424 		for (reg = min; reg < max + 1; reg++)
425 			clear_bit(reg, map->cache_present);
426 
427 	if (map->cache_ops && map->cache_ops->drop)
428 		ret = map->cache_ops->drop(map, min, max);
429 
430 	map->unlock(map->lock_arg);
431 
432 	return ret;
433 }
434 EXPORT_SYMBOL_GPL(regcache_drop_region);
435 
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* Cache-only and cache-bypass are mutually exclusive */
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
457 
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	/* Taken under the map lock so it can't race with a sync in flight */
	map->cache_dirty = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
474 
/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the hardware
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* Bypass and cache-only are mutually exclusive */
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
495 
496 int regcache_set_reg_present(struct regmap *map, unsigned int reg)
497 {
498 	unsigned long *cache_present;
499 	unsigned int cache_present_size;
500 	unsigned int nregs;
501 	int i;
502 
503 	nregs = reg + 1;
504 	cache_present_size = BITS_TO_LONGS(nregs);
505 	cache_present_size *= sizeof(long);
506 
507 	if (!map->cache_present) {
508 		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
509 		if (!cache_present)
510 			return -ENOMEM;
511 		bitmap_zero(cache_present, nregs);
512 		map->cache_present = cache_present;
513 		map->cache_present_nbits = nregs;
514 	}
515 
516 	if (nregs > map->cache_present_nbits) {
517 		cache_present = krealloc(map->cache_present,
518 					 cache_present_size, GFP_KERNEL);
519 		if (!cache_present)
520 			return -ENOMEM;
521 		for (i = 0; i < nregs; i++)
522 			if (i >= map->cache_present_nbits)
523 				clear_bit(i, cache_present);
524 		map->cache_present = cache_present;
525 		map->cache_present_nbits = nregs;
526 	}
527 
528 	set_bit(reg, map->cache_present);
529 	return 0;
530 }
531 
532 bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
533 		      unsigned int val)
534 {
535 	if (regcache_get_val(map, base, idx) == val)
536 		return true;
537 
538 	/* Use device native format if possible */
539 	if (map->format.format_val) {
540 		map->format.format_val(base + (map->cache_word_size * idx),
541 				       val, 0);
542 		return false;
543 	}
544 
545 	switch (map->cache_word_size) {
546 	case 1: {
547 		u8 *cache = base;
548 		cache[idx] = val;
549 		break;
550 	}
551 	case 2: {
552 		u16 *cache = base;
553 		cache[idx] = val;
554 		break;
555 	}
556 	case 4: {
557 		u32 *cache = base;
558 		cache[idx] = val;
559 		break;
560 	}
561 	default:
562 		BUG();
563 	}
564 	return false;
565 }
566 
567 unsigned int regcache_get_val(struct regmap *map, const void *base,
568 			      unsigned int idx)
569 {
570 	if (!base)
571 		return -EINVAL;
572 
573 	/* Use device native format if possible */
574 	if (map->format.parse_val)
575 		return map->format.parse_val(regcache_get_val_addr(map, base,
576 								   idx));
577 
578 	switch (map->cache_word_size) {
579 	case 1: {
580 		const u8 *cache = base;
581 		return cache[idx];
582 	}
583 	case 2: {
584 		const u16 *cache = base;
585 		return cache[idx];
586 	}
587 	case 4: {
588 		const u32 *cache = base;
589 		return cache[idx];
590 	}
591 	default:
592 		BUG();
593 	}
594 	/* unreachable */
595 	return -1;
596 }
597 
598 static int regcache_default_cmp(const void *a, const void *b)
599 {
600 	const struct reg_default *_a = a;
601 	const struct reg_default *_b = b;
602 
603 	return _a->reg - _b->reg;
604 }
605 
606 int regcache_lookup_reg(struct regmap *map, unsigned int reg)
607 {
608 	struct reg_default key;
609 	struct reg_default *r;
610 
611 	key.reg = reg;
612 	key.def = 0;
613 
614 	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
615 		    sizeof(struct reg_default), regcache_default_cmp);
616 
617 	if (r)
618 		return r - map->reg_defaults;
619 	else
620 		return -ENOENT;
621 }
622 
/*
 * Sync one cache block register-by-register: for each present,
 * non-default register in the index range [start, end) write the
 * cached value to the hardware with the cache bypassed.
 *
 * @block: raw cache storage for this block
 * @block_base: register address of the block's first entry
 * @start, @end: index range within the block (not register addresses)
 *
 * Returns 0 on success or the first write error.
 */
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* Skip registers never written into the cache */
		if (!regcache_reg_present(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def)
			continue;

		/* Bypass so the write reaches the hardware, not the cache */
		map->cache_bypass = 1;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = 0;
		if (ret != 0)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}
656 
/*
 * Flush the run of raw cached values accumulated by
 * regcache_sync_block_raw() with a single bypassed raw bus write,
 * then reset *data so a new run can start.  [base, cur) delimits the
 * register addresses covered by the run.
 *
 * Returns 0 on success (including when no run is pending) or the
 * write error.
 */
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	/* No run in progress */
	if (*data == NULL)
		return 0;

	/* NOTE(review): count is in register-address units; the byte
	 * length below assumes one value per address, i.e. effectively
	 * reg_stride == 1 — confirm for strided maps. */
	count = cur - base;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - 1);

	/* Bypass so the raw write goes to the hardware, not the cache */
	map->cache_bypass = 1;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
				false);

	map->cache_bypass = 0;

	/* Mark the run as consumed */
	*data = NULL;

	return ret;
}
682 
/*
 * Sync one cache block by accumulating runs of consecutive present,
 * non-default registers and flushing each run with a single raw bus
 * write via regcache_sync_block_raw_flush().
 *
 * @block: raw cache storage for this block
 * @block_base: register address of the block's first entry
 * @start, @end: index range within the block (not register addresses)
 *
 * Returns 0 on success or the first write error.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* A hole in the cache ends the current run */
		if (!regcache_reg_present(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip, which also
		 * ends the current run: this register need not be sent. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Start a new run at this register if none is open */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run reaches the end of the block */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}
725 
/* Sync one cache block, preferring batched raw writes when the bus
 * supports them and falling back to per-register writes otherwise. */
int regcache_sync_block(struct regmap *map, void *block,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map))
		return regcache_sync_block_raw(map, block, block_base,
					       start, end);

	return regcache_sync_block_single(map, block, block_base,
					  start, end);
}
737