xref: /openbmc/linux/drivers/base/regmap/regmap.c (revision 05bcf503)
1 /*
2  * Register map access API
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/mutex.h>
17 #include <linux/err.h>
18 #include <linux/rbtree.h>
19 
20 #define CREATE_TRACE_POINTS
21 #include <trace/events/regmap.h>
22 
23 #include "internal.h"
24 
25 /*
26  * Sometimes the trace infrastructure is not available early enough
27  * to debug failures during very early init.  For that sort of
28  * problem, defining LOG_DEVICE to the dev_name() of the device of
29  * interest adds printks for basic register I/O on that device.
30  */
31 #undef LOG_DEVICE
32 
33 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 			       unsigned int mask, unsigned int val,
35 			       bool *change);
36 
37 bool regmap_writeable(struct regmap *map, unsigned int reg)
38 {
39 	if (map->max_register && reg > map->max_register)
40 		return false;
41 
42 	if (map->writeable_reg)
43 		return map->writeable_reg(map->dev, reg);
44 
45 	return true;
46 }
47 
48 bool regmap_readable(struct regmap *map, unsigned int reg)
49 {
50 	if (map->max_register && reg > map->max_register)
51 		return false;
52 
53 	if (map->format.format_write)
54 		return false;
55 
56 	if (map->readable_reg)
57 		return map->readable_reg(map->dev, reg);
58 
59 	return true;
60 }
61 
62 bool regmap_volatile(struct regmap *map, unsigned int reg)
63 {
64 	if (!regmap_readable(map, reg))
65 		return false;
66 
67 	if (map->volatile_reg)
68 		return map->volatile_reg(map->dev, reg);
69 
70 	return true;
71 }
72 
73 bool regmap_precious(struct regmap *map, unsigned int reg)
74 {
75 	if (!regmap_readable(map, reg))
76 		return false;
77 
78 	if (map->precious_reg)
79 		return map->precious_reg(map->dev, reg);
80 
81 	return false;
82 }
83 
84 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
85 	unsigned int num)
86 {
87 	unsigned int i;
88 
89 	for (i = 0; i < num; i++)
90 		if (!regmap_volatile(map, reg + i))
91 			return false;
92 
93 	return true;
94 }
95 
96 static void regmap_format_2_6_write(struct regmap *map,
97 				     unsigned int reg, unsigned int val)
98 {
99 	u8 *out = map->work_buf;
100 
101 	*out = (reg << 6) | val;
102 }
103 
104 static void regmap_format_4_12_write(struct regmap *map,
105 				     unsigned int reg, unsigned int val)
106 {
107 	__be16 *out = map->work_buf;
108 	*out = cpu_to_be16((reg << 12) | val);
109 }
110 
111 static void regmap_format_7_9_write(struct regmap *map,
112 				    unsigned int reg, unsigned int val)
113 {
114 	__be16 *out = map->work_buf;
115 	*out = cpu_to_be16((reg << 9) | val);
116 }
117 
118 static void regmap_format_10_14_write(struct regmap *map,
119 				    unsigned int reg, unsigned int val)
120 {
121 	u8 *out = map->work_buf;
122 
123 	out[2] = val;
124 	out[1] = (val >> 8) | (reg << 6);
125 	out[0] = reg >> 2;
126 }
127 
128 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
129 {
130 	u8 *b = buf;
131 
132 	b[0] = val << shift;
133 }
134 
135 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
136 {
137 	__be16 *b = buf;
138 
139 	b[0] = cpu_to_be16(val << shift);
140 }
141 
142 static void regmap_format_16_native(void *buf, unsigned int val,
143 				    unsigned int shift)
144 {
145 	*(u16 *)buf = val << shift;
146 }
147 
148 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
149 {
150 	u8 *b = buf;
151 
152 	val <<= shift;
153 
154 	b[0] = val >> 16;
155 	b[1] = val >> 8;
156 	b[2] = val;
157 }
158 
159 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
160 {
161 	__be32 *b = buf;
162 
163 	b[0] = cpu_to_be32(val << shift);
164 }
165 
166 static void regmap_format_32_native(void *buf, unsigned int val,
167 				    unsigned int shift)
168 {
169 	*(u32 *)buf = val << shift;
170 }
171 
172 static unsigned int regmap_parse_8(void *buf)
173 {
174 	u8 *b = buf;
175 
176 	return b[0];
177 }
178 
179 static unsigned int regmap_parse_16_be(void *buf)
180 {
181 	__be16 *b = buf;
182 
183 	b[0] = be16_to_cpu(b[0]);
184 
185 	return b[0];
186 }
187 
188 static unsigned int regmap_parse_16_native(void *buf)
189 {
190 	return *(u16 *)buf;
191 }
192 
193 static unsigned int regmap_parse_24(void *buf)
194 {
195 	u8 *b = buf;
196 	unsigned int ret = b[2];
197 	ret |= ((unsigned int)b[1]) << 8;
198 	ret |= ((unsigned int)b[0]) << 16;
199 
200 	return ret;
201 }
202 
203 static unsigned int regmap_parse_32_be(void *buf)
204 {
205 	__be32 *b = buf;
206 
207 	b[0] = be32_to_cpu(b[0]);
208 
209 	return b[0];
210 }
211 
212 static unsigned int regmap_parse_32_native(void *buf)
213 {
214 	return *(u32 *)buf;
215 }
216 
217 static void regmap_lock_mutex(struct regmap *map)
218 {
219 	mutex_lock(&map->mutex);
220 }
221 
222 static void regmap_unlock_mutex(struct regmap *map)
223 {
224 	mutex_unlock(&map->mutex);
225 }
226 
227 static void regmap_lock_spinlock(struct regmap *map)
228 {
229 	spin_lock(&map->spinlock);
230 }
231 
232 static void regmap_unlock_spinlock(struct regmap *map)
233 {
234 	spin_unlock(&map->spinlock);
235 }
236 
237 static void dev_get_regmap_release(struct device *dev, void *res)
238 {
239 	/*
240 	 * We don't actually have anything to do here; the goal here
241 	 * is not to manage the regmap but to provide a simple way to
242 	 * get the regmap back given a struct device.
243 	 */
244 }
245 
246 static bool _regmap_range_add(struct regmap *map,
247 			      struct regmap_range_node *data)
248 {
249 	struct rb_root *root = &map->range_tree;
250 	struct rb_node **new = &(root->rb_node), *parent = NULL;
251 
252 	while (*new) {
253 		struct regmap_range_node *this =
254 			container_of(*new, struct regmap_range_node, node);
255 
256 		parent = *new;
257 		if (data->range_max < this->range_min)
258 			new = &((*new)->rb_left);
259 		else if (data->range_min > this->range_max)
260 			new = &((*new)->rb_right);
261 		else
262 			return false;
263 	}
264 
265 	rb_link_node(&data->node, parent, new);
266 	rb_insert_color(&data->node, root);
267 
268 	return true;
269 }
270 
271 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
272 						      unsigned int reg)
273 {
274 	struct rb_node *node = map->range_tree.rb_node;
275 
276 	while (node) {
277 		struct regmap_range_node *this =
278 			container_of(node, struct regmap_range_node, node);
279 
280 		if (reg < this->range_min)
281 			node = node->rb_left;
282 		else if (reg > this->range_max)
283 			node = node->rb_right;
284 		else
285 			return this;
286 	}
287 
288 	return NULL;
289 }
290 
291 static void regmap_range_exit(struct regmap *map)
292 {
293 	struct rb_node *next;
294 	struct regmap_range_node *range_node;
295 
296 	next = rb_first(&map->range_tree);
297 	while (next) {
298 		range_node = rb_entry(next, struct regmap_range_node, node);
299 		next = rb_next(&range_node->node);
300 		rb_erase(&range_node->node, &map->range_tree);
301 		kfree(range_node);
302 	}
303 
304 	kfree(map->selector_work_buf);
305 }
306 
307 /**
308  * regmap_init(): Initialise register map
309  *
310  * @dev: Device that will be interacted with
311  * @bus: Bus-specific callbacks to use with device
312  * @bus_context: Data passed to bus-specific callbacks
313  * @config: Configuration for register map
314  *
315  * The return value will be an ERR_PTR() on error or a valid pointer to
316  * a struct regmap.  This function should generally not be called
317  * directly; it should be called by bus-specific init functions.
318  */
319 struct regmap *regmap_init(struct device *dev,
320 			   const struct regmap_bus *bus,
321 			   void *bus_context,
322 			   const struct regmap_config *config)
323 {
324 	struct regmap *map, **m;
325 	int ret = -EINVAL;
326 	enum regmap_endian reg_endian, val_endian;
327 	int i, j;
328 
329 	if (!bus || !config)
330 		goto err;
331 
332 	map = kzalloc(sizeof(*map), GFP_KERNEL);
333 	if (map == NULL) {
334 		ret = -ENOMEM;
335 		goto err;
336 	}
337 
338 	if (bus->fast_io) {
339 		spin_lock_init(&map->spinlock);
340 		map->lock = regmap_lock_spinlock;
341 		map->unlock = regmap_unlock_spinlock;
342 	} else {
343 		mutex_init(&map->mutex);
344 		map->lock = regmap_lock_mutex;
345 		map->unlock = regmap_unlock_mutex;
346 	}
347 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
348 	map->format.pad_bytes = config->pad_bits / 8;
349 	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
350 	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
351 			config->val_bits + config->pad_bits, 8);
352 	map->reg_shift = config->pad_bits % 8;
353 	if (config->reg_stride)
354 		map->reg_stride = config->reg_stride;
355 	else
356 		map->reg_stride = 1;
357 	map->use_single_rw = config->use_single_rw;
358 	map->dev = dev;
359 	map->bus = bus;
360 	map->bus_context = bus_context;
361 	map->max_register = config->max_register;
362 	map->writeable_reg = config->writeable_reg;
363 	map->readable_reg = config->readable_reg;
364 	map->volatile_reg = config->volatile_reg;
365 	map->precious_reg = config->precious_reg;
366 	map->cache_type = config->cache_type;
367 	map->name = config->name;
368 
369 	if (config->read_flag_mask || config->write_flag_mask) {
370 		map->read_flag_mask = config->read_flag_mask;
371 		map->write_flag_mask = config->write_flag_mask;
372 	} else {
373 		map->read_flag_mask = bus->read_flag_mask;
374 	}
375 
376 	reg_endian = config->reg_format_endian;
377 	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
378 		reg_endian = bus->reg_format_endian_default;
379 	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
380 		reg_endian = REGMAP_ENDIAN_BIG;
381 
382 	val_endian = config->val_format_endian;
383 	if (val_endian == REGMAP_ENDIAN_DEFAULT)
384 		val_endian = bus->val_format_endian_default;
385 	if (val_endian == REGMAP_ENDIAN_DEFAULT)
386 		val_endian = REGMAP_ENDIAN_BIG;
387 
388 	switch (config->reg_bits + map->reg_shift) {
389 	case 2:
390 		switch (config->val_bits) {
391 		case 6:
392 			map->format.format_write = regmap_format_2_6_write;
393 			break;
394 		default:
395 			goto err_map;
396 		}
397 		break;
398 
399 	case 4:
400 		switch (config->val_bits) {
401 		case 12:
402 			map->format.format_write = regmap_format_4_12_write;
403 			break;
404 		default:
405 			goto err_map;
406 		}
407 		break;
408 
409 	case 7:
410 		switch (config->val_bits) {
411 		case 9:
412 			map->format.format_write = regmap_format_7_9_write;
413 			break;
414 		default:
415 			goto err_map;
416 		}
417 		break;
418 
419 	case 10:
420 		switch (config->val_bits) {
421 		case 14:
422 			map->format.format_write = regmap_format_10_14_write;
423 			break;
424 		default:
425 			goto err_map;
426 		}
427 		break;
428 
429 	case 8:
430 		map->format.format_reg = regmap_format_8;
431 		break;
432 
433 	case 16:
434 		switch (reg_endian) {
435 		case REGMAP_ENDIAN_BIG:
436 			map->format.format_reg = regmap_format_16_be;
437 			break;
438 		case REGMAP_ENDIAN_NATIVE:
439 			map->format.format_reg = regmap_format_16_native;
440 			break;
441 		default:
442 			goto err_map;
443 		}
444 		break;
445 
446 	case 32:
447 		switch (reg_endian) {
448 		case REGMAP_ENDIAN_BIG:
449 			map->format.format_reg = regmap_format_32_be;
450 			break;
451 		case REGMAP_ENDIAN_NATIVE:
452 			map->format.format_reg = regmap_format_32_native;
453 			break;
454 		default:
455 			goto err_map;
456 		}
457 		break;
458 
459 	default:
460 		goto err_map;
461 	}
462 
463 	switch (config->val_bits) {
464 	case 8:
465 		map->format.format_val = regmap_format_8;
466 		map->format.parse_val = regmap_parse_8;
467 		break;
468 	case 16:
469 		switch (val_endian) {
470 		case REGMAP_ENDIAN_BIG:
471 			map->format.format_val = regmap_format_16_be;
472 			map->format.parse_val = regmap_parse_16_be;
473 			break;
474 		case REGMAP_ENDIAN_NATIVE:
475 			map->format.format_val = regmap_format_16_native;
476 			map->format.parse_val = regmap_parse_16_native;
477 			break;
478 		default:
479 			goto err_map;
480 		}
481 		break;
482 	case 24:
483 		if (val_endian != REGMAP_ENDIAN_BIG)
484 			goto err_map;
485 		map->format.format_val = regmap_format_24;
486 		map->format.parse_val = regmap_parse_24;
487 		break;
488 	case 32:
489 		switch (val_endian) {
490 		case REGMAP_ENDIAN_BIG:
491 			map->format.format_val = regmap_format_32_be;
492 			map->format.parse_val = regmap_parse_32_be;
493 			break;
494 		case REGMAP_ENDIAN_NATIVE:
495 			map->format.format_val = regmap_format_32_native;
496 			map->format.parse_val = regmap_parse_32_native;
497 			break;
498 		default:
499 			goto err_map;
500 		}
501 		break;
502 	}
503 
504 	if (map->format.format_write) {
505 		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
506 		    (val_endian != REGMAP_ENDIAN_BIG))
507 			goto err_map;
508 		map->use_single_rw = true;
509 	}
510 
511 	if (!map->format.format_write &&
512 	    !(map->format.format_reg && map->format.format_val))
513 		goto err_map;
514 
515 	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
516 	if (map->work_buf == NULL) {
517 		ret = -ENOMEM;
518 		goto err_map;
519 	}
520 
521 	map->range_tree = RB_ROOT;
522 	for (i = 0; i < config->num_ranges; i++) {
523 		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
524 		struct regmap_range_node *new;
525 
526 		/* Sanity check */
527 		if (range_cfg->range_max < range_cfg->range_min) {
528 			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
529 				range_cfg->range_max, range_cfg->range_min);
530 			goto err_range;
531 		}
532 
533 		if (range_cfg->range_max > map->max_register) {
534 			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
535 				range_cfg->range_max, map->max_register);
536 			goto err_range;
537 		}
538 
539 		if (range_cfg->selector_reg > map->max_register) {
540 			dev_err(map->dev,
541 				"Invalid range %d: selector out of map\n", i);
542 			goto err_range;
543 		}
544 
545 		if (range_cfg->window_len == 0) {
546 			dev_err(map->dev, "Invalid range %d: window_len 0\n",
547 				i);
548 			goto err_range;
549 		}
550 
551 		/* Make sure that this register range has no selector
552 		   register or data window within its boundary */
553 		for (j = 0; j < config->num_ranges; j++) {
554 			unsigned sel_reg = config->ranges[j].selector_reg;
555 			unsigned win_min = config->ranges[j].window_start;
556 			unsigned win_max = win_min +
557 					   config->ranges[j].window_len - 1;
558 
559 			if (range_cfg->range_min <= sel_reg &&
560 			    sel_reg <= range_cfg->range_max) {
561 				dev_err(map->dev,
562 					"Range %d: selector for %d in window\n",
563 					i, j);
564 				goto err_range;
565 			}
566 
567 			if (!(win_max < range_cfg->range_min ||
568 			      win_min > range_cfg->range_max)) {
569 				dev_err(map->dev,
570 					"Range %d: window for %d in window\n",
571 					i, j);
572 				goto err_range;
573 			}
574 		}
575 
576 		new = kzalloc(sizeof(*new), GFP_KERNEL);
577 		if (new == NULL) {
578 			ret = -ENOMEM;
579 			goto err_range;
580 		}
581 
582 		new->map = map;
583 		new->name = range_cfg->name;
584 		new->range_min = range_cfg->range_min;
585 		new->range_max = range_cfg->range_max;
586 		new->selector_reg = range_cfg->selector_reg;
587 		new->selector_mask = range_cfg->selector_mask;
588 		new->selector_shift = range_cfg->selector_shift;
589 		new->window_start = range_cfg->window_start;
590 		new->window_len = range_cfg->window_len;
591 
592 		if (_regmap_range_add(map, new) == false) {
593 			dev_err(map->dev, "Failed to add range %d\n", i);
594 			kfree(new);
595 			goto err_range;
596 		}
597 
598 		if (map->selector_work_buf == NULL) {
599 			map->selector_work_buf =
600 				kzalloc(map->format.buf_size, GFP_KERNEL);
601 			if (map->selector_work_buf == NULL) {
602 				ret = -ENOMEM;
603 				goto err_range;
604 			}
605 		}
606 	}
607 
608 	ret = regcache_init(map, config);
609 	if (ret != 0)
610 		goto err_range;
611 
612 	regmap_debugfs_init(map, config->name);
613 
614 	/* Add a devres resource for dev_get_regmap() */
615 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
616 	if (!m) {
617 		ret = -ENOMEM;
618 		goto err_debugfs;
619 	}
620 	*m = map;
621 	devres_add(dev, m);
622 
623 	return map;
624 
625 err_debugfs:
626 	regmap_debugfs_exit(map);
627 	regcache_exit(map);
628 err_range:
629 	regmap_range_exit(map);
630 	kfree(map->work_buf);
631 err_map:
632 	kfree(map);
633 err:
634 	return ERR_PTR(ret);
635 }
636 EXPORT_SYMBOL_GPL(regmap_init);
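
/*
 * A minimal usage sketch for the init API above.  A driver normally
 * fills in a regmap_config and calls a bus-specific wrapper (for
 * example regmap_init_i2c()), which supplies the bus callbacks and
 * calls regmap_init().  The config values and bus names below are
 * purely illustrative.
 *
 *	static const struct regmap_config example_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	map = regmap_init(dev, &example_bus, example_bus_context,
 *			  &example_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */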
637 
638 static void devm_regmap_release(struct device *dev, void *res)
639 {
640 	regmap_exit(*(struct regmap **)res);
641 }
642 
643 /**
644  * devm_regmap_init(): Initialise managed register map
645  *
646  * @dev: Device that will be interacted with
647  * @bus: Bus-specific callbacks to use with device
648  * @bus_context: Data passed to bus-specific callbacks
649  * @config: Configuration for register map
650  *
651  * The return value will be an ERR_PTR() on error or a valid pointer
652  * to a struct regmap.  This function should generally not be called
653  * directly; it should be called by bus-specific init functions.  The
654  * map will be automatically freed by the device management code.
655  */
656 struct regmap *devm_regmap_init(struct device *dev,
657 				const struct regmap_bus *bus,
658 				void *bus_context,
659 				const struct regmap_config *config)
660 {
661 	struct regmap **ptr, *regmap;
662 
663 	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
664 	if (!ptr)
665 		return ERR_PTR(-ENOMEM);
666 
667 	regmap = regmap_init(dev, bus, bus_context, config);
668 	if (!IS_ERR(regmap)) {
669 		*ptr = regmap;
670 		devres_add(dev, ptr);
671 	} else {
672 		devres_free(ptr);
673 	}
674 
675 	return regmap;
676 }
677 EXPORT_SYMBOL_GPL(devm_regmap_init);
678 
679 /**
680  * regmap_reinit_cache(): Reinitialise the current register cache
681  *
682  * @map: Register map to operate on.
683  * @config: New configuration.  Only the cache data will be used.
684  *
685  * Discard any existing register cache for the map and initialize a
686  * new cache.  This can be used to restore the cache to defaults or to
687  * update the cache configuration to reflect runtime discovery of the
688  * hardware.
689  *
690  * No explicit locking is done here, the user needs to ensure that
691  * this function will not race with other calls to regmap.
692  */
693 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
694 {
695 	regcache_exit(map);
696 	regmap_debugfs_exit(map);
697 
698 	map->max_register = config->max_register;
699 	map->writeable_reg = config->writeable_reg;
700 	map->readable_reg = config->readable_reg;
701 	map->volatile_reg = config->volatile_reg;
702 	map->precious_reg = config->precious_reg;
703 	map->cache_type = config->cache_type;
704 
705 	regmap_debugfs_init(map, config->name);
706 
707 	map->cache_bypass = false;
708 	map->cache_only = false;
709 
710 	return regcache_init(map, config);
711 }
712 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
713 
714 /**
715  * regmap_exit(): Free a previously allocated register map
716  */
717 void regmap_exit(struct regmap *map)
718 {
719 	regcache_exit(map);
720 	regmap_debugfs_exit(map);
721 	regmap_range_exit(map);
722 	if (map->bus->free_context)
723 		map->bus->free_context(map->bus_context);
724 	kfree(map->work_buf);
725 	kfree(map);
726 }
727 EXPORT_SYMBOL_GPL(regmap_exit);
728 
729 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
730 {
731 	struct regmap **r = res;
732 	if (!r || !*r) {
733 		WARN_ON(!r || !*r);
734 		return 0;
735 	}
736 
737 	/* If the user didn't specify a name, match any */
738 	if (data)
739 		return (*r)->name == data;
740 	else
741 		return 1;
742 }
743 
744 /**
745  * dev_get_regmap(): Obtain the regmap (if any) for a device
746  *
747  * @dev: Device to retrieve the map for
748  * @name: Optional name for the register map, usually NULL.
749  *
750  * Returns the regmap for the device if one is present, or NULL.  If
751  * name is specified then it must match the name specified when
752  * registering the device, if it is NULL then the first regmap found
753  * will be used.  Devices with multiple register maps are very rare,
754  * generic code should normally not need to specify a name.
755  */
756 struct regmap *dev_get_regmap(struct device *dev, const char *name)
757 {
758 	struct regmap **r = devres_find(dev, dev_get_regmap_release,
759 					dev_get_regmap_match, (void *)name);
760 
761 	if (!r)
762 		return NULL;
763 	return *r;
764 }
765 EXPORT_SYMBOL_GPL(dev_get_regmap);
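
/*
 * A minimal usage sketch for dev_get_regmap(): code that only has a
 * struct device (for example a child driver using its parent's map)
 * can look the regmap up instead of having it passed in.  The variable
 * names are illustrative only.
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */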
766 
767 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
768 			       struct regmap_range_node *range,
769 			       unsigned int val_num)
770 {
771 	void *orig_work_buf;
772 	unsigned int win_offset;
773 	unsigned int win_page;
774 	bool page_chg;
775 	int ret;
776 
777 	win_offset = (*reg - range->range_min) % range->window_len;
778 	win_page = (*reg - range->range_min) / range->window_len;
779 
780 	if (val_num > 1) {
781 		/* Bulk write shouldn't cross range boundary */
782 		if (*reg + val_num - 1 > range->range_max)
783 			return -EINVAL;
784 
785 		/* ... or single page boundary */
786 		if (val_num > range->window_len - win_offset)
787 			return -EINVAL;
788 	}
789 
790 	/* It is possible to have the selector register inside the data
791 	   window.  In that case the selector register appears on every
792 	   page and needs no page switching when accessed alone. */
793 	if (val_num > 1 ||
794 	    range->window_start + win_offset != range->selector_reg) {
795 		/* Use separate work_buf during page switching */
796 		orig_work_buf = map->work_buf;
797 		map->work_buf = map->selector_work_buf;
798 
799 		ret = _regmap_update_bits(map, range->selector_reg,
800 					  range->selector_mask,
801 					  win_page << range->selector_shift,
802 					  &page_chg);
803 
804 		map->work_buf = orig_work_buf;
805 
806 		if (ret != 0)
807 			return ret;
808 	}
809 
810 	*reg = range->window_start + win_offset;
811 
812 	return 0;
813 }
814 
815 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
816 			     const void *val, size_t val_len)
817 {
818 	struct regmap_range_node *range;
819 	u8 *u8 = map->work_buf;
820 	void *buf;
821 	int ret = -ENOTSUPP;
822 	size_t len;
823 	int i;
824 
825 	/* Check for unwritable registers before we start */
826 	if (map->writeable_reg)
827 		for (i = 0; i < val_len / map->format.val_bytes; i++)
828 			if (!map->writeable_reg(map->dev,
829 						reg + (i * map->reg_stride)))
830 				return -EINVAL;
831 
832 	if (!map->cache_bypass && map->format.parse_val) {
833 		unsigned int ival;
834 		int val_bytes = map->format.val_bytes;
835 		for (i = 0; i < val_len / val_bytes; i++) {
836 			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
837 			ival = map->format.parse_val(map->work_buf);
838 			ret = regcache_write(map, reg + (i * map->reg_stride),
839 					     ival);
840 			if (ret) {
841 				dev_err(map->dev,
842 				   "Error in caching of register: %u ret: %d\n",
843 					reg + i, ret);
844 				return ret;
845 			}
846 		}
847 		if (map->cache_only) {
848 			map->cache_dirty = true;
849 			return 0;
850 		}
851 	}
852 
853 	range = _regmap_range_lookup(map, reg);
854 	if (range) {
855 		int val_num = val_len / map->format.val_bytes;
856 		int win_offset = (reg - range->range_min) % range->window_len;
857 		int win_residue = range->window_len - win_offset;
858 
859 		/* If the write goes beyond the end of the window split it */
860 		while (val_num > win_residue) {
861 			dev_dbg(map->dev, "Writing window %d/%d\n",
862 				win_residue, val_len / map->format.val_bytes);
863 			ret = _regmap_raw_write(map, reg, val, win_residue *
864 						map->format.val_bytes);
865 			if (ret != 0)
866 				return ret;
867 
868 			reg += win_residue;
869 			val_num -= win_residue;
870 			val += win_residue * map->format.val_bytes;
871 			val_len -= win_residue * map->format.val_bytes;
872 
873 			win_offset = (reg - range->range_min) %
874 				range->window_len;
875 			win_residue = range->window_len - win_offset;
876 		}
877 
878 		ret = _regmap_select_page(map, &reg, range, val_num);
879 		if (ret != 0)
880 			return ret;
881 	}
882 
883 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
884 
885 	u8[0] |= map->write_flag_mask;
886 
887 	trace_regmap_hw_write_start(map->dev, reg,
888 				    val_len / map->format.val_bytes);
889 
890 	/* If we're doing a single register write we can probably just
891 	 * send the work_buf directly, otherwise try to do a gather
892 	 * write.
893 	 */
894 	if (val == (map->work_buf + map->format.pad_bytes +
895 		    map->format.reg_bytes))
896 		ret = map->bus->write(map->bus_context, map->work_buf,
897 				      map->format.reg_bytes +
898 				      map->format.pad_bytes +
899 				      val_len);
900 	else if (map->bus->gather_write)
901 		ret = map->bus->gather_write(map->bus_context, map->work_buf,
902 					     map->format.reg_bytes +
903 					     map->format.pad_bytes,
904 					     val, val_len);
905 
906 	/* If that didn't work, fall back on linearising by hand. */
907 	if (ret == -ENOTSUPP) {
908 		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
909 		buf = kzalloc(len, GFP_KERNEL);
910 		if (!buf)
911 			return -ENOMEM;
912 
913 		memcpy(buf, map->work_buf, map->format.reg_bytes);
914 		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
915 		       val, val_len);
916 		ret = map->bus->write(map->bus_context, buf, len);
917 
918 		kfree(buf);
919 	}
920 
921 	trace_regmap_hw_write_done(map->dev, reg,
922 				   val_len / map->format.val_bytes);
923 
924 	return ret;
925 }
926 
927 int _regmap_write(struct regmap *map, unsigned int reg,
928 		  unsigned int val)
929 {
930 	struct regmap_range_node *range;
931 	int ret;
932 	BUG_ON(!map->format.format_write && !map->format.format_val);
933 
934 	if (!map->cache_bypass && map->format.format_write) {
935 		ret = regcache_write(map, reg, val);
936 		if (ret != 0)
937 			return ret;
938 		if (map->cache_only) {
939 			map->cache_dirty = true;
940 			return 0;
941 		}
942 	}
943 
944 #ifdef LOG_DEVICE
945 	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
946 		dev_info(map->dev, "%x <= %x\n", reg, val);
947 #endif
948 
949 	trace_regmap_reg_write(map->dev, reg, val);
950 
951 	if (map->format.format_write) {
952 		range = _regmap_range_lookup(map, reg);
953 		if (range) {
954 			ret = _regmap_select_page(map, &reg, range, 1);
955 			if (ret != 0)
956 				return ret;
957 		}
958 
959 		map->format.format_write(map, reg, val);
960 
961 		trace_regmap_hw_write_start(map->dev, reg, 1);
962 
963 		ret = map->bus->write(map->bus_context, map->work_buf,
964 				      map->format.buf_size);
965 
966 		trace_regmap_hw_write_done(map->dev, reg, 1);
967 
968 		return ret;
969 	} else {
970 		map->format.format_val(map->work_buf + map->format.reg_bytes
971 				       + map->format.pad_bytes, val, 0);
972 		return _regmap_raw_write(map, reg,
973 					 map->work_buf +
974 					 map->format.reg_bytes +
975 					 map->format.pad_bytes,
976 					 map->format.val_bytes);
977 	}
978 }
979 
980 /**
981  * regmap_write(): Write a value to a single register
982  *
983  * @map: Register map to write to
984  * @reg: Register to write to
985  * @val: Value to be written
986  *
987  * A value of zero will be returned on success, a negative errno will
988  * be returned in error cases.
989  */
990 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
991 {
992 	int ret;
993 
994 	if (reg % map->reg_stride)
995 		return -EINVAL;
996 
997 	map->lock(map);
998 
999 	ret = _regmap_write(map, reg, val);
1000 
1001 	map->unlock(map);
1002 
1003 	return ret;
1004 }
1005 EXPORT_SYMBOL_GPL(regmap_write);
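
/*
 * A minimal usage sketch, assuming a hypothetical device with a
 * control register at 0x01: errors are reported as negative errnos
 * and should be checked by the caller.
 *
 *	ret = regmap_write(map, 0x01, 0x80);
 *	if (ret != 0)
 *		dev_err(dev, "Failed to write register: %d\n", ret);
 */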
1006 
1007 /**
1008  * regmap_raw_write(): Write raw values to one or more registers
1009  *
1010  * @map: Register map to write to
1011  * @reg: Initial register to write to
1012  * @val: Block of data to be written, laid out for direct transmission to the
1013  *       device
1014  * @val_len: Length of data pointed to by val.
1015  *
1016  * This function is intended to be used for things like firmware
1017  * download where a large block of data needs to be transferred to the
1018  * device.  No formatting will be done on the data provided.
1019  *
1020  * A value of zero will be returned on success, a negative errno will
1021  * be returned in error cases.
1022  */
1023 int regmap_raw_write(struct regmap *map, unsigned int reg,
1024 		     const void *val, size_t val_len)
1025 {
1026 	int ret;
1027 
1028 	if (val_len % map->format.val_bytes)
1029 		return -EINVAL;
1030 	if (reg % map->reg_stride)
1031 		return -EINVAL;
1032 
1033 	map->lock(map);
1034 
1035 	ret = _regmap_raw_write(map, reg, val, val_len);
1036 
1037 	map->unlock(map);
1038 
1039 	return ret;
1040 }
1041 EXPORT_SYMBOL_GPL(regmap_raw_write);
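
/*
 * A minimal usage sketch for regmap_raw_write(): the buffer is sent
 * as-is, so it must already be laid out in the device's value format
 * (for example big-endian 16-bit words for a 16-bit big-endian map).
 * The register address and data below are purely illustrative.
 *
 *	static const u8 fw_block[] = { 0x12, 0x34, 0x56, 0x78 };
 *
 *	ret = regmap_raw_write(map, 0x10, fw_block, sizeof(fw_block));
 */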
1042 
1043 /*
1044  * regmap_bulk_write(): Write multiple registers to the device
1045  *
1046  * @map: Register map to write to
1047  * @reg: First register to be written to
1048  * @val: Block of data to be written, in native register size for device
1049  * @val_count: Number of registers to write
1050  *
1051  * This function is intended to be used for writing a large block of
1052  * data to the device either in a single transfer or multiple transfers.
1053  *
1054  * A value of zero will be returned on success, a negative errno will
1055  * be returned in error cases.
1056  */
1057 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1058 		     size_t val_count)
1059 {
1060 	int ret = 0, i;
1061 	size_t val_bytes = map->format.val_bytes;
1062 	void *wval;
1063 
1064 	if (!map->format.parse_val)
1065 		return -EINVAL;
1066 	if (reg % map->reg_stride)
1067 		return -EINVAL;
1068 
1069 	map->lock(map);
1070 
1071 	/* No formatting is required if val_bytes is 1 */
1072 	if (val_bytes == 1) {
1073 		wval = (void *)val;
1074 	} else {
1075 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
1076 		if (!wval) {
1077 			ret = -ENOMEM;
1078 			dev_err(map->dev, "Error in memory allocation\n");
1079 			goto out;
1080 		}
1081 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
1082 			map->format.parse_val(wval + i);
1083 	}
1084 	/*
1085 	 * Some devices do not support bulk write; for them we fall back
1086 	 * on a series of single register writes of the parsed data.
1087 	 */
1088 	if (map->use_single_rw) {
1089 		for (i = 0; i < val_count; i++) {
1090 			ret = _regmap_raw_write(map,
1091 						reg + (i * map->reg_stride),
1092 						wval + (i * val_bytes),
1093 						val_bytes);
1094 			if (ret != 0)
1095 				break;
1096 		}
1097 	} else {
1098 		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
1099 	}
1100 
1101 	if (val_bytes != 1)
1102 		kfree(wval);
1103 
1104 out:
1105 	map->unlock(map);
1106 	return ret;
1107 }
1108 EXPORT_SYMBOL_GPL(regmap_bulk_write);
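
/*
 * A minimal usage sketch for regmap_bulk_write(): unlike the raw
 * variant, the values are supplied in native CPU format and formatted
 * for the device by this function.  The register and values are
 * illustrative only.
 *
 *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(map, 0x20, coeffs, ARRAY_SIZE(coeffs));
 */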
1109 
1110 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1111 			    unsigned int val_len)
1112 {
1113 	struct regmap_range_node *range;
1114 	u8 *u8 = map->work_buf;
1115 	int ret;
1116 
1117 	range = _regmap_range_lookup(map, reg);
1118 	if (range) {
1119 		ret = _regmap_select_page(map, &reg, range,
1120 					  val_len / map->format.val_bytes);
1121 		if (ret != 0)
1122 			return ret;
1123 	}
1124 
1125 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
1126 
1127 	/*
1128 	 * Some buses or devices flag reads by setting the high bits in the
1129 	 * register address; since it's always the high bits for all
1130 	 * current formats we can do this here rather than in
1131 	 * formatting.  This may break if we get interesting formats.
1132 	 */
1133 	u8[0] |= map->read_flag_mask;
1134 
1135 	trace_regmap_hw_read_start(map->dev, reg,
1136 				   val_len / map->format.val_bytes);
1137 
1138 	ret = map->bus->read(map->bus_context, map->work_buf,
1139 			     map->format.reg_bytes + map->format.pad_bytes,
1140 			     val, val_len);
1141 
1142 	trace_regmap_hw_read_done(map->dev, reg,
1143 				  val_len / map->format.val_bytes);
1144 
1145 	return ret;
1146 }
1147 
1148 static int _regmap_read(struct regmap *map, unsigned int reg,
1149 			unsigned int *val)
1150 {
1151 	int ret;
1152 
1153 	if (!map->cache_bypass) {
1154 		ret = regcache_read(map, reg, val);
1155 		if (ret == 0)
1156 			return 0;
1157 	}
1158 
1159 	if (!map->format.parse_val)
1160 		return -EINVAL;
1161 
1162 	if (map->cache_only)
1163 		return -EBUSY;
1164 
1165 	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
1166 	if (ret == 0) {
1167 		*val = map->format.parse_val(map->work_buf);
1168 
1169 #ifdef LOG_DEVICE
1170 		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1171 			dev_info(map->dev, "%x => %x\n", reg, *val);
1172 #endif
1173 
1174 		trace_regmap_reg_read(map->dev, reg, *val);
1175 	}
1176 
1177 	if (ret == 0 && !map->cache_bypass)
1178 		regcache_write(map, reg, *val);
1179 
1180 	return ret;
1181 }
1182 
1183 /**
1184  * regmap_read(): Read a value from a single register
1185  *
1186  * @map: Register map to write to
1187  * @reg: Register to be read from
1188  * @val: Pointer to store read value
1189  *
1190  * A value of zero will be returned on success, a negative errno will
1191  * be returned in error cases.
1192  */
1193 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
1194 {
1195 	int ret;
1196 
1197 	if (reg % map->reg_stride)
1198 		return -EINVAL;
1199 
1200 	map->lock(map);
1201 
1202 	ret = _regmap_read(map, reg, val);
1203 
1204 	map->unlock(map);
1205 
1206 	return ret;
1207 }
1208 EXPORT_SYMBOL_GPL(regmap_read);
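
/*
 * A minimal usage sketch, assuming a hypothetical status register at
 * 0x02: the read value is returned through the val pointer and the
 * return code must be checked before using it.
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(map, 0x02, &status);
 *	if (ret == 0 && (status & BIT(0)))
 *		dev_dbg(dev, "Device is ready\n");
 */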
1209 
1210 /**
1211  * regmap_raw_read(): Read raw data from the device
1212  *
1213  * @map: Register map to read from
1214  * @reg: First register to be read from
1215  * @val: Pointer to a buffer where the read data will be stored
1216  * @val_len: Size of data to read
1217  *
1218  * A value of zero will be returned on success, a negative errno will
1219  * be returned in error cases.
1220  */
1221 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1222 		    size_t val_len)
1223 {
1224 	size_t val_bytes = map->format.val_bytes;
1225 	size_t val_count = val_len / val_bytes;
1226 	unsigned int v;
1227 	int ret, i;
1228 
1229 	if (val_len % map->format.val_bytes)
1230 		return -EINVAL;
1231 	if (reg % map->reg_stride)
1232 		return -EINVAL;
1233 
1234 	map->lock(map);
1235 
1236 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
1237 	    map->cache_type == REGCACHE_NONE) {
1238 		/* Physical block read if there's no cache involved */
1239 		ret = _regmap_raw_read(map, reg, val, val_len);
1240 
1241 	} else {
1242 		/* Otherwise go word by word for the cache; should be low
1243 		 * cost as we expect to hit the cache.
1244 		 */
1245 		for (i = 0; i < val_count; i++) {
1246 			ret = _regmap_read(map, reg + (i * map->reg_stride),
1247 					   &v);
1248 			if (ret != 0)
1249 				goto out;
1250 
1251 			map->format.format_val(val + (i * val_bytes), v, 0);
1252 		}
1253 	}
1254 
1255  out:
1256 	map->unlock(map);
1257 
1258 	return ret;
1259 }
1260 EXPORT_SYMBOL_GPL(regmap_raw_read);
1261 
1262 /**
1263  * regmap_bulk_read(): Read multiple registers from the device
1264  *
1265  * @map: Register map to read from
1266  * @reg: First register to be read from
1267  * @val: Pointer to store read value, in native register size for device
1268  * @val_count: Number of registers to read
1269  *
1270  * A value of zero will be returned on success, a negative errno will
1271  * be returned in error cases.
1272  */
1273 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
1274 		     size_t val_count)
1275 {
1276 	int ret, i;
1277 	size_t val_bytes = map->format.val_bytes;
1278 	bool vol = regmap_volatile_range(map, reg, val_count);
1279 
1280 	if (!map->format.parse_val)
1281 		return -EINVAL;
1282 	if (reg % map->reg_stride)
1283 		return -EINVAL;
1284 
1285 	if (vol || map->cache_type == REGCACHE_NONE) {
1286 		/*
1287 		 * Some devices do not support bulk read; for them we
1288 		 * issue a series of single read operations.
1289 		 */
1290 		if (map->use_single_rw) {
1291 			for (i = 0; i < val_count; i++) {
1292 				ret = regmap_raw_read(map,
1293 						reg + (i * map->reg_stride),
1294 						val + (i * val_bytes),
1295 						val_bytes);
1296 				if (ret != 0)
1297 					return ret;
1298 			}
1299 		} else {
1300 			ret = regmap_raw_read(map, reg, val,
1301 					      val_bytes * val_count);
1302 			if (ret != 0)
1303 				return ret;
1304 		}
1305 
1306 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
1307 			map->format.parse_val(val + i);
1308 	} else {
1309 		for (i = 0; i < val_count; i++) {
1310 			unsigned int ival;
1311 			ret = regmap_read(map, reg + (i * map->reg_stride),
1312 					  &ival);
1313 			if (ret != 0)
1314 				return ret;
1315 			memcpy(val + (i * val_bytes), &ival, val_bytes);
1316 		}
1317 	}
1318 
1319 	return 0;
1320 }
1321 EXPORT_SYMBOL_GPL(regmap_bulk_read);
1322 
1323 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
1324 			       unsigned int mask, unsigned int val,
1325 			       bool *change)
1326 {
1327 	int ret;
1328 	unsigned int tmp, orig;
1329 
1330 	ret = _regmap_read(map, reg, &orig);
1331 	if (ret != 0)
1332 		return ret;
1333 
1334 	tmp = orig & ~mask;
1335 	tmp |= val & mask;
1336 
1337 	if (tmp != orig) {
1338 		ret = _regmap_write(map, reg, tmp);
1339 		*change = true;
1340 	} else {
1341 		*change = false;
1342 	}
1343 
1344 	return ret;
1345 }
1346 
1347 /**
1348  * regmap_update_bits: Perform a read/modify/write cycle on the register map
1349  *
1350  * @map: Register map to update
1351  * @reg: Register to update
1352  * @mask: Bitmask to change
1353  * @val: New value for bitmask
1354  *
1355  * Returns zero for success, a negative number on error.
1356  */
1357 int regmap_update_bits(struct regmap *map, unsigned int reg,
1358 		       unsigned int mask, unsigned int val)
1359 {
1360 	bool change;
1361 	int ret;
1362 
1363 	map->lock(map);
1364 	ret = _regmap_update_bits(map, reg, mask, val, &change);
1365 	map->unlock(map);
1366 
1367 	return ret;
1368 }
1369 EXPORT_SYMBOL_GPL(regmap_update_bits);
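
/*
 * A minimal usage sketch for regmap_update_bits(): only the bits set
 * in the mask are affected, and no write is issued if the register
 * already holds the requested value.  The register and bit layout are
 * hypothetical.
 *
 *	ret = regmap_update_bits(map, 0x03, 0x30, 0x10);
 */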
1370 
1371 /**
1372  * regmap_update_bits_check: Perform a read/modify/write cycle on the
1373  *                           register map and report if updated
1374  *
1375  * @map: Register map to update
1376  * @reg: Register to update
1377  * @mask: Bitmask to change
1378  * @val: New value for bitmask
1379  * @change: Boolean indicating if a write was done
1380  *
1381  * Returns zero for success, a negative number on error.
1382  */
1383 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
1384 			     unsigned int mask, unsigned int val,
1385 			     bool *change)
1386 {
1387 	int ret;
1388 
1389 	map->lock(map);
1390 	ret = _regmap_update_bits(map, reg, mask, val, change);
1391 	map->unlock(map);
1392 	return ret;
1393 }
1394 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
1395 
1396 /**
1397  * regmap_register_patch: Register and apply register updates to be applied
1398  *                        on device initialisation
1399  *
1400  * @map: Register map to apply updates to.
1401  * @regs: Values to update.
1402  * @num_regs: Number of entries in regs.
1403  *
1404  * Register a set of register updates to be applied to the device
1405  * whenever the device registers are synchronised with the cache and
1406  * apply them immediately.  Typically this is used to apply
1407  * corrections to the device defaults on startup, such as the
1408  * updates some vendors provide for undocumented registers.
1409  */
1410 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
1411 			  int num_regs)
1412 {
1413 	int i, ret;
1414 	bool bypass;
1415 
1416 	/* If needed the implementation can be extended to support this */
1417 	if (map->patch)
1418 		return -EBUSY;
1419 
1420 	map->lock(map);
1421 
1422 	bypass = map->cache_bypass;
1423 
1424 	map->cache_bypass = true;
1425 
1426 	/* Write out first; it's useful to apply even if we fail later. */
1427 	for (i = 0; i < num_regs; i++) {
1428 		ret = _regmap_write(map, regs[i].reg, regs[i].def);
1429 		if (ret != 0) {
1430 			dev_err(map->dev, "Failed to write %x = %x: %d\n",
1431 				regs[i].reg, regs[i].def, ret);
1432 			goto out;
1433 		}
1434 	}
1435 
1436 	map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
1437 	if (map->patch != NULL) {
1438 		memcpy(map->patch, regs,
1439 		       num_regs * sizeof(struct reg_default));
1440 		map->patch_regs = num_regs;
1441 	} else {
1442 		ret = -ENOMEM;
1443 	}
1444 
1445 out:
1446 	map->cache_bypass = bypass;
1447 
1448 	map->unlock(map);
1449 
1450 	return ret;
1451 }
1452 EXPORT_SYMBOL_GPL(regmap_register_patch);
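
/*
 * A minimal usage sketch for regmap_register_patch(), with made-up
 * register addresses and values standing in for a vendor-supplied
 * errata sequence:
 *
 *	static const struct reg_default example_patch[] = {
 *		{ .reg = 0x7a, .def = 0x0021 },
 *		{ .reg = 0x7b, .def = 0x0180 },
 *	};
 *
 *	ret = regmap_register_patch(map, example_patch,
 *				    ARRAY_SIZE(example_patch));
 */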
1453 
1454 /*
1455  * regmap_get_val_bytes(): Report the size of a register value
1456  *
1457  * Report the size of a register value, mainly intended for use by
1458  * generic infrastructure built on top of regmap.
1459  */
1460 int regmap_get_val_bytes(struct regmap *map)
1461 {
1462 	if (map->format.format_write)
1463 		return -EINVAL;
1464 
1465 	return map->format.val_bytes;
1466 }
1467 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
1468 
1469 static int __init regmap_initcall(void)
1470 {
1471 	regmap_debugfs_initcall();
1472 
1473 	return 0;
1474 }
1475 postcore_initcall(regmap_initcall);
1476