// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	nvmem_cell_post_process_t cell_post_process;
	struct gpio_desc	*wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
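
/*
 * Illustrative sketch (not part of this file): with a single keepout
 * covering bytes 8-15 (fill value 0xff), a 16-byte read at offset 4 is
 * split by nvmem_access_with_keepouts() into three steps:
 *
 *	__nvmem_reg_read(nvmem, 4, val, 4);	  // bytes 4-7, before keepout
 *	memset(val + 4, 0xff, 8);		  // bytes 8-15, filled, not read
 *	__nvmem_reg_read(nvmem, 16, val + 12, 4); // bytes 16-19, after keepout
 *
 * The offsets above assume this hypothetical layout:
 *
 *	static const struct nvmem_keepout keepouts[] = {
 *		{ .start = 8, .end = 16, .value = 0xff },
 *	};
 */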

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading past the end of the device */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing past the end of the device */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
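
/*
 * Worked through the rules above (illustrative): a device with root_only
 * set and no reg_write callback ends up 0400; a world-readable, writable
 * device with both callbacks ends up 0644 (0400 | 0044 | 0200).
 */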

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell_entry **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_entry_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_entry_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
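
/*
 * A minimal consumer sketch (assumed mydrv_* names, not part of this file):
 * react to nvmem devices coming and going.
 *
 *	static int mydrv_nvmem_notify(struct notifier_block *nb,
 *				      unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block mydrv_nvmem_nb = {
 *		.notifier_call = mydrv_nvmem_notify,
 *	};
 *
 *	nvmem_register_notifier(&mydrv_nvmem_nb);
 *	...
 *	nvmem_unregister_notifier(&mydrv_nvmem_nb);
 */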

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}
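
/*
 * A keepout array that passes the checks above might look like this
 * (illustrative values, assuming word_size = 4 and stride = 4):
 *
 *	static const struct nvmem_keepout mydev_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0x00 },
 *		{ .start = 0x40, .end = 0x44, .value = 0xff },
 *	};
 *
 * Entries are sorted, do not overlap, start/end are stride-aligned, and
 * each region (and the hole between regions) is at least one word wide.
 */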

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell_entry *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_entry_add(cell);
	}

	return 0;
}
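
/*
 * The loop above parses provider child nodes laid out as in this
 * devicetree sketch (node names and values are illustrative, following
 * the nvmem cell bindings):
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calib: calib@10 {
 *			reg = <0x10 0x2>;	// offset 0x10, 2 bytes
 *			bits = <2 10>;		// bit_offset 2, 10 bits
 *		};
 *	};
 */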

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval  = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->cell_post_process = config->cell_post_process;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval) {
			ida_free(&nvmem_ida, nvmem->id);
			kfree(nvmem);
			return ERR_PTR(rval);
		}
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
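
/*
 * A minimal provider sketch (assumed mydev_* names and MYDEV_SIZE, not part
 * of this file): supply a reg_read callback and register the device.
 *
 *	static int mydev_reg_read(void *priv, unsigned int offset,
 *				  void *val, size_t bytes)
 *	{
 *		struct mydev *mydev = priv;
 *
 *		memcpy(val, mydev->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = &pdev->dev,
 *		.name = "mydev",
 *		.id = NVMEM_DEVID_AUTO,
 *		.owner = THIS_MODULE,
 *		.size = MYDEV_SIZE,
 *		.word_size = 1,
 *		.stride = 1,
 *		.priv = mydev,
 *		.reg_read = mydev_reg_read,
 *	};
 *
 *	struct nvmem_device *nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */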

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device for a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell should be freed with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
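
/*
 * A consumer node referencing a cell by phandle, as resolved above
 * (devicetree sketch with assumed labels):
 *
 *	ethernet@0 {
 *		nvmem-cells = <&macaddr>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * of_nvmem_cell_get(np, "mac-address") walks nvmem-cell-names to find the
 * index, follows the nvmem-cells phandle to the cell node, and resolves
 * the nvmem device from the cell's parent node.
 */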
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell should be freed with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
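
/*
 * Worked example for the shift above (illustrative): a cell with
 * bit_offset = 2 and nbits = 10 occupies two raw bytes. For raw bytes
 * {0xab, 0x07} (LSB first), each byte is shifted right by two and topped
 * up with the low bits of the following byte:
 *
 *	buf[0] = (0xab >> 2) | (0x07 << 6) = 0xea
 *	buf[1] = (0x07 >> 2) & GENMASK(1, 0) = 0x01
 *
 * leaving the 10-bit value 0x1ea in the first two bytes.
 */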

static int __nvmem_cell_read(struct nvmem_device *nvmem,
		      struct nvmem_cell_entry *cell,
		      void *buf, size_t *len, const char *id)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (nvmem->cell_post_process) {
		rc = nvmem->cell_post_process(nvmem->priv, id,
					      cell->offset, buf, cell->bytes);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
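
/*
 * Typical consumer usage (sketch, assuming a "mac-address" cell exists
 * for this device):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */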

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and a negative error code
 * on failure.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and a negative error code
 * on failure.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
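
/*
 * Example table registration (illustrative names): describe cells on a
 * provider named "mydev0" without devicetree support.
 *
 *	static struct nvmem_cell_info mydev_cells[] = {
 *		{
 *			.name = "serial-number",
 *			.offset = 0x0,
 *			.bytes = 8,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table mydev_cell_table = {
 *		.nvmem_name = "mydev0",
 *		.cells = mydev_cells,
 *		.ncells = ARRAY_SIZE(mydev_cells),
 *	};
 *
 *	nvmem_add_cell_table(&mydev_cell_table);
 */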

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
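
/*
 * Example lookup registration (illustrative names): connect the consumer
 * device "mydrv.0" and con_id "mac-address" to a cell on provider "mydev0",
 * so nvmem_cell_get(dev, "mac-address") resolves on non-DT systems.
 *
 *	static struct nvmem_cell_lookup mydev_lookups[] = {
 *		{
 *			.nvmem_name = "mydev0",
 *			.cell_name = "mac-address",
 *			.dev_id = "mydrv.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(mydev_lookups, ARRAY_SIZE(mydev_lookups));
 */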

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");