// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void			*priv;
};

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr,
				     char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0444,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0600,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0400,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	/*
	 * Long-lived cells are freed with kfree_const() in nvmem_cell_drop(),
	 * so they must own a copy of the name rather than borrow the
	 * caller's pointer.
	 */
	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/*
 * nvmem_setup_compat() - Create an additional binary entry in the driver's
 * sysfs directory, to stay backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
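
/*
 * Example: a minimal notifier consumer (illustrative sketch only, not part
 * of this file). The "foo_nvmem_notify" callback and notifier block names
 * are hypothetical; the event codes are the NVMEM_* actions passed to the
 * chain above.
 *
 *	static int foo_nvmem_notify(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_notify,
 *	};
 *
 *	nvmem_register_notifier(&foo_nvmem_nb);
 */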

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	/*
	 * Use a separate iterator: after a full list_for_each_entry()
	 * traversal the loop cursor is not NULL, so returning it directly
	 * would hand back a bogus pointer when no cell matches.
	 */
	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			of_node_put(cell->np);
			kfree(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
	if (rval)
		goto err_remove_cells;

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
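
/*
 * Example: a minimal provider registration sketch (illustrative only, not
 * part of this file). "foo_reg_read", "foo_probe" and struct foo_priv are
 * hypothetical; a real driver would talk to its hardware in the reg_read
 * callback and include <linux/platform_device.h>.
 *
 *	struct foo_priv {
 *		u8 mem[256];
 *	};
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy(val, foo->mem + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *foo;
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-nvmem",
 *			.read_only = true,
 *			.word_size = 1,
 *			.stride = 1,
 *			.reg_read = foo_reg_read,
 *		};
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		config.size = sizeof(foo->mem);
 *		config.priv = foo;
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev,
 *							   &config));
 *	}
 */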

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Zero on success, negative error number on failure.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for %s\n",
			nvmem_dev_name(nvmem));

		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
			} else {
				cell = nvmem_find_cell_by_name(nvmem,
							       lookup->cell_name);
				if (!cell) {
					__nvmem_device_put(nvmem);
					cell = ERR_PTR(-ENOENT);
				}
			}
			/* Stop at the first matching entry. */
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	/* Use a separate iterator so that "no match" returns NULL. */
	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
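
/*
 * Example device tree usage (illustrative sketch, not from a real board
 * file). A provider node exposes cells as children; a consumer references
 * them through "nvmem-cells" and "nvmem-cell-names", which is exactly what
 * of_nvmem_cell_get() parses above.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		mac_address: mac-address@0 {
 *			reg = <0x0 0x6>;
 *		};
 *	};
 *
 *	ethernet {
 *		nvmem-cells = <&mac_address>;
 *		nvmem-cell-names = "mac-address";
 *	};
 */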

/**
 * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

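/*
 * Worked example for the helper below (illustrative): with bit_offset = 2
 * and nbits = 10, the cell spans 2 bytes. Each byte is shifted right by 2,
 * the low 2 bits of the following byte are folded into the MSBs of the
 * previous one, and any stray bits above nbits in the last byte are masked
 * off, leaving a right-aligned 10-bit value.
 */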
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
		      struct nvmem_cell *cell,
		      void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
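
/*
 * Example consumer read (illustrative sketch, not part of this file). The
 * cell name "mac-address" is hypothetical and would come from DT or from a
 * lookup table registered by machine code.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	... use the len bytes at mac, then ...
 *
 *	kfree(mac);
 */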

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
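
/*
 * Example cell write (illustrative sketch). For a cell without a bit_offset
 * the buffer length must equal the cell size, as checked above; on success
 * the number of bytes written is returned.
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (ret != sizeof(mac))
 *		return ret < 0 ? ret : -EIO;
 */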

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
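
/*
 * Example (illustrative sketch): reading a 4-byte value in a single call,
 * with the get/read/put dance handled internally. The cell name "calib" is
 * hypothetical.
 *
 *	u32 calib;
 *	int ret = nvmem_cell_read_u32(dev, "calib", &calib);
 *
 *	if (ret)
 *		return ret;
 */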

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	/* the temporary cell does not own @info's name, so no duplication */
	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	/* the temporary cell does not own @info's name, so no duplication */
	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
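
/*
 * Example raw device access (illustrative sketch): read 16 bytes at offset
 * 0x40 from a named provider and write them back. "foo-nvmem" is a
 * hypothetical provider name.
 *
 *	struct nvmem_device *ndev = nvmem_device_get(dev, "foo-nvmem");
 *	u8 data[16];
 *	int ret;
 *
 *	if (IS_ERR(ndev))
 *		return PTR_ERR(ndev);
 *
 *	ret = nvmem_device_read(ndev, 0x40, sizeof(data), data);
 *	if (ret == sizeof(data))
 *		ret = nvmem_device_write(ndev, 0x40, sizeof(data), data);
 *
 *	nvmem_device_put(ndev);
 */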

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
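
/*
 * Example cell table (illustrative sketch): board code can describe cells
 * for a provider that has no device tree node. All names are hypothetical;
 * the table is matched against the provider by nvmem_add_cells_from_table()
 * above.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x0,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-nvmem0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */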

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
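
/*
 * Example lookup entries (illustrative sketch): they connect a consumer
 * device/con_id pair to a named cell on a named provider, matching the
 * search done in nvmem_cell_get_from_lookup() above. All names are
 * hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-nvmem0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-eth.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */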

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");