xref: /openbmc/linux/drivers/nvmem/core.c (revision 6d99a79c)
// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void *priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr,
				     char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0444,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0600,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0400,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	/*
	 * The name may be a string literal (cell table/config) or a
	 * kasprintf()ed string (DT); kfree_const() handles both.
	 */
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				   const struct nvmem_cell_info *info,
				   struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/*
 * nvmem_setup_compat() - Create an additional binary entry in the driver's
 * sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
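
/*
 * Example: a minimal event listener (an illustrative sketch only;
 * foo_nvmem_notify and foo_nb are hypothetical names, not part of this
 * file).  The data pointer carries a struct nvmem_device * for
 * NVMEM_ADD/NVMEM_REMOVE and a struct nvmem_cell * for
 * NVMEM_CELL_ADD/NVMEM_CELL_REMOVE:
 *
 *	static int foo_nvmem_notify(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device %s added\n",
 *				nvmem_dev_name(data));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_notify,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */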

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
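
/*
 * Example of a device tree fragment that nvmem_add_cells_from_of() parses
 * (an illustrative sketch; node names, labels and offsets are hypothetical).
 * Each child node becomes one cell; "reg" is <offset length> in bytes and
 * the optional "bits" property is <bit_offset nbits>:
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calib: calib@0 {
 *			reg = <0x0 0x4>;
 *		};
 *
 *		mac: mac@10 {
 *			reg = <0x10 0x6>;
 *		};
 *	};
 */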

/**
 * nvmem_register() - Register an nvmem device for a given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem.
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	/*
	 * The notifier chain returns notifier_call values (e.g. NOTIFY_OK),
	 * not error codes, so its return value must not be treated as one.
	 */
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
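
/*
 * Example: minimal provider registration (an illustrative sketch under
 * assumed names; struct foo_priv, foo_reg_read and foo_probe are
 * hypothetical and not part of this file):
 *
 *	struct foo_priv {
 *		u8 shadow[256];
 *	};
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config cfg = {
 *			.dev = &pdev->dev,
 *			.name = "foo-nvmem",
 *			.id = -1,
 *			.read_only = true,
 *			.word_size = 1,
 *			.stride = 1,
 *			.reg_read = foo_reg_read,
 *		};
 *		struct foo_priv *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		cfg.priv = foo;
 *		cfg.size = sizeof(foo->shadow);
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &cfg));
 *	}
 */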

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem.
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister a previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Zero on success, negative error code on failure.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for device %s\n",
			nvmem_dev_name(nvmem));

		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained from devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				goto out;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			/* Found the matching entry; stop searching. */
			break;
		}
	}

out:
	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
{
	struct nvmem_cell *iter, *cell = NULL;
	int i = 0;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (index == i++) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_index(nvmem, index);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
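
/*
 * Example: consumer lookup via device tree (an illustrative sketch; the
 * node and cell names are hypothetical).  Given a consumer node such as:
 *
 *	ethernet {
 *		nvmem-cells = <&mac>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * a driver can resolve the cell with:
 *
 *	cell = of_nvmem_cell_get(dev->of_node, "mac-address");
 */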

/**
 * nvmem_cell_get() - Get an nvmem cell from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
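
/*
 * Worked example for the helper above (illustrative): for a cell with
 * bit_offset = 2 and nbits = 5, cell->bytes = DIV_ROUND_UP(7, 8) = 1.
 * A raw byte 0b11010111 (value bits at positions 6..2) is shifted right
 * by 2 to 0b00110101 and then masked with GENMASK(4, 0), yielding
 * 0b00010101, i.e. the five value bits right-aligned in the buffer.
 */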

static int __nvmem_cell_read(struct nvmem_device *nvmem,
		      struct nvmem_cell *cell,
		      void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
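
/*
 * Example: reading a cell from a consumer driver (an illustrative sketch;
 * "calibration" is a hypothetical cell name):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... use len bytes at data, then kfree(data) ...
 */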

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
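
/*
 * Example (an illustrative sketch; the cell name is hypothetical):
 *
 *	u32 serial;
 *	int err;
 *
 *	err = nvmem_cell_read_u32(dev, "serial-number", &serial);
 *	if (err)
 *		return err;
 */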

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	size_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
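
/*
 * Example: board code describing cells for a provider that will be
 * registered later (an illustrative sketch; all names and offsets are
 * hypothetical):
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x10,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-nvmem0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */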

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
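
/*
 * Example: wiring a cell to a consumer device on a non-DT platform (an
 * illustrative sketch; all names are hypothetical).  With this entry in
 * place, nvmem_cell_get() with con_id "mac-address" on the "foo-eth.0"
 * device resolves to the "mac-address" cell of "foo-nvmem0":
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-nvmem0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-eth.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */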

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");