xref: /openbmc/linux/drivers/nvmem/core.c (revision 151f4e2b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * nvmem framework core.
4  *
5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 #include "nvmem.h"
21 
/*
 * In-kernel representation of one nvmem cell: a named, byte- (or bit-)
 * granular region of an nvmem provider.
 */
struct nvmem_cell {
	const char		*name;		/* cell identifier */
	int			offset;		/* byte offset within the provider */
	int			bytes;		/* size in bytes (covers nbits if set) */
	int			bit_offset;	/* first bit within the first byte */
	int			nbits;		/* bit width; 0 = whole-byte cell */
	struct device_node	*np;		/* DT node of origin (refcounted), or NULL */
	struct nvmem_device	*nvmem;		/* owning provider */
	struct list_head	node;		/* entry in nvmem->cells */
};
32 
/* Serialises provider lookups and every provider's cell list. */
static DEFINE_MUTEX(nvmem_mutex);
/* Allocates unique ids (nvmem->id) for the default "nvmemN" names. */
static DEFINE_IDA(nvmem_ida);

/* Protects the list of board-file-registered cell tables. */
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

/* Protects the non-DT (dev_id, con_id) -> cell lookup list. */
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

/* Notified on NVMEM_ADD/REMOVE and NVMEM_CELL_ADD/REMOVE events. */
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
43 
44 
45 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
46 			  void *val, size_t bytes)
47 {
48 	if (nvmem->reg_read)
49 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
50 
51 	return -EINVAL;
52 }
53 
54 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
55 			   void *val, size_t bytes)
56 {
57 	if (nvmem->reg_write)
58 		return nvmem->reg_write(nvmem->priv, offset, val, bytes);
59 
60 	return -EINVAL;
61 }
62 
/* Device-core release callback: runs when the last device ref is dropped. */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/* Return the id to the allocator, then free the wrapper itself. */
	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}
70 
/* Shared device type so every nvmem device gets the same release path. */
static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

/* Providers register on this bus; consumer lookups search it. */
static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};
78 
79 static int of_nvmem_match(struct device *dev, void *nvmem_np)
80 {
81 	return dev->of_node == nvmem_np;
82 }
83 
84 static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
85 {
86 	struct device *d;
87 
88 	if (!nvmem_np)
89 		return NULL;
90 
91 	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
92 
93 	if (!d)
94 		return NULL;
95 
96 	return to_nvmem_device(d);
97 }
98 
99 static struct nvmem_device *nvmem_find(const char *name)
100 {
101 	struct device *d;
102 
103 	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
104 
105 	if (!d)
106 		return NULL;
107 
108 	return to_nvmem_device(d);
109 }
110 
/*
 * Unlink @cell from its provider and free it.  Consumers are notified
 * first (NVMEM_CELL_REMOVE) so they can drop their references.
 */
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	/*
	 * NOTE(review): cells built by nvmem_cell_info_to_nvmem_cell()
	 * store the caller's info->name pointer directly, so this kfree()
	 * assumes ownership of a heap string was transferred — confirm
	 * (mainline later moved to kstrdup_const()/kfree_const()).
	 */
	kfree(cell->name);
	kfree(cell);
}
121 
/* Drop every cell registered against @nvmem (unregister / error unwind). */
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	/* _safe variant because nvmem_cell_drop() unlinks and frees entries. */
	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}
129 
/* Publish @cell on its provider's list, then notify consumers. */
static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
137 
/*
 * Fill in @cell from the caller-supplied @info and validate the offset
 * against the provider's stride.  Returns 0 or -EINVAL on misalignment.
 *
 * NOTE(review): info->name is stored by pointer, not copied — the string
 * must outlive the cell (see the kfree in nvmem_cell_drop()).
 */
static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				   const struct nvmem_cell_info *info,
				   struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	/* A bit-granular cell occupies just enough whole bytes. */
	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
163 
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	/* Temporary pointer array so a mid-loop failure can be unwound. */
	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			/* This cell was never published; free it directly. */
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	/* Cells [0, i) were fully added: unlink and free each of them. */
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
212 
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * The callback runs in process context (blocking notifier chain) for
 * NVMEM_ADD/REMOVE and NVMEM_CELL_ADD/REMOVE events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
225 
/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Counterpart of nvmem_register_notifier().
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
238 
/*
 * Create cells from any board-file-registered table whose nvmem_name
 * matches this device.  Runs at registration time.  On failure, cells
 * already added stay on the list for the caller to unwind via
 * nvmem_device_remove_all_cells().
 */
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					/* Never published; free directly. */
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}
275 
276 static struct nvmem_cell *
277 nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
278 {
279 	struct nvmem_cell *iter, *cell = NULL;
280 
281 	mutex_lock(&nvmem_mutex);
282 	list_for_each_entry(iter, &nvmem->cells, node) {
283 		if (strcmp(cell_id, iter->name) == 0) {
284 			cell = iter;
285 			break;
286 		}
287 	}
288 	mutex_unlock(&nvmem_mutex);
289 
290 	return cell;
291 }
292 
293 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
294 {
295 	struct device_node *parent, *child;
296 	struct device *dev = &nvmem->dev;
297 	struct nvmem_cell *cell;
298 	const __be32 *addr;
299 	int len;
300 
301 	parent = dev->of_node;
302 
303 	for_each_child_of_node(parent, child) {
304 		addr = of_get_property(child, "reg", &len);
305 		if (!addr || (len < 2 * sizeof(u32))) {
306 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
307 			return -EINVAL;
308 		}
309 
310 		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
311 		if (!cell)
312 			return -ENOMEM;
313 
314 		cell->nvmem = nvmem;
315 		cell->np = of_node_get(child);
316 		cell->offset = be32_to_cpup(addr++);
317 		cell->bytes = be32_to_cpup(addr);
318 		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
319 
320 		addr = of_get_property(child, "bits", &len);
321 		if (addr && len == (2 * sizeof(u32))) {
322 			cell->bit_offset = be32_to_cpup(addr++);
323 			cell->nbits = be32_to_cpup(addr);
324 		}
325 
326 		if (cell->nbits)
327 			cell->bytes = DIV_ROUND_UP(
328 					cell->nbits + cell->bit_offset,
329 					BITS_PER_BYTE);
330 
331 		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
332 			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
333 				cell->name, nvmem->stride);
334 			/* Cells already added will be freed later. */
335 			kfree(cell->name);
336 			kfree(cell);
337 			return -EINVAL;
338 		}
339 
340 		nvmem_cell_add(cell);
341 	}
342 
343 	return 0;
344 }
345 
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */

struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	/* Unique id; used for the default "nvmemN" device name. */
	rval  = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	/* owner pins the provider module while consumers hold references. */
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	/* config->id == -1 means "use config->name verbatim". */
	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	/* Read-only if requested, declared in firmware, or no write hook. */
	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	/* Optional legacy "eeprom" attribute on the parent device. */
	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	/* Cells may come from the config, board tables, and device tree. */
	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);	/* final put frees via nvmem_release() */

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
452 
/*
 * Final teardown; runs when the last kref is dropped, either from
 * nvmem_unregister() or from the last consumer's __nvmem_device_put().
 */
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	/* Remove the legacy "eeprom" attribute if compat mode was set up. */
	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);	/* frees nvmem via nvmem_release() */
}
468 
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Actual teardown is deferred until the last consumer reference is
 * dropped (see nvmem_device_release()).
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
479 
/* devres destructor: unregister the managed nvmem device. */
static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}
484 
485 /**
486  * devm_nvmem_register() - Register a managed nvmem device for given
487  * nvmem_config.
488  * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
489  *
490  * @dev: Device that uses the nvmem device.
491  * @config: nvmem device configuration with which nvmem device is created.
492  *
493  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
494  * on success.
495  */
496 struct nvmem_device *devm_nvmem_register(struct device *dev,
497 					 const struct nvmem_config *config)
498 {
499 	struct nvmem_device **ptr, *nvmem;
500 
501 	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
502 	if (!ptr)
503 		return ERR_PTR(-ENOMEM);
504 
505 	nvmem = nvmem_register(config);
506 
507 	if (!IS_ERR(nvmem)) {
508 		*ptr = nvmem;
509 		devres_add(dev, ptr);
510 	} else {
511 		devres_free(ptr);
512 	}
513 
514 	return nvmem;
515 }
516 EXPORT_SYMBOL_GPL(devm_nvmem_register);
517 
/* devres match callback: true when the stored pointer equals @data. */
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	return *(struct nvmem_device **)res == data;
}
524 
/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be an negative on error or a zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	/* Finds the devres entry and runs devm_nvmem_release() on it. */
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);
539 
/*
 * Look up an nvmem device by DT node (@np) or by name, and take the
 * references a consumer needs: the device ref from the bus lookup, a
 * module ref on the provider, and a kref on the nvmem device.  Returns
 * -EPROBE_DEFER when no matching provider is registered yet.
 */
static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		/* Drop the reference taken by the bus lookup. */
		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}
564 
/* Release the three references taken by __nvmem_device_get(). */
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
571 
572 #if IS_ENABLED(CONFIG_OF)
573 /**
574  * of_nvmem_device_get() - Get nvmem device from a given id
575  *
576  * @np: Device tree node that uses the nvmem device.
577  * @id: nvmem name from nvmem-names property.
578  *
579  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
580  * on success.
581  */
582 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
583 {
584 
585 	struct device_node *nvmem_np;
586 	int index = 0;
587 
588 	if (id)
589 		index = of_property_match_string(np, "nvmem-names", id);
590 
591 	nvmem_np = of_parse_phandle(np, "nvmem", index);
592 	if (!nvmem_np)
593 		return ERR_PTR(-ENOENT);
594 
595 	return __nvmem_device_get(nvmem_np, NULL);
596 }
597 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
598 #endif
599 
600 /**
601  * nvmem_device_get() - Get nvmem device from a given id
602  *
603  * @dev: Device that uses the nvmem device.
604  * @dev_name: name of the requested nvmem device.
605  *
606  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
607  * on success.
608  */
609 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
610 {
611 	if (dev->of_node) { /* try dt first */
612 		struct nvmem_device *nvmem;
613 
614 		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
615 
616 		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
617 			return nvmem;
618 
619 	}
620 
621 	return __nvmem_device_get(NULL, dev_name);
622 }
623 EXPORT_SYMBOL_GPL(nvmem_device_get);
624 
/* devres match callback for devm_nvmem_device_put(). */
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **slot = res;

	/* An empty devres entry is a bug; warn and do not match it. */
	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
634 
/* devres destructor: drop the managed nvmem device reference. */
static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
639 
/**
 * devm_nvmem_device_put() - put already got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	/* Non-zero means @nvmem was not a managed reference of @dev. */
	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
657 
/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
668 
669 /**
670  * devm_nvmem_device_get() - Get nvmem cell of device form a given id
671  *
672  * @dev: Device that requests the nvmem device.
673  * @id: name id for the requested nvmem device.
674  *
675  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
676  * on success.  The nvmem_cell will be freed by the automatically once the
677  * device is freed.
678  */
679 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
680 {
681 	struct nvmem_device **ptr, *nvmem;
682 
683 	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
684 	if (!ptr)
685 		return ERR_PTR(-ENOMEM);
686 
687 	nvmem = nvmem_device_get(dev, id);
688 	if (!IS_ERR(nvmem)) {
689 		*ptr = nvmem;
690 		devres_add(dev, ptr);
691 	} else {
692 		devres_free(ptr);
693 	}
694 
695 	return nvmem;
696 }
697 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
698 
/*
 * Resolve a (dev_id, con_id) pair to a cell through the board-file
 * lookup table.  Returns the cell, -EINVAL for a NULL @dev, -ENOENT when
 * no entry or cell matches, or the provider-lookup error (typically
 * -EPROBE_DEFER when the provider is not registered yet).
 */
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				/* Drop the provider refs taken above. */
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}
738 
739 #if IS_ENABLED(CONFIG_OF)
740 static struct nvmem_cell *
741 nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
742 {
743 	struct nvmem_cell *iter, *cell = NULL;
744 
745 	mutex_lock(&nvmem_mutex);
746 	list_for_each_entry(iter, &nvmem->cells, node) {
747 		if (np == iter->np) {
748 			cell = iter;
749 			break;
750 		}
751 	}
752 	mutex_unlock(&nvmem_mutex);
753 
754 	return cell;
755 }
756 
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	/*
	 * NOTE(review): of_get_next_parent() drops the reference on
	 * cell_np while taking one on the parent; cell_np is still used
	 * below, which relies on the provider holding a reference to each
	 * cell node (cell->np) — confirm.
	 */
	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		/* No such cell: drop the provider references. */
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
802 #endif
803 
804 /**
805  * nvmem_cell_get() - Get nvmem cell of device form a given cell name
806  *
807  * @dev: Device that requests the nvmem cell.
808  * @id: nvmem cell name to get (this corresponds with the name from the
809  *      nvmem-cell-names property for DT systems and with the con_id from
810  *      the lookup entry for non-DT systems).
811  *
812  * Return: Will be an ERR_PTR() on error or a valid pointer
813  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
814  * nvmem_cell_put().
815  */
816 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
817 {
818 	struct nvmem_cell *cell;
819 
820 	if (dev->of_node) { /* try dt first */
821 		cell = of_nvmem_cell_get(dev->of_node, id);
822 		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
823 			return cell;
824 	}
825 
826 	/* NULL cell id only allowed for device tree; invalid otherwise */
827 	if (!id)
828 		return ERR_PTR(-EINVAL);
829 
830 	return nvmem_cell_get_from_lookup(dev, id);
831 }
832 EXPORT_SYMBOL_GPL(nvmem_cell_get);
833 
/* devres destructor: release the managed cell reference. */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
838 
839 /**
840  * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
841  *
842  * @dev: Device that requests the nvmem cell.
843  * @id: nvmem cell name id to get.
844  *
845  * Return: Will be an ERR_PTR() on error or a valid pointer
846  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
847  * automatically once the device is freed.
848  */
849 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
850 {
851 	struct nvmem_cell **ptr, *cell;
852 
853 	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
854 	if (!ptr)
855 		return ERR_PTR(-ENOMEM);
856 
857 	cell = nvmem_cell_get(dev, id);
858 	if (!IS_ERR(cell)) {
859 		*ptr = cell;
860 		devres_add(dev, ptr);
861 	} else {
862 		devres_free(ptr);
863 	}
864 
865 	return cell;
866 }
867 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
868 
/* devres match callback for devm_nvmem_cell_put(). */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **slot = res;

	/* An empty devres entry is a bug; warn and do not match it. */
	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
878 
/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	/* Non-zero means @cell was not a managed reference of @dev. */
	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
896 
/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 *
 * Drops the provider references; the cell object itself is owned by the
 * provider and freed when the provider goes away.
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
909 
910 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
911 {
912 	u8 *p, *b;
913 	int i, extra, bit_offset = cell->bit_offset;
914 
915 	p = b = buf;
916 	if (bit_offset) {
917 		/* First shift */
918 		*b++ >>= bit_offset;
919 
920 		/* setup rest of the bytes if any */
921 		for (i = 1; i < cell->bytes; i++) {
922 			/* Get bits from next byte and shift them towards msb */
923 			*p |= *b << (BITS_PER_BYTE - bit_offset);
924 
925 			p = b;
926 			*b++ >>= bit_offset;
927 		}
928 	} else {
929 		/* point to the msb */
930 		p += cell->bytes - 1;
931 	}
932 
933 	/* result fits in less bytes */
934 	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
935 	while (--extra >= 0)
936 		*p-- = 0;
937 
938 	/* clear msb bits if any leftover in the last byte */
939 	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
940 }
941 
/*
 * Read a cell's raw bytes into @buf and post-process bit-granular cells.
 * @len, when non-NULL, receives the number of bytes placed in @buf.
 */
static int __nvmem_cell_read(struct nvmem_device *nvmem,
		      struct nvmem_cell *cell,
		      void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
962 
963 /**
964  * nvmem_cell_read() - Read a given nvmem cell
965  *
966  * @cell: nvmem cell to be read.
967  * @len: pointer to length of cell which will be populated on successful read;
968  *	 can be NULL.
969  *
970  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
971  * buffer should be freed by the consumer with a kfree().
972  */
973 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
974 {
975 	struct nvmem_device *nvmem = cell->nvmem;
976 	u8 *buf;
977 	int rc;
978 
979 	if (!nvmem)
980 		return ERR_PTR(-EINVAL);
981 
982 	buf = kzalloc(cell->bytes, GFP_KERNEL);
983 	if (!buf)
984 		return ERR_PTR(-ENOMEM);
985 
986 	rc = __nvmem_cell_read(nvmem, cell, buf, len);
987 	if (rc) {
988 		kfree(buf);
989 		return ERR_PTR(rc);
990 	}
991 
992 	return buf;
993 }
994 EXPORT_SYMBOL_GPL(nvmem_cell_read);
995 
/*
 * Build a write buffer for a bit-granular cell: copy the caller's @_buf,
 * shift it up by cell->bit_offset, and merge in the surrounding bits
 * read back from the device so a whole-byte write preserves them.
 *
 * Returns a kmalloc'd buffer of cell->bytes (caller frees) or ERR_PTR().
 */
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		/* Remember the pre-shift byte so its msb bits carry over. */
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			/*
			 * NOTE(review): the carry shift uses
			 * BITS_PER_BYTE - 1 - bit_offset; verify the carried
			 * bits land in the right positions for all
			 * bit_offset values.
			 */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
1048 
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	/* Byte-aligned cells must be written whole; bit cells may differ. */
	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/* Bit-granular cells need a read-modify-write scratch buffer. */
	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
1085 
1086 /**
1087  * nvmem_cell_read_u16() - Read a cell value as an u16
1088  *
1089  * @dev: Device that requests the nvmem cell.
1090  * @cell_id: Name of nvmem cell to read.
1091  * @val: pointer to output value.
1092  *
1093  * Return: 0 on success or negative errno.
1094  */
1095 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1096 {
1097 	struct nvmem_cell *cell;
1098 	void *buf;
1099 	size_t len;
1100 
1101 	cell = nvmem_cell_get(dev, cell_id);
1102 	if (IS_ERR(cell))
1103 		return PTR_ERR(cell);
1104 
1105 	buf = nvmem_cell_read(cell, &len);
1106 	if (IS_ERR(buf)) {
1107 		nvmem_cell_put(cell);
1108 		return PTR_ERR(buf);
1109 	}
1110 	if (len != sizeof(*val)) {
1111 		kfree(buf);
1112 		nvmem_cell_put(cell);
1113 		return -EINVAL;
1114 	}
1115 	memcpy(val, buf, sizeof(*val));
1116 	kfree(buf);
1117 	nvmem_cell_put(cell);
1118 
1119 	return 0;
1120 }
1121 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1122 
1123 /**
1124  * nvmem_cell_read_u32() - Read a cell value as an u32
1125  *
1126  * @dev: Device that requests the nvmem cell.
1127  * @cell_id: Name of nvmem cell to read.
1128  * @val: pointer to output value.
1129  *
1130  * Return: 0 on success or negative errno.
1131  */
1132 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1133 {
1134 	struct nvmem_cell *cell;
1135 	void *buf;
1136 	size_t len;
1137 
1138 	cell = nvmem_cell_get(dev, cell_id);
1139 	if (IS_ERR(cell))
1140 		return PTR_ERR(cell);
1141 
1142 	buf = nvmem_cell_read(cell, &len);
1143 	if (IS_ERR(buf)) {
1144 		nvmem_cell_put(cell);
1145 		return PTR_ERR(buf);
1146 	}
1147 	if (len != sizeof(*val)) {
1148 		kfree(buf);
1149 		nvmem_cell_put(cell);
1150 		return -EINVAL;
1151 	}
1152 	memcpy(val, buf, sizeof(*val));
1153 
1154 	kfree(buf);
1155 	nvmem_cell_put(cell);
1156 	return 0;
1157 }
1158 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1159 
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	/* Temporary on-stack cell; never published on the cell list. */
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1191 
1192 /**
1193  * nvmem_device_cell_write() - Write cell to a given nvmem device
1194  *
1195  * @nvmem: nvmem device to be written to.
1196  * @info: nvmem cell info to be written.
1197  * @buf: buffer to be written to cell.
1198  *
1199  * Return: length of bytes written or negative error code on failure.
1200  */
1201 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1202 			    struct nvmem_cell_info *info, void *buf)
1203 {
1204 	struct nvmem_cell cell;
1205 	int rc;
1206 
1207 	if (!nvmem)
1208 		return -EINVAL;
1209 
1210 	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1211 	if (rc)
1212 		return rc;
1213 
1214 	return nvmem_cell_write(&cell, buf, cell.bytes);
1215 }
1216 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1217 
1218 /**
1219  * nvmem_device_read() - Read from a given nvmem device
1220  *
1221  * @nvmem: nvmem device to read from.
1222  * @offset: offset in nvmem device.
1223  * @bytes: number of bytes to read.
1224  * @buf: buffer pointer which will be populated on successful read.
1225  *
1226  * Return: length of successful bytes read on success and negative
1227  * error code on error.
1228  */
1229 int nvmem_device_read(struct nvmem_device *nvmem,
1230 		      unsigned int offset,
1231 		      size_t bytes, void *buf)
1232 {
1233 	int rc;
1234 
1235 	if (!nvmem)
1236 		return -EINVAL;
1237 
1238 	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1239 
1240 	if (rc)
1241 		return rc;
1242 
1243 	return bytes;
1244 }
1245 EXPORT_SYMBOL_GPL(nvmem_device_read);
1246 
1247 /**
1248  * nvmem_device_write() - Write cell to a given nvmem device
1249  *
1250  * @nvmem: nvmem device to be written to.
1251  * @offset: offset in nvmem device.
1252  * @bytes: number of bytes to write.
1253  * @buf: buffer to be written.
1254  *
1255  * Return: length of bytes written or negative error code on failure.
1256  */
1257 int nvmem_device_write(struct nvmem_device *nvmem,
1258 		       unsigned int offset,
1259 		       size_t bytes, void *buf)
1260 {
1261 	int rc;
1262 
1263 	if (!nvmem)
1264 		return -EINVAL;
1265 
1266 	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1267 
1268 	if (rc)
1269 		return rc;
1270 
1271 
1272 	return bytes;
1273 }
1274 EXPORT_SYMBOL_GPL(nvmem_device_write);
1275 
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	/* Serialise against consumers walking nvmem_cell_tables. */
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1288 
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	/* Serialise against consumers walking nvmem_cell_tables. */
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1301 
1302 /**
1303  * nvmem_add_cell_lookups() - register a list of cell lookup entries
1304  *
1305  * @entries: array of cell lookup entries
1306  * @nentries: number of cell lookup entries in the array
1307  */
1308 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1309 {
1310 	int i;
1311 
1312 	mutex_lock(&nvmem_lookup_mutex);
1313 	for (i = 0; i < nentries; i++)
1314 		list_add_tail(&entries[i].node, &nvmem_lookup_list);
1315 	mutex_unlock(&nvmem_lookup_mutex);
1316 }
1317 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1318 
1319 /**
1320  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1321  *                            entries
1322  *
1323  * @entries: array of cell lookup entries
1324  * @nentries: number of cell lookup entries in the array
1325  */
1326 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1327 {
1328 	int i;
1329 
1330 	mutex_lock(&nvmem_lookup_mutex);
1331 	for (i = 0; i < nentries; i++)
1332 		list_del(&entries[i].node);
1333 	mutex_unlock(&nvmem_lookup_mutex);
1334 }
1335 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1336 
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	/* The returned string is owned by the embedded struct device. */
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
1349 
/* Register the nvmem bus at boot; providers hang their devices off it. */
static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

/* Tear the bus down on module unload. */
static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}
1359 
1360 subsys_initcall(nvmem_init);
1361 module_exit(nvmem_exit);
1362 
1363 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1364 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1365 MODULE_DESCRIPTION("nvmem Driver Core");
1366 MODULE_LICENSE("GPL v2");
1367