xref: /openbmc/linux/drivers/nvmem/core.c (revision d37cf9b63113f13d742713881ce691fc615d8b3b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * nvmem framework core.
4  *
5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/of.h>
20 #include <linux/slab.h>
21 
22 #include "internals.h"
23 
24 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
25 
26 #define FLAG_COMPAT		BIT(0)
27 struct nvmem_cell_entry {
28 	const char		*name;
29 	int			offset;
30 	size_t			raw_len;
31 	int			bytes;
32 	int			bit_offset;
33 	int			nbits;
34 	nvmem_cell_post_process_t read_post_process;
35 	void			*priv;
36 	struct device_node	*np;
37 	struct nvmem_device	*nvmem;
38 	struct list_head	node;
39 };
40 
41 struct nvmem_cell {
42 	struct nvmem_cell_entry *entry;
43 	const char		*id;
44 	int			index;
45 };
46 
47 static DEFINE_MUTEX(nvmem_mutex);
48 static DEFINE_IDA(nvmem_ida);
49 
50 static DEFINE_MUTEX(nvmem_cell_mutex);
51 static LIST_HEAD(nvmem_cell_tables);
52 
53 static DEFINE_MUTEX(nvmem_lookup_mutex);
54 static LIST_HEAD(nvmem_lookup_list);
55 
56 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
57 
58 static DEFINE_SPINLOCK(nvmem_layout_lock);
59 static LIST_HEAD(nvmem_layouts);
60 
61 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
62 			    void *val, size_t bytes)
63 {
64 	if (nvmem->reg_read)
65 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
66 
67 	return -EINVAL;
68 }
69 
70 static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
71 			     void *val, size_t bytes)
72 {
73 	int ret;
74 
75 	if (nvmem->reg_write) {
76 		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
77 		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
78 		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
79 		return ret;
80 	}
81 
82 	return -EINVAL;
83 }
84 
85 static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
86 				      unsigned int offset, void *val,
87 				      size_t bytes, int write)
88 {
89 
90 	unsigned int end = offset + bytes;
91 	unsigned int kend, ksize;
92 	const struct nvmem_keepout *keepout = nvmem->keepout;
93 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
94 	int rc;
95 
96 	/*
97 	 * Skip all keepouts before the range being accessed.
98 	 * Keepouts are sorted.
99 	 */
100 	while ((keepout < keepoutend) && (keepout->end <= offset))
101 		keepout++;
102 
103 	while ((offset < end) && (keepout < keepoutend)) {
104 		/* Access the valid portion before the keepout. */
105 		if (offset < keepout->start) {
106 			kend = min(end, keepout->start);
107 			ksize = kend - offset;
108 			if (write)
109 				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
110 			else
111 				rc = __nvmem_reg_read(nvmem, offset, val, ksize);
112 
113 			if (rc)
114 				return rc;
115 
116 			offset += ksize;
117 			val += ksize;
118 		}
119 
120 		/*
121 		 * Now we're aligned to the start of this keepout zone. Go
122 		 * through it.
123 		 */
124 		kend = min(end, keepout->end);
125 		ksize = kend - offset;
126 		if (!write)
127 			memset(val, keepout->value, ksize);
128 
129 		val += ksize;
130 		offset += ksize;
131 		keepout++;
132 	}
133 
134 	/*
135 	 * If we ran out of keepouts but there's still stuff to do, send it
136 	 * down directly
137 	 */
138 	if (offset < end) {
139 		ksize = end - offset;
140 		if (write)
141 			return __nvmem_reg_write(nvmem, offset, val, ksize);
142 		else
143 			return __nvmem_reg_read(nvmem, offset, val, ksize);
144 	}
145 
146 	return 0;
147 }
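/*
 * Worked example (illustrative, not part of this file): with a single
 * keepout covering bytes 4..7 that reads back as 0xff,
 *
 *	static const struct nvmem_keepout example_keepouts[] = {
 *		{ .start = 4, .end = 8, .value = 0xff },
 *	};
 *
 * a read of 8 bytes at offset 2 is split by the loop above into a real
 * read of bytes 2..3, a memset() of 0xff over bytes 4..7, and a final
 * real read of bytes 8..9 handled by the tail path after the loop.
 */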
148 
149 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
150 			  void *val, size_t bytes)
151 {
152 	if (!nvmem->nkeepout)
153 		return __nvmem_reg_read(nvmem, offset, val, bytes);
154 
155 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
156 }
157 
158 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
159 			   void *val, size_t bytes)
160 {
161 	if (!nvmem->nkeepout)
162 		return __nvmem_reg_write(nvmem, offset, val, bytes);
163 
164 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
165 }
166 
167 #ifdef CONFIG_NVMEM_SYSFS
168 static const char * const nvmem_type_str[] = {
169 	[NVMEM_TYPE_UNKNOWN] = "Unknown",
170 	[NVMEM_TYPE_EEPROM] = "EEPROM",
171 	[NVMEM_TYPE_OTP] = "OTP",
172 	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
173 	[NVMEM_TYPE_FRAM] = "FRAM",
174 };
175 
176 #ifdef CONFIG_DEBUG_LOCK_ALLOC
177 static struct lock_class_key eeprom_lock_key;
178 #endif
179 
180 static ssize_t type_show(struct device *dev,
181 			 struct device_attribute *attr, char *buf)
182 {
183 	struct nvmem_device *nvmem = to_nvmem_device(dev);
184 
185 	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
186 }
187 
188 static DEVICE_ATTR_RO(type);
189 
190 static struct attribute *nvmem_attrs[] = {
191 	&dev_attr_type.attr,
192 	NULL,
193 };
194 
195 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
196 				   struct bin_attribute *attr, char *buf,
197 				   loff_t pos, size_t count)
198 {
199 	struct device *dev;
200 	struct nvmem_device *nvmem;
201 	int rc;
202 
203 	if (attr->private)
204 		dev = attr->private;
205 	else
206 		dev = kobj_to_dev(kobj);
207 	nvmem = to_nvmem_device(dev);
208 
209 	/* Stop the user from reading */
210 	if (pos >= nvmem->size)
211 		return 0;
212 
213 	if (!IS_ALIGNED(pos, nvmem->stride))
214 		return -EINVAL;
215 
216 	if (count < nvmem->word_size)
217 		return -EINVAL;
218 
219 	if (pos + count > nvmem->size)
220 		count = nvmem->size - pos;
221 
222 	count = round_down(count, nvmem->word_size);
223 
224 	if (!nvmem->reg_read)
225 		return -EPERM;
226 
227 	rc = nvmem_reg_read(nvmem, pos, buf, count);
228 
229 	if (rc)
230 		return rc;
231 
232 	return count;
233 }
234 
235 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
236 				    struct bin_attribute *attr, char *buf,
237 				    loff_t pos, size_t count)
238 {
239 	struct device *dev;
240 	struct nvmem_device *nvmem;
241 	int rc;
242 
243 	if (attr->private)
244 		dev = attr->private;
245 	else
246 		dev = kobj_to_dev(kobj);
247 	nvmem = to_nvmem_device(dev);
248 
249 	/* Stop the user from writing */
250 	if (pos >= nvmem->size)
251 		return -EFBIG;
252 
253 	if (!IS_ALIGNED(pos, nvmem->stride))
254 		return -EINVAL;
255 
256 	if (count < nvmem->word_size)
257 		return -EINVAL;
258 
259 	if (pos + count > nvmem->size)
260 		count = nvmem->size - pos;
261 
262 	count = round_down(count, nvmem->word_size);
263 
264 	if (!nvmem->reg_write)
265 		return -EPERM;
266 
267 	rc = nvmem_reg_write(nvmem, pos, buf, count);
268 
269 	if (rc)
270 		return rc;
271 
272 	return count;
273 }
274 
275 static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
276 {
277 	umode_t mode = 0400;
278 
279 	if (!nvmem->root_only)
280 		mode |= 0044;
281 
282 	if (!nvmem->read_only)
283 		mode |= 0200;
284 
285 	if (!nvmem->reg_write)
286 		mode &= ~0200;
287 
288 	if (!nvmem->reg_read)
289 		mode &= ~0444;
290 
291 	return mode;
292 }
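/*
 * Worked example (illustrative): a device with root_only = false,
 * read_only = true and both reg_read and reg_write hooks ends up with
 * 0400 | 0044 = 0444, i.e. world-readable but never writable, because
 * the read_only flag prevents 0200 from being set in the first place.
 */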
293 
294 static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
295 					 struct bin_attribute *attr, int i)
296 {
297 	struct device *dev = kobj_to_dev(kobj);
298 	struct nvmem_device *nvmem = to_nvmem_device(dev);
299 
300 	attr->size = nvmem->size;
301 
302 	return nvmem_bin_attr_get_umode(nvmem);
303 }
304 
305 /* default read/write permissions */
306 static struct bin_attribute bin_attr_rw_nvmem = {
307 	.attr	= {
308 		.name	= "nvmem",
309 		.mode	= 0644,
310 	},
311 	.read	= bin_attr_nvmem_read,
312 	.write	= bin_attr_nvmem_write,
313 };
314 
315 static struct bin_attribute *nvmem_bin_attributes[] = {
316 	&bin_attr_rw_nvmem,
317 	NULL,
318 };
319 
320 static const struct attribute_group nvmem_bin_group = {
321 	.bin_attrs	= nvmem_bin_attributes,
322 	.attrs		= nvmem_attrs,
323 	.is_bin_visible = nvmem_bin_attr_is_visible,
324 };
325 
326 static const struct attribute_group *nvmem_dev_groups[] = {
327 	&nvmem_bin_group,
328 	NULL,
329 };
330 
331 static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
332 	.attr	= {
333 		.name	= "eeprom",
334 	},
335 	.read	= bin_attr_nvmem_read,
336 	.write	= bin_attr_nvmem_write,
337 };
338 
339 /*
340  * nvmem_sysfs_setup_compat() - Create an additional binary entry in
341  * the driver's sysfs directory, to be backwards compatible with the older
342  * drivers/misc/eeprom drivers.
343  */
344 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
345 				    const struct nvmem_config *config)
346 {
347 	int rval;
348 
349 	if (!config->compat)
350 		return 0;
351 
352 	if (!config->base_dev)
353 		return -EINVAL;
354 
355 	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
356 	if (config->type == NVMEM_TYPE_FRAM)
357 		nvmem->eeprom.attr.name = "fram";
358 	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
359 	nvmem->eeprom.size = nvmem->size;
360 #ifdef CONFIG_DEBUG_LOCK_ALLOC
361 	nvmem->eeprom.attr.key = &eeprom_lock_key;
362 #endif
363 	nvmem->eeprom.private = &nvmem->dev;
364 	nvmem->base_dev = config->base_dev;
365 
366 	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
367 	if (rval) {
368 		dev_err(&nvmem->dev,
369 			"Failed to create eeprom binary file %d\n", rval);
370 		return rval;
371 	}
372 
373 	nvmem->flags |= FLAG_COMPAT;
374 
375 	return 0;
376 }
377 
378 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
379 			      const struct nvmem_config *config)
380 {
381 	if (config->compat)
382 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
383 }
384 
385 #else /* CONFIG_NVMEM_SYSFS */
386 
387 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
388 				    const struct nvmem_config *config)
389 {
390 	return -ENOSYS;
391 }
392 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
393 				      const struct nvmem_config *config)
394 {
395 }
396 
397 #endif /* CONFIG_NVMEM_SYSFS */
398 
399 static void nvmem_release(struct device *dev)
400 {
401 	struct nvmem_device *nvmem = to_nvmem_device(dev);
402 
403 	ida_free(&nvmem_ida, nvmem->id);
404 	gpiod_put(nvmem->wp_gpio);
405 	kfree(nvmem);
406 }
407 
408 static const struct device_type nvmem_provider_type = {
409 	.release	= nvmem_release,
410 };
411 
412 static struct bus_type nvmem_bus_type = {
413 	.name		= "nvmem",
414 };
415 
416 static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
417 {
418 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
419 	mutex_lock(&nvmem_mutex);
420 	list_del(&cell->node);
421 	mutex_unlock(&nvmem_mutex);
422 	of_node_put(cell->np);
423 	kfree_const(cell->name);
424 	kfree(cell);
425 }
426 
427 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
428 {
429 	struct nvmem_cell_entry *cell, *p;
430 
431 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
432 		nvmem_cell_entry_drop(cell);
433 }
434 
435 static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
436 {
437 	mutex_lock(&nvmem_mutex);
438 	list_add_tail(&cell->node, &cell->nvmem->cells);
439 	mutex_unlock(&nvmem_mutex);
440 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
441 }
442 
443 static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
444 						     const struct nvmem_cell_info *info,
445 						     struct nvmem_cell_entry *cell)
446 {
447 	cell->nvmem = nvmem;
448 	cell->offset = info->offset;
449 	cell->raw_len = info->raw_len ?: info->bytes;
450 	cell->bytes = info->bytes;
451 	cell->name = info->name;
452 	cell->read_post_process = info->read_post_process;
453 	cell->priv = info->priv;
454 
455 	cell->bit_offset = info->bit_offset;
456 	cell->nbits = info->nbits;
457 	cell->np = info->np;
458 
459 	if (cell->nbits)
460 		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
461 					   BITS_PER_BYTE);
462 
463 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
464 		dev_err(&nvmem->dev,
465 			"cell %s unaligned to nvmem stride %d\n",
466 			cell->name ?: "<unknown>", nvmem->stride);
467 		return -EINVAL;
468 	}
469 
470 	return 0;
471 }
472 
473 static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
474 					       const struct nvmem_cell_info *info,
475 					       struct nvmem_cell_entry *cell)
476 {
477 	int err;
478 
479 	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
480 	if (err)
481 		return err;
482 
483 	cell->name = kstrdup_const(info->name, GFP_KERNEL);
484 	if (!cell->name)
485 		return -ENOMEM;
486 
487 	return 0;
488 }
489 
490 /**
491  * nvmem_add_one_cell() - Add information about one cell to an nvmem device
492  *
493  * @nvmem: nvmem device to add cells to.
494  * @info: nvmem cell info to add to the device
495  *
496  * Return: 0 or negative error code on failure.
497  */
498 int nvmem_add_one_cell(struct nvmem_device *nvmem,
499 		       const struct nvmem_cell_info *info)
500 {
501 	struct nvmem_cell_entry *cell;
502 	int rval;
503 
504 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
505 	if (!cell)
506 		return -ENOMEM;
507 
508 	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
509 	if (rval) {
510 		kfree(cell);
511 		return rval;
512 	}
513 
514 	nvmem_cell_entry_add(cell);
515 
516 	return 0;
517 }
518 EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
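/*
 * Example usage (a minimal sketch; the cell name, offset and size are
 * hypothetical): a provider that knows about a 6-byte MAC address at
 * offset 0x40 could describe it with:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *	int err = nvmem_add_one_cell(nvmem, &info);
 *
 * The offset must honour the device's stride, as checked in
 * nvmem_cell_info_to_nvmem_cell_entry_nodup() above.
 */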
519 
520 /**
521  * nvmem_add_cells() - Add cell information to an nvmem device
522  *
523  * @nvmem: nvmem device to add cells to.
524  * @info: nvmem cell info to add to the device
525  * @ncells: number of cells in info
526  *
527  * Return: 0 or negative error code on failure.
528  */
529 static int nvmem_add_cells(struct nvmem_device *nvmem,
530 		    const struct nvmem_cell_info *info,
531 		    int ncells)
532 {
533 	int i, rval;
534 
535 	for (i = 0; i < ncells; i++) {
536 		rval = nvmem_add_one_cell(nvmem, &info[i]);
537 		if (rval)
538 			return rval;
539 	}
540 
541 	return 0;
542 }
543 
544 /**
545  * nvmem_register_notifier() - Register a notifier block for nvmem events.
546  *
547  * @nb: notifier block to be called on nvmem events.
548  *
549  * Return: 0 on success, negative error number on failure.
550  */
551 int nvmem_register_notifier(struct notifier_block *nb)
552 {
553 	return blocking_notifier_chain_register(&nvmem_notifier, nb);
554 }
555 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
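/*
 * Example (sketch only; the callback and notifier_block names are
 * hypothetical): a listener receives the NVMEM_* action and the object
 * it concerns (a struct nvmem_device for NVMEM_ADD/NVMEM_REMOVE):
 *
 *	static int example_nvmem_notify(struct notifier_block *nb,
 *					unsigned long action, void *data)
 *	{
 *		if (action == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_nvmem_notify,
 *	};
 *
 *	nvmem_register_notifier(&example_nb);
 */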
556 
557 /**
558  * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
559  *
560  * @nb: notifier block to be unregistered.
561  *
562  * Return: 0 on success, negative error number on failure.
563  */
564 int nvmem_unregister_notifier(struct notifier_block *nb)
565 {
566 	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
567 }
568 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
569 
570 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
571 {
572 	const struct nvmem_cell_info *info;
573 	struct nvmem_cell_table *table;
574 	struct nvmem_cell_entry *cell;
575 	int rval = 0, i;
576 
577 	mutex_lock(&nvmem_cell_mutex);
578 	list_for_each_entry(table, &nvmem_cell_tables, node) {
579 		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
580 			for (i = 0; i < table->ncells; i++) {
581 				info = &table->cells[i];
582 
583 				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
584 				if (!cell) {
585 					rval = -ENOMEM;
586 					goto out;
587 				}
588 
589 				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
590 				if (rval) {
591 					kfree(cell);
592 					goto out;
593 				}
594 
595 				nvmem_cell_entry_add(cell);
596 			}
597 		}
598 	}
599 
600 out:
601 	mutex_unlock(&nvmem_cell_mutex);
602 	return rval;
603 }
604 
605 static struct nvmem_cell_entry *
606 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
607 {
608 	struct nvmem_cell_entry *iter, *cell = NULL;
609 
610 	mutex_lock(&nvmem_mutex);
611 	list_for_each_entry(iter, &nvmem->cells, node) {
612 		if (strcmp(cell_id, iter->name) == 0) {
613 			cell = iter;
614 			break;
615 		}
616 	}
617 	mutex_unlock(&nvmem_mutex);
618 
619 	return cell;
620 }
621 
622 static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
623 {
624 	unsigned int cur = 0;
625 	const struct nvmem_keepout *keepout = nvmem->keepout;
626 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
627 
628 	while (keepout < keepoutend) {
629 		/* Ensure keepouts are sorted and don't overlap. */
630 		if (keepout->start < cur) {
631 			dev_err(&nvmem->dev,
632 				"Keepout regions aren't sorted or overlap.\n");
633 
634 			return -ERANGE;
635 		}
636 
637 		if (keepout->end < keepout->start) {
638 			dev_err(&nvmem->dev,
639 				"Invalid keepout region.\n");
640 
641 			return -EINVAL;
642 		}
643 
644 		/*
645 		 * Validate keepouts (and holes between) don't violate
646 		 * word_size constraints.
647 		 */
648 		if ((keepout->end - keepout->start < nvmem->word_size) ||
649 		    ((keepout->start != cur) &&
650 		     (keepout->start - cur < nvmem->word_size))) {
651 
652 			dev_err(&nvmem->dev,
653 				"Keepout regions violate word_size constraints.\n");
654 
655 			return -ERANGE;
656 		}
657 
658 		/* Validate keepouts don't violate stride (alignment). */
659 		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
660 		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
661 
662 			dev_err(&nvmem->dev,
663 				"Keepout regions violate stride.\n");
664 
665 			return -EINVAL;
666 		}
667 
668 		cur = keepout->end;
669 		keepout++;
670 	}
671 
672 	return 0;
673 }
674 
675 static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
676 {
677 	struct device *dev = &nvmem->dev;
678 	struct device_node *child;
679 	const __be32 *addr;
680 	int len, ret;
681 
682 	for_each_child_of_node(np, child) {
683 		struct nvmem_cell_info info = {0};
684 
685 		addr = of_get_property(child, "reg", &len);
686 		if (!addr)
687 			continue;
688 		if (len < 2 * sizeof(u32)) {
689 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
690 			of_node_put(child);
691 			return -EINVAL;
692 		}
693 
694 		info.offset = be32_to_cpup(addr++);
695 		info.bytes = be32_to_cpup(addr);
696 		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
697 
698 		addr = of_get_property(child, "bits", &len);
699 		if (addr && len == (2 * sizeof(u32))) {
700 			info.bit_offset = be32_to_cpup(addr++);
701 			info.nbits = be32_to_cpup(addr);
702 		}
703 
704 		info.np = of_node_get(child);
705 
706 		if (nvmem->fixup_dt_cell_info)
707 			nvmem->fixup_dt_cell_info(nvmem, &info);
708 
709 		ret = nvmem_add_one_cell(nvmem, &info);
710 		kfree(info.name);
711 		if (ret) {
712 			of_node_put(child);
713 			return ret;
714 		}
715 	}
716 
717 	return 0;
718 }
719 
720 static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
721 {
722 	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
723 }
724 
725 static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
726 {
727 	struct device_node *layout_np;
728 	int err = 0;
729 
730 	layout_np = of_nvmem_layout_get_container(nvmem);
731 	if (!layout_np)
732 		return 0;
733 
734 	if (of_device_is_compatible(layout_np, "fixed-layout"))
735 		err = nvmem_add_cells_from_dt(nvmem, layout_np);
736 
737 	of_node_put(layout_np);
738 
739 	return err;
740 }
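/*
 * Example (sketch; node names and offsets are hypothetical): a fixed
 * layout is described entirely in the device tree,
 *
 *	eeprom {
 *		nvmem-layout {
 *			compatible = "fixed-layout";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			calibration@10 {
 *				reg = <0x10 0x4>;
 *			};
 *		};
 *	};
 *
 * and each child node with a "reg" property becomes one cell, parsed
 * by nvmem_add_cells_from_dt() above.
 */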
741 
742 int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
743 {
744 	layout->owner = owner;
745 
746 	spin_lock(&nvmem_layout_lock);
747 	list_add(&layout->node, &nvmem_layouts);
748 	spin_unlock(&nvmem_layout_lock);
749 
750 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
751 
752 	return 0;
753 }
754 EXPORT_SYMBOL_GPL(__nvmem_layout_register);
755 
756 void nvmem_layout_unregister(struct nvmem_layout *layout)
757 {
758 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
759 
760 	spin_lock(&nvmem_layout_lock);
761 	list_del(&layout->node);
762 	spin_unlock(&nvmem_layout_lock);
763 }
764 EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
765 
766 static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
767 {
768 	struct device_node *layout_np;
769 	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
770 
771 	layout_np = of_nvmem_layout_get_container(nvmem);
772 	if (!layout_np)
773 		return NULL;
774 
775 	/* Fixed layouts don't have a matching driver */
776 	if (of_device_is_compatible(layout_np, "fixed-layout")) {
777 		of_node_put(layout_np);
778 		return NULL;
779 	}
780 
781 	/*
782 	 * In case the nvmem device was built-in while the layout was built as a
783 	 * module, we shall manually request the layout driver loading otherwise
784 	 * we'll never have any match.
785 	 */
786 	of_request_module(layout_np);
787 
788 	spin_lock(&nvmem_layout_lock);
789 
790 	list_for_each_entry(l, &nvmem_layouts, node) {
791 		if (of_match_node(l->of_match_table, layout_np)) {
792 			if (try_module_get(l->owner))
793 				layout = l;
794 
795 			break;
796 		}
797 	}
798 
799 	spin_unlock(&nvmem_layout_lock);
800 	of_node_put(layout_np);
801 
802 	return layout;
803 }
804 
805 static void nvmem_layout_put(struct nvmem_layout *layout)
806 {
807 	if (layout)
808 		module_put(layout->owner);
809 }
810 
811 static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
812 {
813 	struct nvmem_layout *layout = nvmem->layout;
814 	int ret;
815 
816 	if (layout && layout->add_cells) {
817 		ret = layout->add_cells(&nvmem->dev, nvmem);
818 		if (ret)
819 			return ret;
820 	}
821 
822 	return 0;
823 }
824 
825 #if IS_ENABLED(CONFIG_OF)
826 /**
827  * of_nvmem_layout_get_container() - Get OF node to layout container.
828  *
829  * @nvmem: nvmem device.
830  *
831  * Return: a node pointer with refcount incremented or NULL if no
832  * container exists. Use of_node_put() on it when done.
833  */
834 struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
835 {
836 	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
837 }
838 EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
839 #endif
840 
841 const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
842 					struct nvmem_layout *layout)
843 {
844 	struct device_node __maybe_unused *layout_np;
845 	const struct of_device_id *match;
846 
847 	layout_np = of_nvmem_layout_get_container(nvmem);
848 	match = of_match_node(layout->of_match_table, layout_np);
849 
850 	return match ? match->data : NULL;
851 }
852 EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
853 
854 /**
855  * nvmem_register() - Register an nvmem device for a given nvmem_config.
856  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
857  *
858  * @config: nvmem device configuration with which nvmem device is created.
859  *
860  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
861  * on success.
862  */
863 
864 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
865 {
866 	struct nvmem_device *nvmem;
867 	int rval;
868 
869 	if (!config->dev)
870 		return ERR_PTR(-EINVAL);
871 
872 	if (!config->reg_read && !config->reg_write)
873 		return ERR_PTR(-EINVAL);
874 
875 	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
876 	if (!nvmem)
877 		return ERR_PTR(-ENOMEM);
878 
879 	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
880 	if (rval < 0) {
881 		kfree(nvmem);
882 		return ERR_PTR(rval);
883 	}
884 
885 	nvmem->id = rval;
886 
887 	nvmem->dev.type = &nvmem_provider_type;
888 	nvmem->dev.bus = &nvmem_bus_type;
889 	nvmem->dev.parent = config->dev;
890 
891 	device_initialize(&nvmem->dev);
892 
893 	if (!config->ignore_wp)
894 		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
895 						    GPIOD_OUT_HIGH);
896 	if (IS_ERR(nvmem->wp_gpio)) {
897 		rval = PTR_ERR(nvmem->wp_gpio);
898 		nvmem->wp_gpio = NULL;
899 		goto err_put_device;
900 	}
901 
902 	kref_init(&nvmem->refcnt);
903 	INIT_LIST_HEAD(&nvmem->cells);
904 	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;
905 
906 	nvmem->owner = config->owner;
907 	if (!nvmem->owner && config->dev->driver)
908 		nvmem->owner = config->dev->driver->owner;
909 	nvmem->stride = config->stride ?: 1;
910 	nvmem->word_size = config->word_size ?: 1;
911 	nvmem->size = config->size;
912 	nvmem->root_only = config->root_only;
913 	nvmem->priv = config->priv;
914 	nvmem->type = config->type;
915 	nvmem->reg_read = config->reg_read;
916 	nvmem->reg_write = config->reg_write;
917 	nvmem->keepout = config->keepout;
918 	nvmem->nkeepout = config->nkeepout;
919 	if (config->of_node)
920 		nvmem->dev.of_node = config->of_node;
921 	else if (!config->no_of_node)
922 		nvmem->dev.of_node = config->dev->of_node;
923 
924 	switch (config->id) {
925 	case NVMEM_DEVID_NONE:
926 		rval = dev_set_name(&nvmem->dev, "%s", config->name);
927 		break;
928 	case NVMEM_DEVID_AUTO:
929 		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
930 		break;
931 	default:
932 		rval = dev_set_name(&nvmem->dev, "%s%d",
933 			     config->name ? : "nvmem",
934 			     config->name ? config->id : nvmem->id);
935 		break;
936 	}
937 
938 	if (rval)
939 		goto err_put_device;
940 
941 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
942 			   config->read_only || !nvmem->reg_write;
943 
944 #ifdef CONFIG_NVMEM_SYSFS
945 	nvmem->dev.groups = nvmem_dev_groups;
946 #endif
947 
948 	if (nvmem->nkeepout) {
949 		rval = nvmem_validate_keepouts(nvmem);
950 		if (rval)
951 			goto err_put_device;
952 	}
953 
954 	if (config->compat) {
955 		rval = nvmem_sysfs_setup_compat(nvmem, config);
956 		if (rval)
957 			goto err_put_device;
958 	}
959 
960 	/*
961 	 * If the driver supplied a layout by config->layout, the module
962 	 * pointer will be NULL and nvmem_layout_put() will be a noop.
963 	 */
964 	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
965 	if (IS_ERR(nvmem->layout)) {
966 		rval = PTR_ERR(nvmem->layout);
967 		nvmem->layout = NULL;
968 
969 		if (rval == -EPROBE_DEFER)
970 			goto err_teardown_compat;
971 	}
972 
973 	if (config->cells) {
974 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
975 		if (rval)
976 			goto err_remove_cells;
977 	}
978 
979 	rval = nvmem_add_cells_from_table(nvmem);
980 	if (rval)
981 		goto err_remove_cells;
982 
983 	if (config->add_legacy_fixed_of_cells) {
984 		rval = nvmem_add_cells_from_legacy_of(nvmem);
985 		if (rval)
986 			goto err_remove_cells;
987 	}
988 
989 	rval = nvmem_add_cells_from_fixed_layout(nvmem);
990 	if (rval)
991 		goto err_remove_cells;
992 
993 	rval = nvmem_add_cells_from_layout(nvmem);
994 	if (rval)
995 		goto err_remove_cells;
996 
997 	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
998 
999 	rval = device_add(&nvmem->dev);
1000 	if (rval)
1001 		goto err_remove_cells;
1002 
1003 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
1004 
1005 	return nvmem;
1006 
1007 err_remove_cells:
1008 	nvmem_device_remove_all_cells(nvmem);
1009 	nvmem_layout_put(nvmem->layout);
1010 err_teardown_compat:
1011 	if (config->compat)
1012 		nvmem_sysfs_remove_compat(nvmem, config);
1013 err_put_device:
1014 	put_device(&nvmem->dev);
1015 
1016 	return ERR_PTR(rval);
1017 }
1018 EXPORT_SYMBOL_GPL(nvmem_register);
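/*
 * Example provider registration (a sketch under assumptions: the
 * example_read()/example_write() helpers and the sizes shown are
 * hypothetical, not part of this file):
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "example-eeprom",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.size		= 256,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.reg_read	= example_read,
 *		.reg_write	= example_write,
 *		.priv		= priv,
 *	};
 *	struct nvmem_device *nvmem = nvmem_register(&config);
 *
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */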
1019 
1020 static void nvmem_device_release(struct kref *kref)
1021 {
1022 	struct nvmem_device *nvmem;
1023 
1024 	nvmem = container_of(kref, struct nvmem_device, refcnt);
1025 
1026 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
1027 
1028 	if (nvmem->flags & FLAG_COMPAT)
1029 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
1030 
1031 	nvmem_device_remove_all_cells(nvmem);
1032 	nvmem_layout_put(nvmem->layout);
1033 	device_unregister(&nvmem->dev);
1034 }
1035 
1036 /**
1037  * nvmem_unregister() - Unregister previously registered nvmem device
1038  *
1039  * @nvmem: Pointer to previously registered nvmem device.
1040  */
1041 void nvmem_unregister(struct nvmem_device *nvmem)
1042 {
1043 	if (nvmem)
1044 		kref_put(&nvmem->refcnt, nvmem_device_release);
1045 }
1046 EXPORT_SYMBOL_GPL(nvmem_unregister);
1047 
1048 static void devm_nvmem_unregister(void *nvmem)
1049 {
1050 	nvmem_unregister(nvmem);
1051 }
1052 
1053 /**
1054  * devm_nvmem_register() - Register a managed nvmem device for given
1055  * nvmem_config.
1056  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
1057  *
1058  * @dev: Device that uses the nvmem device.
1059  * @config: nvmem device configuration with which nvmem device is created.
1060  *
1061  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
1062  * on success.
1063  */
1064 struct nvmem_device *devm_nvmem_register(struct device *dev,
1065 					 const struct nvmem_config *config)
1066 {
1067 	struct nvmem_device *nvmem;
1068 	int ret;
1069 
1070 	nvmem = nvmem_register(config);
1071 	if (IS_ERR(nvmem))
1072 		return nvmem;
1073 
1074 	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
1075 	if (ret)
1076 		return ERR_PTR(ret);
1077 
1078 	return nvmem;
1079 }
1080 EXPORT_SYMBOL_GPL(devm_nvmem_register);
1081 
1082 static struct nvmem_device *__nvmem_device_get(void *data,
1083 			int (*match)(struct device *dev, const void *data))
1084 {
1085 	struct nvmem_device *nvmem = NULL;
1086 	struct device *dev;
1087 
1088 	mutex_lock(&nvmem_mutex);
1089 	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
1090 	if (dev)
1091 		nvmem = to_nvmem_device(dev);
1092 	mutex_unlock(&nvmem_mutex);
1093 	if (!nvmem)
1094 		return ERR_PTR(-EPROBE_DEFER);
1095 
1096 	if (!try_module_get(nvmem->owner)) {
1097 		dev_err(&nvmem->dev,
1098 			"could not increase module refcount for cell %s\n",
1099 			nvmem_dev_name(nvmem));
1100 
1101 		put_device(&nvmem->dev);
1102 		return ERR_PTR(-EINVAL);
1103 	}
1104 
1105 	kref_get(&nvmem->refcnt);
1106 
1107 	return nvmem;
1108 }
1109 
1110 static void __nvmem_device_put(struct nvmem_device *nvmem)
1111 {
1112 	put_device(&nvmem->dev);
1113 	module_put(nvmem->owner);
1114 	kref_put(&nvmem->refcnt, nvmem_device_release);
1115 }
1116 
1117 #if IS_ENABLED(CONFIG_OF)
1118 /**
1119  * of_nvmem_device_get() - Get nvmem device from a given id
1120  *
1121  * @np: Device tree node that uses the nvmem device.
1122  * @id: nvmem name from nvmem-names property.
1123  *
1124  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1125  * on success.
1126  */
1127 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
1128 {
1129 
1130 	struct device_node *nvmem_np;
1131 	struct nvmem_device *nvmem;
1132 	int index = 0;
1133 
1134 	if (id)
1135 		index = of_property_match_string(np, "nvmem-names", id);
1136 
1137 	nvmem_np = of_parse_phandle(np, "nvmem", index);
1138 	if (!nvmem_np)
1139 		return ERR_PTR(-ENOENT);
1140 
1141 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1142 	of_node_put(nvmem_np);
1143 	return nvmem;
1144 }
1145 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
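/*
 * Example (sketch; the node and name are hypothetical): with a consumer
 * node such as
 *
 *	device {
 *		nvmem = <&eeprom>;
 *		nvmem-names = "calibration";
 *	};
 *
 * a driver looks the provider up by name:
 *
 *	struct nvmem_device *nvmem;
 *
 *	nvmem = of_nvmem_device_get(dev->of_node, "calibration");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */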
1146 #endif
1147 
1148 /**
1149  * nvmem_device_get() - Get nvmem device from a given id
1150  *
1151  * @dev: Device that uses the nvmem device.
1152  * @dev_name: name of the requested nvmem device.
1153  *
1154  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1155  * on success.
1156  */
1157 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
1158 {
1159 	if (dev->of_node) { /* try dt first */
1160 		struct nvmem_device *nvmem;
1161 
1162 		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
1163 
1164 		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
1165 			return nvmem;
1166 
1167 	}
1168 
1169 	return __nvmem_device_get((void *)dev_name, device_match_name);
1170 }
1171 EXPORT_SYMBOL_GPL(nvmem_device_get);
1172 
1173 /**
1174  * nvmem_device_find() - Find nvmem device with matching function
1175  *
1176  * @data: Data to pass to match function
1177  * @match: Callback function to check device
1178  *
1179  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1180  * on success.
1181  */
1182 struct nvmem_device *nvmem_device_find(void *data,
1183 			int (*match)(struct device *dev, const void *data))
1184 {
1185 	return __nvmem_device_get(data, match);
1186 }
1187 EXPORT_SYMBOL_GPL(nvmem_device_find);
1188 
1189 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
1190 {
1191 	struct nvmem_device **nvmem = res;
1192 
1193 	if (WARN_ON(!nvmem || !*nvmem))
1194 		return 0;
1195 
1196 	return *nvmem == data;
1197 }
1198 
1199 static void devm_nvmem_device_release(struct device *dev, void *res)
1200 {
1201 	nvmem_device_put(*(struct nvmem_device **)res);
1202 }
1203 
1204 /**
1205  * devm_nvmem_device_put() - put an already-obtained nvmem device
1206  *
1207  * @dev: Device that uses the nvmem device.
1208  * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
1209  * that needs to be released.
1210  */
1211 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
1212 {
1213 	int ret;
1214 
1215 	ret = devres_release(dev, devm_nvmem_device_release,
1216 			     devm_nvmem_device_match, nvmem);
1217 
1218 	WARN_ON(ret);
1219 }
1220 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
1221 
1222 /**
1223  * nvmem_device_put() - put an already-obtained nvmem device
1224  *
1225  * @nvmem: pointer to nvmem device that needs to be released.
1226  */
1227 void nvmem_device_put(struct nvmem_device *nvmem)
1228 {
1229 	__nvmem_device_put(nvmem);
1230 }
1231 EXPORT_SYMBOL_GPL(nvmem_device_put);
1232 
1233 /**
1234  * devm_nvmem_device_get() - Get the nvmem device of a device from a given id
1235  *
1236  * @dev: Device that requests the nvmem device.
1237  * @id: name id for the requested nvmem device.
1238  *
1239  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1240  * on success.  The nvmem_device will be released automatically once the
1241  * device is freed.
1242  */
1243 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
1244 {
1245 	struct nvmem_device **ptr, *nvmem;
1246 
1247 	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
1248 	if (!ptr)
1249 		return ERR_PTR(-ENOMEM);
1250 
1251 	nvmem = nvmem_device_get(dev, id);
1252 	if (!IS_ERR(nvmem)) {
1253 		*ptr = nvmem;
1254 		devres_add(dev, ptr);
1255 	} else {
1256 		devres_free(ptr);
1257 	}
1258 
1259 	return nvmem;
1260 }
1261 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
1262 
1263 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
1264 					    const char *id, int index)
1265 {
1266 	struct nvmem_cell *cell;
1267 	const char *name = NULL;
1268 
1269 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
1270 	if (!cell)
1271 		return ERR_PTR(-ENOMEM);
1272 
1273 	if (id) {
1274 		name = kstrdup_const(id, GFP_KERNEL);
1275 		if (!name) {
1276 			kfree(cell);
1277 			return ERR_PTR(-ENOMEM);
1278 		}
1279 	}
1280 
1281 	cell->id = name;
1282 	cell->entry = entry;
1283 	cell->index = index;
1284 
1285 	return cell;
1286 }
1287 
1288 static struct nvmem_cell *
1289 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
1290 {
1291 	struct nvmem_cell_entry *cell_entry;
1292 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
1293 	struct nvmem_cell_lookup *lookup;
1294 	struct nvmem_device *nvmem;
1295 	const char *dev_id;
1296 
1297 	if (!dev)
1298 		return ERR_PTR(-EINVAL);
1299 
1300 	dev_id = dev_name(dev);
1301 
1302 	mutex_lock(&nvmem_lookup_mutex);
1303 
1304 	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
1305 		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
1306 		    (strcmp(lookup->con_id, con_id) == 0)) {
1307 			/* This is the right entry. */
1308 			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
1309 						   device_match_name);
1310 			if (IS_ERR(nvmem)) {
1311 				/* Provider may not be registered yet. */
1312 				cell = ERR_CAST(nvmem);
1313 				break;
1314 			}
1315 
1316 			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
1317 								   lookup->cell_name);
1318 			if (!cell_entry) {
1319 				__nvmem_device_put(nvmem);
1320 				cell = ERR_PTR(-ENOENT);
1321 			} else {
1322 				cell = nvmem_create_cell(cell_entry, con_id, 0);
1323 				if (IS_ERR(cell))
1324 					__nvmem_device_put(nvmem);
1325 			}
1326 			break;
1327 		}
1328 	}
1329 
1330 	mutex_unlock(&nvmem_lookup_mutex);
1331 	return cell;
1332 }
1333 
1334 #if IS_ENABLED(CONFIG_OF)
1335 static struct nvmem_cell_entry *
1336 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
1337 {
1338 	struct nvmem_cell_entry *iter, *cell = NULL;
1339 
1340 	mutex_lock(&nvmem_mutex);
1341 	list_for_each_entry(iter, &nvmem->cells, node) {
1342 		if (np == iter->np) {
1343 			cell = iter;
1344 			break;
1345 		}
1346 	}
1347 	mutex_unlock(&nvmem_mutex);
1348 
1349 	return cell;
1350 }
1351 
1352 /**
1353  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1354  *
1355  * @np: Device tree node that uses the nvmem cell.
1356  * @id: nvmem cell name from nvmem-cell-names property, or NULL
1357  *      for the cell at index 0 (the lone cell with no accompanying
1358  *      nvmem-cell-names property).
1359  *
1360  * Return: Will be an ERR_PTR() on error or a valid pointer
1361  * to a struct nvmem_cell.  The nvmem_cell must be released with
1362  * nvmem_cell_put().
1363  */
1364 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1365 {
1366 	struct device_node *cell_np, *nvmem_np;
1367 	struct nvmem_device *nvmem;
1368 	struct nvmem_cell_entry *cell_entry;
1369 	struct nvmem_cell *cell;
1370 	struct of_phandle_args cell_spec;
1371 	int index = 0;
1372 	int cell_index = 0;
1373 	int ret;
1374 
1375 	/* if cell name exists, find index to the name */
1376 	if (id)
1377 		index = of_property_match_string(np, "nvmem-cell-names", id);
1378 
1379 	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
1380 						  "#nvmem-cell-cells",
1381 						  index, &cell_spec);
1382 	if (ret)
1383 		return ERR_PTR(-ENOENT);
1384 
1385 	if (cell_spec.args_count > 1)
1386 		return ERR_PTR(-EINVAL);
1387 
1388 	cell_np = cell_spec.np;
1389 	if (cell_spec.args_count)
1390 		cell_index = cell_spec.args[0];
1391 
1392 	nvmem_np = of_get_parent(cell_np);
1393 	if (!nvmem_np) {
1394 		of_node_put(cell_np);
1395 		return ERR_PTR(-EINVAL);
1396 	}
1397 
1398 	/* nvmem layouts produce cells within the nvmem-layout container */
1399 	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
1400 		nvmem_np = of_get_next_parent(nvmem_np);
1401 		if (!nvmem_np) {
1402 			of_node_put(cell_np);
1403 			return ERR_PTR(-EINVAL);
1404 		}
1405 	}
1406 
1407 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1408 	of_node_put(nvmem_np);
1409 	if (IS_ERR(nvmem)) {
1410 		of_node_put(cell_np);
1411 		return ERR_CAST(nvmem);
1412 	}
1413 
1414 	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
1415 	of_node_put(cell_np);
1416 	if (!cell_entry) {
1417 		__nvmem_device_put(nvmem);
1418 		return ERR_PTR(-ENOENT);
1419 	}
1420 
1421 	cell = nvmem_create_cell(cell_entry, id, cell_index);
1422 	if (IS_ERR(cell))
1423 		__nvmem_device_put(nvmem);
1424 
1425 	return cell;
1426 }
1427 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1428 #endif
1429 
1430 /**
1431  * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
1432  *
1433  * @dev: Device that requests the nvmem cell.
1434  * @id: nvmem cell name to get (this corresponds with the name from the
1435  *      nvmem-cell-names property for DT systems and with the con_id from
1436  *      the lookup entry for non-DT systems).
1437  *
1438  * Return: Will be an ERR_PTR() on error or a valid pointer
1439  * to a struct nvmem_cell.  The nvmem_cell must be released with
1440  * nvmem_cell_put().
1441  */
1442 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1443 {
1444 	struct nvmem_cell *cell;
1445 
1446 	if (dev->of_node) { /* try dt first */
1447 		cell = of_nvmem_cell_get(dev->of_node, id);
1448 		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1449 			return cell;
1450 	}
1451 
1452 	/* NULL cell id only allowed for device tree; invalid otherwise */
1453 	if (!id)
1454 		return ERR_PTR(-EINVAL);
1455 
1456 	return nvmem_cell_get_from_lookup(dev, id);
1457 }
1458 EXPORT_SYMBOL_GPL(nvmem_cell_get);
1459 
1460 static void devm_nvmem_cell_release(struct device *dev, void *res)
1461 {
1462 	nvmem_cell_put(*(struct nvmem_cell **)res);
1463 }
1464 
1465 /**
1466  * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
1467  *
1468  * @dev: Device that requests the nvmem cell.
1469  * @id: nvmem cell name id to get.
1470  *
1471  * Return: Will be an ERR_PTR() on error or a valid pointer
1472  * to a struct nvmem_cell.  The nvmem_cell will be released
1473  * automatically once the device is freed.
1474  */
1475 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1476 {
1477 	struct nvmem_cell **ptr, *cell;
1478 
1479 	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1480 	if (!ptr)
1481 		return ERR_PTR(-ENOMEM);
1482 
1483 	cell = nvmem_cell_get(dev, id);
1484 	if (!IS_ERR(cell)) {
1485 		*ptr = cell;
1486 		devres_add(dev, ptr);
1487 	} else {
1488 		devres_free(ptr);
1489 	}
1490 
1491 	return cell;
1492 }
1493 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1494 
1495 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
1496 {
1497 	struct nvmem_cell **c = res;
1498 
1499 	if (WARN_ON(!c || !*c))
1500 		return 0;
1501 
1502 	return *c == data;
1503 }
1504 
1505 /**
1506  * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1507  * from devm_nvmem_cell_get.
1508  *
1509  * @dev: Device that requests the nvmem cell.
1510  * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1511  */
1512 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1513 {
1514 	int ret;
1515 
1516 	ret = devres_release(dev, devm_nvmem_cell_release,
1517 				devm_nvmem_cell_match, cell);
1518 
1519 	WARN_ON(ret);
1520 }
1521 EXPORT_SYMBOL(devm_nvmem_cell_put);
1522 
1523 /**
1524  * nvmem_cell_put() - Release previously allocated nvmem cell.
1525  *
1526  * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1527  */
1528 void nvmem_cell_put(struct nvmem_cell *cell)
1529 {
1530 	struct nvmem_device *nvmem = cell->entry->nvmem;
1531 
1532 	if (cell->id)
1533 		kfree_const(cell->id);
1534 
1535 	kfree(cell);
1536 	__nvmem_device_put(nvmem);
1537 }
1538 EXPORT_SYMBOL_GPL(nvmem_cell_put);
1539 
1540 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
1541 {
1542 	u8 *p, *b;
1543 	int i, extra, bit_offset = cell->bit_offset;
1544 
1545 	p = b = buf;
1546 	if (bit_offset) {
1547 		/* First shift */
1548 		*b++ >>= bit_offset;
1549 
1550 		/* setup rest of the bytes if any */
1551 		for (i = 1; i < cell->bytes; i++) {
1552 			/* Get bits from next byte and shift them towards msb */
1553 			*p |= *b << (BITS_PER_BYTE - bit_offset);
1554 
1555 			p = b;
1556 			*b++ >>= bit_offset;
1557 		}
1558 	} else {
1559 		/* point to the msb */
1560 		p += cell->bytes - 1;
1561 	}
1562 
1563 	/* result fits in less bytes */
1564 	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1565 	while (--extra >= 0)
1566 		*p-- = 0;
1567 
1568 	/* clear msb bits if any leftover in the last byte */
1569 	if (cell->nbits % BITS_PER_BYTE)
1570 		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
1571 }
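/*
 * Worked example (illustrative): for a cell with bit_offset = 2 and
 * nbits = 12, cell->bytes is DIV_ROUND_UP(2 + 12, 8) = 2. buf[0] is
 * shifted right by two, the two low bits of buf[1] are folded into the
 * top of buf[0], buf[1] is shifted right by two as well, and the final
 * GENMASK(3, 0) mask (12 % 8 = 4 valid bits) clears the leftover high
 * bits of buf[1], leaving exactly the 12 cell bits.
 */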
1572 
1573 static int __nvmem_cell_read(struct nvmem_device *nvmem,
1574 			     struct nvmem_cell_entry *cell,
1575 			     void *buf, size_t *len, const char *id, int index)
1576 {
1577 	int rc;
1578 
1579 	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
1580 
1581 	if (rc)
1582 		return rc;
1583 
1584 	/* shift bits in-place */
1585 	if (cell->bit_offset || cell->nbits)
1586 		nvmem_shift_read_buffer_in_place(cell, buf);
1587 
1588 	if (cell->read_post_process) {
1589 		rc = cell->read_post_process(cell->priv, id, index,
1590 					     cell->offset, buf, cell->raw_len);
1591 		if (rc)
1592 			return rc;
1593 	}
1594 
1595 	if (len)
1596 		*len = cell->bytes;
1597 
1598 	return 0;
1599 }
1600 
1601 /**
1602  * nvmem_cell_read() - Read a given nvmem cell
1603  *
1604  * @cell: nvmem cell to be read.
1605  * @len: pointer to length of cell which will be populated on successful read;
1606  *	 can be NULL.
1607  *
1608  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1609  * buffer should be freed by the consumer with kfree().
1610  */
1611 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1612 {
1613 	struct nvmem_cell_entry *entry = cell->entry;
1614 	struct nvmem_device *nvmem = entry->nvmem;
1615 	u8 *buf;
1616 	int rc;
1617 
1618 	if (!nvmem)
1619 		return ERR_PTR(-EINVAL);
1620 
1621 	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
1622 	if (!buf)
1623 		return ERR_PTR(-ENOMEM);
1624 
1625 	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
1626 	if (rc) {
1627 		kfree(buf);
1628 		return ERR_PTR(rc);
1629 	}
1630 
1631 	return buf;
1632 }
1633 EXPORT_SYMBOL_GPL(nvmem_cell_read);
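/*
 * Example consumer read (a sketch; the "calibration" cell id is
 * hypothetical):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	...
 *	kfree(data);
 */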
1634 
1635 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
1636 					     u8 *_buf, int len)
1637 {
1638 	struct nvmem_device *nvmem = cell->nvmem;
1639 	int i, rc, nbits, bit_offset = cell->bit_offset;
1640 	u8 v, *p, *buf, *b, pbyte, pbits;
1641 
1642 	nbits = cell->nbits;
1643 	buf = kzalloc(cell->bytes, GFP_KERNEL);
1644 	if (!buf)
1645 		return ERR_PTR(-ENOMEM);
1646 
1647 	memcpy(buf, _buf, len);
1648 	p = b = buf;
1649 
1650 	if (bit_offset) {
1651 		pbyte = *b;
1652 		*b <<= bit_offset;
1653 
1654 		/* setup the first byte with lsb bits from nvmem */
1655 		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1656 		if (rc)
1657 			goto err;
1658 		*b++ |= GENMASK(bit_offset - 1, 0) & v;
1659 
1660 		/* setup the rest of the bytes if any */
1661 		for (i = 1; i < cell->bytes; i++) {
1662 			/* Get last byte bits and shift them towards lsb */
1663 			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1664 			pbyte = *b;
1665 			p = b;
1666 			*b <<= bit_offset;
1667 			*b++ |= pbits;
1668 		}
1669 	}
1670 
1671 	/* if it's not end on byte boundary */
1672 	if ((nbits + bit_offset) % BITS_PER_BYTE) {
1673 		/* setup the last byte with msb bits from nvmem */
1674 		rc = nvmem_reg_read(nvmem,
1675 				    cell->offset + cell->bytes - 1, &v, 1);
1676 		if (rc)
1677 			goto err;
1678 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1679 
1680 	}
1681 
1682 	return buf;
1683 err:
1684 	kfree(buf);
1685 	return ERR_PTR(rc);
1686 }
1687 
1688 static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
1689 {
1690 	struct nvmem_device *nvmem = cell->nvmem;
1691 	int rc;
1692 
1693 	if (!nvmem || nvmem->read_only ||
1694 	    (cell->bit_offset == 0 && len != cell->bytes))
1695 		return -EINVAL;
1696 
1697 	/*
1698 	 * Any cells which have a read_post_process hook are read-only because
1699 	 * we cannot reverse the operation and it might affect other cells,
1700 	 * too.
1701 	 */
1702 	if (cell->read_post_process)
1703 		return -EINVAL;
1704 
1705 	if (cell->bit_offset || cell->nbits) {
1706 		if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
1707 			return -EINVAL;
1708 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1709 		if (IS_ERR(buf))
1710 			return PTR_ERR(buf);
1711 	}
1712 
1713 	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1714 
1715 	/* free the tmp buffer */
1716 	if (cell->bit_offset || cell->nbits)
1717 		kfree(buf);
1718 
1719 	if (rc)
1720 		return rc;
1721 
1722 	return len;
1723 }
1724 
1725 /**
1726  * nvmem_cell_write() - Write to a given nvmem cell
1727  *
1728  * @cell: nvmem cell to be written.
1729  * @buf: Buffer to be written.
1730  * @len: length of buffer to be written to nvmem cell.
1731  *
1732  * Return: length of bytes written or negative on failure.
1733  */
1734 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1735 {
1736 	return __nvmem_cell_entry_write(cell->entry, buf, len);
1737 }
1738 
1739 EXPORT_SYMBOL_GPL(nvmem_cell_write);
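/*
 * Example cell write (sketch; the cell and payload are hypothetical).
 * For a plain byte-aligned cell, len must equal the cell size:
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	int ret = nvmem_cell_write(cell, mac, sizeof(mac));
 *
 *	if (ret != sizeof(mac))
 *		return ret < 0 ? ret : -EIO;
 */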
1740 
1741 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1742 				  void *val, size_t count)
1743 {
1744 	struct nvmem_cell *cell;
1745 	void *buf;
1746 	size_t len;
1747 
1748 	cell = nvmem_cell_get(dev, cell_id);
1749 	if (IS_ERR(cell))
1750 		return PTR_ERR(cell);
1751 
1752 	buf = nvmem_cell_read(cell, &len);
1753 	if (IS_ERR(buf)) {
1754 		nvmem_cell_put(cell);
1755 		return PTR_ERR(buf);
1756 	}
1757 	if (len != count) {
1758 		kfree(buf);
1759 		nvmem_cell_put(cell);
1760 		return -EINVAL;
1761 	}
1762 	memcpy(val, buf, count);
1763 	kfree(buf);
1764 	nvmem_cell_put(cell);
1765 
1766 	return 0;
1767 }
1768 
1769 /**
1770  * nvmem_cell_read_u8() - Read a cell value as a u8
1771  *
1772  * @dev: Device that requests the nvmem cell.
1773  * @cell_id: Name of nvmem cell to read.
1774  * @val: pointer to output value.
1775  *
1776  * Return: 0 on success or negative errno.
1777  */
1778 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
1779 {
1780 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1781 }
1782 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
1783 
1784 /**
1785  * nvmem_cell_read_u16() - Read a cell value as a u16
1786  *
1787  * @dev: Device that requests the nvmem cell.
1788  * @cell_id: Name of nvmem cell to read.
1789  * @val: pointer to output value.
1790  *
1791  * Return: 0 on success or negative errno.
1792  */
1793 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1794 {
1795 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1796 }
1797 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1798 
1799 /**
1800  * nvmem_cell_read_u32() - Read a cell value as a u32
1801  *
1802  * @dev: Device that requests the nvmem cell.
1803  * @cell_id: Name of nvmem cell to read.
1804  * @val: pointer to output value.
1805  *
1806  * Return: 0 on success or negative errno.
1807  */
1808 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1809 {
1810 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1811 }
1812 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1813 
1814 /**
1815  * nvmem_cell_read_u64() - Read a cell value as a u64
1816  *
1817  * @dev: Device that requests the nvmem cell.
1818  * @cell_id: Name of nvmem cell to read.
1819  * @val: pointer to output value.
1820  *
1821  * Return: 0 on success or negative errno.
1822  */
1823 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
1824 {
1825 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1826 }
1827 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1828 
1829 static const void *nvmem_cell_read_variable_common(struct device *dev,
1830 						   const char *cell_id,
1831 						   size_t max_len, size_t *len)
1832 {
1833 	struct nvmem_cell *cell;
1834 	int nbits;
1835 	void *buf;
1836 
1837 	cell = nvmem_cell_get(dev, cell_id);
1838 	if (IS_ERR(cell))
1839 		return cell;
1840 
1841 	nbits = cell->entry->nbits;
1842 	buf = nvmem_cell_read(cell, len);
1843 	nvmem_cell_put(cell);
1844 	if (IS_ERR(buf))
1845 		return buf;
1846 
1847 	/*
1848 	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
1849 	 * the length of the real data. Throw away the extra junk.
1850 	 */
1851 	if (nbits)
1852 		*len = DIV_ROUND_UP(nbits, 8);
1853 
1854 	if (*len > max_len) {
1855 		kfree(buf);
1856 		return ERR_PTR(-ERANGE);
1857 	}
1858 
1859 	return buf;
1860 }

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
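
/*
 * Example (editor's sketch): unlike the fixed-size helpers, the
 * variable_le variants accept any cell no wider than the output type, so
 * a 1-, 2-, 3- or 4-byte cell can feed a u32. The cell name "chip-rev"
 * is hypothetical.
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_variable_le_u32(dev, "chip-rev", &rev);
 *	if (ret)
 *		return ret;
 */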

/**
 * nvmem_device_cell_read() - Read a given cell from an nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info describing the cell to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success or a negative error code on
 * error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write a given cell to an nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info describing the cell to write.
 * @buf: buffer to be written to the cell.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
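
/*
 * Example (editor's sketch): accessing a cell described ad hoc by a
 * struct nvmem_cell_info rather than one registered with the core. The
 * offset and size below are hypothetical.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "serial",
 *		.offset	= 0x40,
 *		.bytes	= 8,
 *	};
 *	u8 serial[8];
 *	ssize_t len;
 *
 *	len = nvmem_device_cell_read(nvmem, &info, serial);
 *	if (len < 0)
 *		return len;
 */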

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success or a negative error code on
 * error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
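
/*
 * Example (editor's sketch): raw access at a byte offset, bypassing the
 * cell abstraction entirely. Both helpers return the number of bytes
 * transferred on success. The offset below is hypothetical.
 *
 *	u8 mac[6];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0x100, sizeof(mac), mac);
 *	if (ret != sizeof(mac))
 *		return ret < 0 ? ret : -EIO;
 */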

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
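
/*
 * Example (editor's sketch): a provider registering a table of cell
 * definitions against an nvmem device by name. All names and values
 * below are hypothetical.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x0, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-eeprom",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */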

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
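
/*
 * Example (editor's sketch): connecting a cell to a consumer device on
 * non-DT platforms, so that nvmem_cell_get(dev, "mac-address") on the
 * matching device resolves to the named cell. All names below are
 * hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-eeprom",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-nic.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */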

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);
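
/*
 * Example (editor's sketch): both accessors are handy for diagnostics in
 * consumer code that holds an nvmem handle.
 *
 *	dev_info(dev, "backed by %s (%zu bytes)\n",
 *		 nvmem_dev_name(nvmem), nvmem_dev_size(nvmem));
 */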

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");