// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static DEFINE_SPINLOCK(nvmem_layout_lock);
static LIST_HEAD(nvmem_layouts);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

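/*
 * Editor's illustration (not part of the original file; all names and
 * offsets are hypothetical): a provider describing a keepout region.
 * With the table below, a read of bytes 0x0-0xf returns device data for
 * 0x0-0x3 and 0xc-0xf, while 0x4-0xb is filled with the keepout's
 * .value (0xff) and never touches the hardware; writes falling inside
 * that window are silently skipped by nvmem_access_with_keepouts().
 */
static const struct nvmem_keepout foo_keepouts[] = {
	{ .start = 0x4, .end = 0xc, .value = 0xff },
};

/*
 * A provider would then point config->keepout at foo_keepouts and set
 * config->nkeepout = ARRAY_SIZE(foo_keepouts) before registering.
 */
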
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

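/*
 * Worked example for the mode computation above (editor's note): a
 * device with root_only set, read_only clear, and both reg_read and
 * reg_write hooks starts at 0400, never gains the 0044 group/other
 * read bits, picks up 0200 for writing, and ends up 0600 - root may
 * read and write, everyone else gets nothing.
 */
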
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to stay backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits) {
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);
		cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
	}

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
		dev_err(&nvmem->dev,
			"cell %s raw len %zd unaligned to nvmem word size %d\n",
			cell->name ?: "<unknown>", cell->raw_len,
			nvmem->word_size);

		if (info->raw_len)
			return -EINVAL;

		cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);

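/*
 * Illustrative sketch (editor-added; the function and cell names are
 * hypothetical): a provider or layout driver registering a single
 * 6-byte cell, assuming a device with word_size and stride of 1 so
 * the alignment checks above pass.
 */
static int foo_add_mac_cell(struct nvmem_device *nvmem)
{
	struct nvmem_cell_info info = {
		.name	= "mac-address",
		.offset	= 0x40,
		.bytes	= 6,
	};

	return nvmem_add_one_cell(nvmem, &info);
}
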
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

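/*
 * Illustrative sketch (editor-added, hypothetical names): a listener
 * for the notifier chain used above. For NVMEM_ADD/NVMEM_REMOVE the
 * data pointer is the nvmem device; for NVMEM_CELL_ADD and
 * NVMEM_CELL_REMOVE it is the cell entry.
 */
static int foo_nvmem_notify(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	if (event == NVMEM_ADD)
		pr_debug("nvmem device %s registered\n",
			 nvmem_dev_name(data));

	return NOTIFY_OK;
}

static struct notifier_block foo_nvmem_nb = {
	.notifier_call	= foo_nvmem_notify,
};

/*
 * Paired with nvmem_register_notifier(&foo_nvmem_nb) at init time and
 * nvmem_unregister_notifier(&foo_nvmem_nb) on teardown.
 */
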
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

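/*
 * Example device tree fragment (editor's illustration) for the parser
 * above: every child node with a "reg" property becomes a cell, and an
 * optional "bits" property (<bit_offset nbits>) carves a sub-byte field
 * out of the byte range.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calib: calibration@10 {
 *			reg = <0x10 0x2>;
 *			bits = <4 10>;
 *		};
 *	};
 */
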
static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
	layout->owner = owner;

	spin_lock(&nvmem_layout_lock);
	list_add(&layout->node, &nvmem_layouts);
	spin_unlock(&nvmem_layout_lock);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);

	return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);

	spin_lock(&nvmem_layout_lock);
	list_del(&layout->node);
	spin_unlock(&nvmem_layout_lock);
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return NULL;

	/* Fixed layouts don't have a matching driver */
	if (of_device_is_compatible(layout_np, "fixed-layout")) {
		of_node_put(layout_np);
		return NULL;
	}

	/*
	 * In case the nvmem device was built-in while the layout was built as a
	 * module, we shall manually request the layout driver loading otherwise
	 * we'll never have any match.
	 */
	of_request_module(layout_np);

	spin_lock(&nvmem_layout_lock);

	list_for_each_entry(l, &nvmem_layouts, node) {
		if (of_match_node(l->of_match_table, layout_np)) {
			if (try_module_get(l->owner))
				layout = l;

			break;
		}
	}

	spin_unlock(&nvmem_layout_lock);
	of_node_put(layout_np);

	return layout;
}

static void nvmem_layout_put(struct nvmem_layout *layout)
{
	if (layout)
		module_put(layout->owner);
}

static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
{
	struct nvmem_layout *layout = nvmem->layout;
	int ret;

	if (layout && layout->add_cells) {
		ret = layout->add_cells(&nvmem->dev, nvmem);
		if (ret)
			return ret;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_layout_get_container() - Get OF node to layout container.
 *
 * @nvmem: nvmem device.
 *
 * Return: a node pointer with refcount incremented or NULL if no
 * container exists. Use of_node_put() on it when done.
 */
struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
}
EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
#endif

const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
					struct nvmem_layout *layout)
{
	struct device_node __maybe_unused *layout_np;
	const struct of_device_id *match;

	layout_np = of_nvmem_layout_get_container(nvmem);
	match = of_match_node(layout->of_match_table, layout_np);

	return match ? match->data : NULL;
}
EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	/*
	 * If the driver supplied a layout by config->layout, the module
	 * pointer will be NULL and nvmem_layout_put() will be a noop.
	 */
	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
	if (IS_ERR(nvmem->layout)) {
		rval = PTR_ERR(nvmem->layout);
		nvmem->layout = NULL;

		if (rval == -EPROBE_DEFER)
			goto err_teardown_compat;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

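/*
 * Illustrative provider sketch (editor-added; struct foo_priv and the
 * foo_* functions are hypothetical): the minimal configuration consumed
 * by nvmem_register(). A registration like this is paired with
 * nvmem_unregister() on teardown, or handled automatically through
 * devm_nvmem_register() below.
 */
struct foo_priv {
	u8 data[1024];	/* hypothetical shadow of the device contents */
};

static int foo_reg_read(void *priv, unsigned int offset, void *val,
			size_t bytes)
{
	struct foo_priv *foo = priv;

	memcpy(val, foo->data + offset, bytes);

	return 0;
}

static struct nvmem_device *foo_register_otp(struct device *dev,
					     struct foo_priv *foo)
{
	struct nvmem_config config = {
		.dev		= dev,
		.name		= "foo-otp",
		.id		= NVMEM_DEVID_AUTO,
		.type		= NVMEM_TYPE_OTP,
		.read_only	= true,
		.word_size	= 4,
		.stride		= 4,
		.size		= sizeof(foo->data),
		.priv		= foo,
		.reg_read	= foo_reg_read,
	};

	return nvmem_register(&config);
}
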
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

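/*
 * Editor's illustration (hypothetical names): the board-file side of
 * the lookup mechanism walked above, connecting cell "mac-address" on
 * provider "foo-otp0" to consumer device "foo-eth.0" under the con_id
 * "mac-address":
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name	= "foo-otp0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "foo-eth.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 *
 * nvmem_add_cell_lookups() splices the entries onto nvmem_lookup_list
 * under nvmem_lookup_mutex.
 */
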
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

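/*
 * Worked example for the helper above (editor's note): a cell with
 * bit_offset = 2 and nbits = 10 occupies two raw bytes. For raw data
 * {0x34, 0x12} (i.e. the 16-bit value 0x1234), each byte is shifted
 * right by two while the neighbour's low bits are pulled in at the top,
 * and the last byte is masked down to the leftover 10 % 8 = 2 bits.
 * The result is {0x8d, 0x00}, which is (0x1234 >> 2) & GENMASK(9, 0)
 * = 0x08d right-aligned in the buffer.
 */
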
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

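/*
 * Illustrative consumer sketch (editor-added; the cell name and the
 * 6-byte length are hypothetical): the usual get/read/put sequence
 * built from the APIs above.
 */
static int foo_get_mac(struct device *dev, u8 mac[6])
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len == 6)
		memcpy(mac, buf, 6);

	kfree(buf);

	return len == 6 ? 0 : -EINVAL;
}
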
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
			return -EINVAL;
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

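/*
 * Editor's note: the typed helpers above collapse the whole
 * get/read/length-check/put dance into a single call, e.g. (with a
 * hypothetical cell name; the cell must be exactly four bytes):
 *
 *	u32 cal;
 *	int ret = nvmem_cell_read_u32(dev, "calibration", &cal);
 */
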
nvmem_cell_read_variable_common(struct device * dev,const char * cell_id,size_t max_len,size_t * len)1843 static const void *nvmem_cell_read_variable_common(struct device *dev,
1844 						   const char *cell_id,
1845 						   size_t max_len, size_t *len)
1846 {
1847 	struct nvmem_cell *cell;
1848 	int nbits;
1849 	void *buf;
1850 
1851 	cell = nvmem_cell_get(dev, cell_id);
1852 	if (IS_ERR(cell))
1853 		return cell;
1854 
1855 	nbits = cell->entry->nbits;
1856 	buf = nvmem_cell_read(cell, len);
1857 	nvmem_cell_put(cell);
1858 	if (IS_ERR(buf))
1859 		return buf;
1860 
1861 	/*
1862 	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
1863 	 * the length of the real data. Throw away the extra junk.
1864 	 */
1865 	if (nbits)
1866 		*len = DIV_ROUND_UP(nbits, 8);
1867 
1868 	if (*len > max_len) {
1869 		kfree(buf);
1870 		return ERR_PTR(-ERANGE);
1871 	}
1872 
1873 	return buf;
1874 }
1875 
1876 /**
1877  * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
1878  *
1879  * @dev: Device that requests the nvmem cell.
1880  * @cell_id: Name of nvmem cell to read.
1881  * @val: pointer to output value.
1882  *
1883  * Return: 0 on success or negative errno.
1884  */
nvmem_cell_read_variable_le_u32(struct device * dev,const char * cell_id,u32 * val)1885 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
1886 				    u32 *val)
1887 {
1888 	size_t len;
1889 	const u8 *buf;
1890 	int i;
1891 
1892 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1893 	if (IS_ERR(buf))
1894 		return PTR_ERR(buf);
1895 
1896 	/* Copy w/ implicit endian conversion */
1897 	*val = 0;
1898 	for (i = 0; i < len; i++)
1899 		*val |= buf[i] << (8 * i);
1900 
1901 	kfree(buf);
1902 
1903 	return 0;
1904 }
1905 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
1906 
1907 /**
1908  * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
1909  *
1910  * @dev: Device that requests the nvmem cell.
1911  * @cell_id: Name of nvmem cell to read.
1912  * @val: pointer to output value.
1913  *
1914  * Return: 0 on success or negative errno.
1915  */
nvmem_cell_read_variable_le_u64(struct device * dev,const char * cell_id,u64 * val)1916 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
1917 				    u64 *val)
1918 {
1919 	size_t len;
1920 	const u8 *buf;
1921 	int i;
1922 
1923 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1924 	if (IS_ERR(buf))
1925 		return PTR_ERR(buf);
1926 
1927 	/* Copy w/ implicit endian conversion */
1928 	*val = 0;
1929 	for (i = 0; i < len; i++)
1930 		*val |= (uint64_t)buf[i] << (8 * i);
1931 
1932 	kfree(buf);
1933 
1934 	return 0;
1935 }
1936 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
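
/*
 * Usage sketch (editorial addition): unlike the fixed-width helpers,
 * the variable-length readers accept any cell up to sizeof(*val)
 * bytes (capped by @nbits when set) and assemble it little-endian,
 * so e.g. a 3-byte fuse cell still fills a u32.  The cell name is an
 * assumption.
 *
 *	u32 fuse;
 *	int ret;
 *
 *	ret = nvmem_cell_read_variable_le_u32(dev, "fuse-field", &fuse);
 *	if (ret)
 *		return ret;
 */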

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
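
/*
 * Usage sketch (editorial addition): the nvmem_device_cell_*() calls
 * describe a cell ad hoc via struct nvmem_cell_info instead of a
 * registered cell.  The name, offset and size are illustrative.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *	u8 mac[6];
 *	ssize_t ret;
 *
 *	ret = nvmem_device_cell_read(nvmem, &info, mac);
 *	if (ret < 0)
 *		return ret;
 */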

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
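
/*
 * Usage sketch (editorial addition): raw device access bypasses cells
 * and works on absolute offsets; both calls return the number of
 * bytes transferred on success.  The lookup name "eeprom0" and the
 * offsets are assumptions.
 *
 *	struct nvmem_device *nvmem;
 *	u8 id[4];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "eeprom0");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x0, sizeof(id), id);
 *	if (ret == sizeof(id))
 *		ret = nvmem_device_write(nvmem, 0x100, sizeof(id), id);
 *
 *	nvmem_device_put(nvmem);
 */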

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
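
/*
 * Usage sketch (editorial addition): board code can attach
 * machine-specific cells to a provider by name.  The provider name
 * "at24-eeprom0" and the cell layout are assumptions.
 *
 *	static const struct nvmem_cell_info board_cells[] = {
 *		{ .name = "serial", .offset = 0x00, .bytes = 8 },
 *		{ .name = "mac",    .offset = 0x10, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name	= "at24-eeprom0",
 *		.cells		= board_cells,
 *		.ncells		= ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);
 */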

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
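
/*
 * Usage sketch (editorial addition): a lookup entry ties a named cell
 * on a provider to a consumer device so that nvmem_cell_get() can
 * resolve it without devicetree.  All names below are hypothetical.
 *
 *	static struct nvmem_cell_lookup board_lookups[] = {
 *		{
 *			.nvmem_name	= "at24-eeprom0",
 *			.cell_name	= "mac",
 *			.dev_id		= "fec1",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 */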

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);
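
/*
 * Usage sketch (editorial addition): the accessors above are handy
 * for consumer diagnostics without reaching into struct nvmem_device:
 *
 *	dev_info(dev, "using %s (%zu bytes)\n",
 *		 nvmem_dev_name(nvmem), nvmem_dev_size(nvmem));
 */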

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");