xref: /openbmc/linux/drivers/nvmem/core.c (revision 26e2fe4c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * nvmem framework core.
4  *
5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/of.h>
20 #include <linux/slab.h>
21 
22 struct nvmem_device {
23 	struct module		*owner;
24 	struct device		dev;
25 	int			stride;
26 	int			word_size;
27 	int			id;
28 	struct kref		refcnt;
29 	size_t			size;
30 	bool			read_only;
31 	bool			root_only;
32 	int			flags;
33 	enum nvmem_type		type;
34 	struct bin_attribute	eeprom;
35 	struct device		*base_dev;
36 	struct list_head	cells;
37 	const struct nvmem_keepout *keepout;
38 	unsigned int		nkeepout;
39 	nvmem_reg_read_t	reg_read;
40 	nvmem_reg_write_t	reg_write;
41 	struct gpio_desc	*wp_gpio;
42 	struct nvmem_layout	*layout;
43 	void *priv;
44 };
45 
46 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
47 
48 #define FLAG_COMPAT		BIT(0)
49 struct nvmem_cell_entry {
50 	const char		*name;
51 	int			offset;
52 	size_t			raw_len;
53 	int			bytes;
54 	int			bit_offset;
55 	int			nbits;
56 	nvmem_cell_post_process_t read_post_process;
57 	void			*priv;
58 	struct device_node	*np;
59 	struct nvmem_device	*nvmem;
60 	struct list_head	node;
61 };
62 
63 struct nvmem_cell {
64 	struct nvmem_cell_entry *entry;
65 	const char		*id;
66 	int			index;
67 };
68 
69 static DEFINE_MUTEX(nvmem_mutex);
70 static DEFINE_IDA(nvmem_ida);
71 
72 static DEFINE_MUTEX(nvmem_cell_mutex);
73 static LIST_HEAD(nvmem_cell_tables);
74 
75 static DEFINE_MUTEX(nvmem_lookup_mutex);
76 static LIST_HEAD(nvmem_lookup_list);
77 
78 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
79 
80 static DEFINE_SPINLOCK(nvmem_layout_lock);
81 static LIST_HEAD(nvmem_layouts);
82 
83 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
84 			    void *val, size_t bytes)
85 {
86 	if (nvmem->reg_read)
87 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
88 
89 	return -EINVAL;
90 }
91 
92 static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
93 			     void *val, size_t bytes)
94 {
95 	int ret;
96 
97 	if (nvmem->reg_write) {
98 		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
99 		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
100 		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
101 		return ret;
102 	}
103 
104 	return -EINVAL;
105 }
106 
107 static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
108 				      unsigned int offset, void *val,
109 				      size_t bytes, int write)
110 {
111 
112 	unsigned int end = offset + bytes;
113 	unsigned int kend, ksize;
114 	const struct nvmem_keepout *keepout = nvmem->keepout;
115 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
116 	int rc;
117 
118 	/*
119 	 * Skip all keepouts before the range being accessed.
120 	 * Keepouts are sorted.
121 	 */
122 	while ((keepout < keepoutend) && (keepout->end <= offset))
123 		keepout++;
124 
125 	while ((offset < end) && (keepout < keepoutend)) {
126 		/* Access the valid portion before the keepout. */
127 		if (offset < keepout->start) {
128 			kend = min(end, keepout->start);
129 			ksize = kend - offset;
130 			if (write)
131 				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
132 			else
133 				rc = __nvmem_reg_read(nvmem, offset, val, ksize);
134 
135 			if (rc)
136 				return rc;
137 
138 			offset += ksize;
139 			val += ksize;
140 		}
141 
142 		/*
143 		 * Now we're aligned to the start of this keepout zone. Go
144 		 * through it.
145 		 */
146 		kend = min(end, keepout->end);
147 		ksize = kend - offset;
148 		if (!write)
149 			memset(val, keepout->value, ksize);
150 
151 		val += ksize;
152 		offset += ksize;
153 		keepout++;
154 	}
155 
156 	/*
157 	 * If we ran out of keepouts but there's still stuff to do, send it
158 	 * down directly
159 	 */
160 	if (offset < end) {
161 		ksize = end - offset;
162 		if (write)
163 			return __nvmem_reg_write(nvmem, offset, val, ksize);
164 		else
165 			return __nvmem_reg_read(nvmem, offset, val, ksize);
166 	}
167 
168 	return 0;
169 }
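
/*
 * Worked example (editor's illustration, not from the original source): with
 * a single keepout covering bytes [4, 8) whose ->value is 0xff, an 8-byte
 * read at offset 2 is split into __nvmem_reg_read(2, 2 bytes), a memset of
 * 0xff over the 4 keepout bytes, and a final __nvmem_reg_read(8, 2 bytes) for
 * the remainder; a write follows the same split but simply skips the keepout.
 */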
170 
171 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
172 			  void *val, size_t bytes)
173 {
174 	if (!nvmem->nkeepout)
175 		return __nvmem_reg_read(nvmem, offset, val, bytes);
176 
177 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
178 }
179 
180 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
181 			   void *val, size_t bytes)
182 {
183 	if (!nvmem->nkeepout)
184 		return __nvmem_reg_write(nvmem, offset, val, bytes);
185 
186 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
187 }
188 
189 #ifdef CONFIG_NVMEM_SYSFS
190 static const char * const nvmem_type_str[] = {
191 	[NVMEM_TYPE_UNKNOWN] = "Unknown",
192 	[NVMEM_TYPE_EEPROM] = "EEPROM",
193 	[NVMEM_TYPE_OTP] = "OTP",
194 	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
195 	[NVMEM_TYPE_FRAM] = "FRAM",
196 };
197 
198 #ifdef CONFIG_DEBUG_LOCK_ALLOC
199 static struct lock_class_key eeprom_lock_key;
200 #endif
201 
202 static ssize_t type_show(struct device *dev,
203 			 struct device_attribute *attr, char *buf)
204 {
205 	struct nvmem_device *nvmem = to_nvmem_device(dev);
206 
207 	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
208 }
209 
210 static DEVICE_ATTR_RO(type);
211 
212 static struct attribute *nvmem_attrs[] = {
213 	&dev_attr_type.attr,
214 	NULL,
215 };
216 
217 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
218 				   struct bin_attribute *attr, char *buf,
219 				   loff_t pos, size_t count)
220 {
221 	struct device *dev;
222 	struct nvmem_device *nvmem;
223 	int rc;
224 
225 	if (attr->private)
226 		dev = attr->private;
227 	else
228 		dev = kobj_to_dev(kobj);
229 	nvmem = to_nvmem_device(dev);
230 
231 	/* Stop the user from reading */
232 	if (pos >= nvmem->size)
233 		return 0;
234 
235 	if (!IS_ALIGNED(pos, nvmem->stride))
236 		return -EINVAL;
237 
238 	if (count < nvmem->word_size)
239 		return -EINVAL;
240 
241 	if (pos + count > nvmem->size)
242 		count = nvmem->size - pos;
243 
244 	count = round_down(count, nvmem->word_size);
245 
246 	if (!nvmem->reg_read)
247 		return -EPERM;
248 
249 	rc = nvmem_reg_read(nvmem, pos, buf, count);
250 
251 	if (rc)
252 		return rc;
253 
254 	return count;
255 }
256 
257 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
258 				    struct bin_attribute *attr, char *buf,
259 				    loff_t pos, size_t count)
260 {
261 	struct device *dev;
262 	struct nvmem_device *nvmem;
263 	int rc;
264 
265 	if (attr->private)
266 		dev = attr->private;
267 	else
268 		dev = kobj_to_dev(kobj);
269 	nvmem = to_nvmem_device(dev);
270 
271 	/* Stop the user from writing */
272 	if (pos >= nvmem->size)
273 		return -EFBIG;
274 
275 	if (!IS_ALIGNED(pos, nvmem->stride))
276 		return -EINVAL;
277 
278 	if (count < nvmem->word_size)
279 		return -EINVAL;
280 
281 	if (pos + count > nvmem->size)
282 		count = nvmem->size - pos;
283 
284 	count = round_down(count, nvmem->word_size);
285 
286 	if (!nvmem->reg_write)
287 		return -EPERM;
288 
289 	rc = nvmem_reg_write(nvmem, pos, buf, count);
290 
291 	if (rc)
292 		return rc;
293 
294 	return count;
295 }
296 
297 static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
298 {
299 	umode_t mode = 0400;
300 
301 	if (!nvmem->root_only)
302 		mode |= 0044;
303 
304 	if (!nvmem->read_only)
305 		mode |= 0200;
306 
307 	if (!nvmem->reg_write)
308 		mode &= ~0200;
309 
310 	if (!nvmem->reg_read)
311 		mode &= ~0444;
312 
313 	return mode;
314 }
315 
316 static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
317 					 struct bin_attribute *attr, int i)
318 {
319 	struct device *dev = kobj_to_dev(kobj);
320 	struct nvmem_device *nvmem = to_nvmem_device(dev);
321 
322 	attr->size = nvmem->size;
323 
324 	return nvmem_bin_attr_get_umode(nvmem);
325 }
326 
327 /* default read/write permissions */
328 static struct bin_attribute bin_attr_rw_nvmem = {
329 	.attr	= {
330 		.name	= "nvmem",
331 		.mode	= 0644,
332 	},
333 	.read	= bin_attr_nvmem_read,
334 	.write	= bin_attr_nvmem_write,
335 };
336 
337 static struct bin_attribute *nvmem_bin_attributes[] = {
338 	&bin_attr_rw_nvmem,
339 	NULL,
340 };
341 
342 static const struct attribute_group nvmem_bin_group = {
343 	.bin_attrs	= nvmem_bin_attributes,
344 	.attrs		= nvmem_attrs,
345 	.is_bin_visible = nvmem_bin_attr_is_visible,
346 };
347 
348 static const struct attribute_group *nvmem_dev_groups[] = {
349 	&nvmem_bin_group,
350 	NULL,
351 };
352 
353 static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
354 	.attr	= {
355 		.name	= "eeprom",
356 	},
357 	.read	= bin_attr_nvmem_read,
358 	.write	= bin_attr_nvmem_write,
359 };
360 
361 /*
362  * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
363  * driver's sysfs directory, to be backwards compatible with the older
364  * drivers/misc/eeprom drivers.
365  */
366 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
367 				    const struct nvmem_config *config)
368 {
369 	int rval;
370 
371 	if (!config->compat)
372 		return 0;
373 
374 	if (!config->base_dev)
375 		return -EINVAL;
376 
377 	if (config->type == NVMEM_TYPE_FRAM)
378 		bin_attr_nvmem_eeprom_compat.attr.name = "fram";
379 
380 	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
381 	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
382 	nvmem->eeprom.size = nvmem->size;
383 #ifdef CONFIG_DEBUG_LOCK_ALLOC
384 	nvmem->eeprom.attr.key = &eeprom_lock_key;
385 #endif
386 	nvmem->eeprom.private = &nvmem->dev;
387 	nvmem->base_dev = config->base_dev;
388 
389 	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
390 	if (rval) {
391 		dev_err(&nvmem->dev,
392 			"Failed to create eeprom binary file %d\n", rval);
393 		return rval;
394 	}
395 
396 	nvmem->flags |= FLAG_COMPAT;
397 
398 	return 0;
399 }
400 
401 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
402 			      const struct nvmem_config *config)
403 {
404 	if (config->compat)
405 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
406 }
407 
408 #else /* CONFIG_NVMEM_SYSFS */
409 
410 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
411 				    const struct nvmem_config *config)
412 {
413 	return -ENOSYS;
414 }
415 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
416 				      const struct nvmem_config *config)
417 {
418 }
419 
420 #endif /* CONFIG_NVMEM_SYSFS */
421 
422 static void nvmem_release(struct device *dev)
423 {
424 	struct nvmem_device *nvmem = to_nvmem_device(dev);
425 
426 	ida_free(&nvmem_ida, nvmem->id);
427 	gpiod_put(nvmem->wp_gpio);
428 	kfree(nvmem);
429 }
430 
431 static const struct device_type nvmem_provider_type = {
432 	.release	= nvmem_release,
433 };
434 
435 static struct bus_type nvmem_bus_type = {
436 	.name		= "nvmem",
437 };
438 
439 static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
440 {
441 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
442 	mutex_lock(&nvmem_mutex);
443 	list_del(&cell->node);
444 	mutex_unlock(&nvmem_mutex);
445 	of_node_put(cell->np);
446 	kfree_const(cell->name);
447 	kfree(cell);
448 }
449 
450 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
451 {
452 	struct nvmem_cell_entry *cell, *p;
453 
454 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
455 		nvmem_cell_entry_drop(cell);
456 }
457 
458 static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
459 {
460 	mutex_lock(&nvmem_mutex);
461 	list_add_tail(&cell->node, &cell->nvmem->cells);
462 	mutex_unlock(&nvmem_mutex);
463 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
464 }
465 
466 static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
467 						     const struct nvmem_cell_info *info,
468 						     struct nvmem_cell_entry *cell)
469 {
470 	cell->nvmem = nvmem;
471 	cell->offset = info->offset;
472 	cell->raw_len = info->raw_len ?: info->bytes;
473 	cell->bytes = info->bytes;
474 	cell->name = info->name;
475 	cell->read_post_process = info->read_post_process;
476 	cell->priv = info->priv;
477 
478 	cell->bit_offset = info->bit_offset;
479 	cell->nbits = info->nbits;
480 	cell->np = info->np;
481 
482 	if (cell->nbits)
483 		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
484 					   BITS_PER_BYTE);
485 
486 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
487 		dev_err(&nvmem->dev,
488 			"cell %s unaligned to nvmem stride %d\n",
489 			cell->name ?: "<unknown>", nvmem->stride);
490 		return -EINVAL;
491 	}
492 
493 	return 0;
494 }
495 
496 static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
497 					       const struct nvmem_cell_info *info,
498 					       struct nvmem_cell_entry *cell)
499 {
500 	int err;
501 
502 	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
503 	if (err)
504 		return err;
505 
506 	cell->name = kstrdup_const(info->name, GFP_KERNEL);
507 	if (!cell->name)
508 		return -ENOMEM;
509 
510 	return 0;
511 }
512 
513 /**
514  * nvmem_add_one_cell() - Add one cell information to an nvmem device
515  *
516  * @nvmem: nvmem device to add cells to.
517  * @info: nvmem cell info to add to the device
518  *
519  * Return: 0 or negative error code on failure.
520  */
521 int nvmem_add_one_cell(struct nvmem_device *nvmem,
522 		       const struct nvmem_cell_info *info)
523 {
524 	struct nvmem_cell_entry *cell;
525 	int rval;
526 
527 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
528 	if (!cell)
529 		return -ENOMEM;
530 
531 	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
532 	if (rval) {
533 		kfree(cell);
534 		return rval;
535 	}
536 
537 	nvmem_cell_entry_add(cell);
538 
539 	return 0;
540 }
541 EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
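
/*
 * Example (editor's sketch, hypothetical names): a provider can describe a
 * 6-byte cell at offset 0x40 with an nvmem_cell_info and hand it to
 * nvmem_add_one_cell():
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 */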
542 
543 /**
544  * nvmem_add_cells() - Add cell information to an nvmem device
545  *
546  * @nvmem: nvmem device to add cells to.
547  * @info: nvmem cell info to add to the device
548  * @ncells: number of cells in info
549  *
550  * Return: 0 or negative error code on failure.
551  */
552 static int nvmem_add_cells(struct nvmem_device *nvmem,
553 		    const struct nvmem_cell_info *info,
554 		    int ncells)
555 {
556 	int i, rval;
557 
558 	for (i = 0; i < ncells; i++) {
559 		rval = nvmem_add_one_cell(nvmem, &info[i]);
560 		if (rval)
561 			return rval;
562 	}
563 
564 	return 0;
565 }
566 
567 /**
568  * nvmem_register_notifier() - Register a notifier block for nvmem events.
569  *
570  * @nb: notifier block to be called on nvmem events.
571  *
572  * Return: 0 on success, negative error number on failure.
573  */
574 int nvmem_register_notifier(struct notifier_block *nb)
575 {
576 	return blocking_notifier_chain_register(&nvmem_notifier, nb);
577 }
578 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
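
/*
 * Example (editor's sketch, hypothetical names): a listener for cell
 * add/remove events registers a notifier_block whose callback receives the
 * NVMEM_* event code and the affected object as @data:
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_CELL_ADD)
 *			pr_debug("new nvmem cell available\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */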
579 
580 /**
581  * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
582  *
583  * @nb: notifier block to be unregistered.
584  *
585  * Return: 0 on success, negative error number on failure.
586  */
587 int nvmem_unregister_notifier(struct notifier_block *nb)
588 {
589 	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
590 }
591 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
592 
593 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
594 {
595 	const struct nvmem_cell_info *info;
596 	struct nvmem_cell_table *table;
597 	struct nvmem_cell_entry *cell;
598 	int rval = 0, i;
599 
600 	mutex_lock(&nvmem_cell_mutex);
601 	list_for_each_entry(table, &nvmem_cell_tables, node) {
602 		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
603 			for (i = 0; i < table->ncells; i++) {
604 				info = &table->cells[i];
605 
606 				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
607 				if (!cell) {
608 					rval = -ENOMEM;
609 					goto out;
610 				}
611 
612 				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
613 				if (rval) {
614 					kfree(cell);
615 					goto out;
616 				}
617 
618 				nvmem_cell_entry_add(cell);
619 			}
620 		}
621 	}
622 
623 out:
624 	mutex_unlock(&nvmem_cell_mutex);
625 	return rval;
626 }
627 
628 static struct nvmem_cell_entry *
629 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
630 {
631 	struct nvmem_cell_entry *iter, *cell = NULL;
632 
633 	mutex_lock(&nvmem_mutex);
634 	list_for_each_entry(iter, &nvmem->cells, node) {
635 		if (strcmp(cell_id, iter->name) == 0) {
636 			cell = iter;
637 			break;
638 		}
639 	}
640 	mutex_unlock(&nvmem_mutex);
641 
642 	return cell;
643 }
644 
645 static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
646 {
647 	unsigned int cur = 0;
648 	const struct nvmem_keepout *keepout = nvmem->keepout;
649 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
650 
651 	while (keepout < keepoutend) {
652 		/* Ensure keepouts are sorted and don't overlap. */
653 		if (keepout->start < cur) {
654 			dev_err(&nvmem->dev,
655 				"Keepout regions aren't sorted or overlap.\n");
656 
657 			return -ERANGE;
658 		}
659 
660 		if (keepout->end < keepout->start) {
661 			dev_err(&nvmem->dev,
662 				"Invalid keepout region.\n");
663 
664 			return -EINVAL;
665 		}
666 
667 		/*
668 		 * Validate keepouts (and holes between) don't violate
669 		 * word_size constraints.
670 		 */
671 		if ((keepout->end - keepout->start < nvmem->word_size) ||
672 		    ((keepout->start != cur) &&
673 		     (keepout->start - cur < nvmem->word_size))) {
674 
675 			dev_err(&nvmem->dev,
676 				"Keepout regions violate word_size constraints.\n");
677 
678 			return -ERANGE;
679 		}
680 
681 		/* Validate keepouts don't violate stride (alignment). */
682 		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
683 		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
684 
685 			dev_err(&nvmem->dev,
686 				"Keepout regions violate stride.\n");
687 
688 			return -EINVAL;
689 		}
690 
691 		cur = keepout->end;
692 		keepout++;
693 	}
694 
695 	return 0;
696 }
697 
698 static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
699 {
700 	struct nvmem_layout *layout = nvmem->layout;
701 	struct device *dev = &nvmem->dev;
702 	struct device_node *child;
703 	const __be32 *addr;
704 	int len, ret;
705 
706 	for_each_child_of_node(np, child) {
707 		struct nvmem_cell_info info = {0};
708 
709 		addr = of_get_property(child, "reg", &len);
710 		if (!addr)
711 			continue;
712 		if (len < 2 * sizeof(u32)) {
713 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
714 			of_node_put(child);
715 			return -EINVAL;
716 		}
717 
718 		info.offset = be32_to_cpup(addr++);
719 		info.bytes = be32_to_cpup(addr);
720 		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
721 
722 		addr = of_get_property(child, "bits", &len);
723 		if (addr && len == (2 * sizeof(u32))) {
724 			info.bit_offset = be32_to_cpup(addr++);
725 			info.nbits = be32_to_cpup(addr);
726 		}
727 
728 		info.np = of_node_get(child);
729 
730 		if (layout && layout->fixup_cell_info)
731 			layout->fixup_cell_info(nvmem, layout, &info);
732 
733 		ret = nvmem_add_one_cell(nvmem, &info);
734 		kfree(info.name);
735 		if (ret) {
736 			of_node_put(child);
737 			return ret;
738 		}
739 	}
740 
741 	return 0;
742 }
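
/*
 * Example (editor's illustration, hypothetical node names): the devicetree
 * shape parsed above; "reg" is <byte-offset byte-size> and the optional
 * "bits" property is <bit-offset number-of-bits> within that range:
 *
 *	eeprom@52 {
 *		calib: calibration@10 {
 *			reg = <0x10 0x2>;
 *			bits = <4 10>;
 *		};
 *	};
 */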
743 
744 static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
745 {
746 	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
747 }
748 
749 static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
750 {
751 	struct device_node *layout_np;
752 	int err = 0;
753 
754 	layout_np = of_nvmem_layout_get_container(nvmem);
755 	if (!layout_np)
756 		return 0;
757 
758 	if (of_device_is_compatible(layout_np, "fixed-layout"))
759 		err = nvmem_add_cells_from_dt(nvmem, layout_np);
760 
761 	of_node_put(layout_np);
762 
763 	return err;
764 }
765 
766 int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
767 {
768 	layout->owner = owner;
769 
770 	spin_lock(&nvmem_layout_lock);
771 	list_add(&layout->node, &nvmem_layouts);
772 	spin_unlock(&nvmem_layout_lock);
773 
774 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
775 
776 	return 0;
777 }
778 EXPORT_SYMBOL_GPL(__nvmem_layout_register);
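
/*
 * Example (editor's sketch, hypothetical names): a layout driver supplies an
 * of_match_table plus an add_cells() parser and is usually registered through
 * the nvmem_layout_register() wrapper from <linux/nvmem-provider.h>, which
 * passes THIS_MODULE as @owner:
 *
 *	static const struct of_device_id foo_layout_of_match[] = {
 *		{ .compatible = "foo,layout" },
 *		{ }
 *	};
 *
 *	static struct nvmem_layout foo_layout = {
 *		.name		= "foo-layout",
 *		.of_match_table	= foo_layout_of_match,
 *		.add_cells	= foo_add_cells,
 *	};
 *
 *	nvmem_layout_register(&foo_layout);
 */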
779 
780 void nvmem_layout_unregister(struct nvmem_layout *layout)
781 {
782 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
783 
784 	spin_lock(&nvmem_layout_lock);
785 	list_del(&layout->node);
786 	spin_unlock(&nvmem_layout_lock);
787 }
788 EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
789 
790 static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
791 {
792 	struct device_node *layout_np;
793 	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
794 
795 	layout_np = of_nvmem_layout_get_container(nvmem);
796 	if (!layout_np)
797 		return NULL;
798 
799 	/* Fixed layouts don't have a matching driver */
800 	if (of_device_is_compatible(layout_np, "fixed-layout")) {
801 		of_node_put(layout_np);
802 		return NULL;
803 	}
804 
805 	/*
806 	 * If the nvmem device was built in while the layout was built as a
807 	 * module, we must manually request loading of the layout driver,
808 	 * otherwise there will never be a match.
809 	 */
810 	of_request_module(layout_np);
811 
812 	spin_lock(&nvmem_layout_lock);
813 
814 	list_for_each_entry(l, &nvmem_layouts, node) {
815 		if (of_match_node(l->of_match_table, layout_np)) {
816 			if (try_module_get(l->owner))
817 				layout = l;
818 
819 			break;
820 		}
821 	}
822 
823 	spin_unlock(&nvmem_layout_lock);
824 	of_node_put(layout_np);
825 
826 	return layout;
827 }
828 
829 static void nvmem_layout_put(struct nvmem_layout *layout)
830 {
831 	if (layout)
832 		module_put(layout->owner);
833 }
834 
835 static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
836 {
837 	struct nvmem_layout *layout = nvmem->layout;
838 	int ret;
839 
840 	if (layout && layout->add_cells) {
841 		ret = layout->add_cells(&nvmem->dev, nvmem, layout);
842 		if (ret)
843 			return ret;
844 	}
845 
846 	return 0;
847 }
848 
849 #if IS_ENABLED(CONFIG_OF)
850 /**
851  * of_nvmem_layout_get_container() - Get OF node to layout container.
852  *
853  * @nvmem: nvmem device.
854  *
855  * Return: a node pointer with refcount incremented or NULL if no
856  * container exists. Use of_node_put() on it when done.
857  */
858 struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
859 {
860 	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
861 }
862 EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
863 #endif
864 
865 const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
866 					struct nvmem_layout *layout)
867 {
868 	struct device_node __maybe_unused *layout_np;
869 	const struct of_device_id *match;
870 
871 	layout_np = of_nvmem_layout_get_container(nvmem);
872 	match = of_match_node(layout->of_match_table, layout_np);
873 
874 	return match ? match->data : NULL;
875 }
876 EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
877 
878 /**
879  * nvmem_register() - Register an nvmem device for a given nvmem_config.
880  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
881  *
882  * @config: nvmem device configuration with which nvmem device is created.
883  *
884  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
885  * on success.
886  */
887 
888 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
889 {
890 	struct nvmem_device *nvmem;
891 	int rval;
892 
893 	if (!config->dev)
894 		return ERR_PTR(-EINVAL);
895 
896 	if (!config->reg_read && !config->reg_write)
897 		return ERR_PTR(-EINVAL);
898 
899 	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
900 	if (!nvmem)
901 		return ERR_PTR(-ENOMEM);
902 
903 	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
904 	if (rval < 0) {
905 		kfree(nvmem);
906 		return ERR_PTR(rval);
907 	}
908 
909 	nvmem->id = rval;
910 
911 	nvmem->dev.type = &nvmem_provider_type;
912 	nvmem->dev.bus = &nvmem_bus_type;
913 	nvmem->dev.parent = config->dev;
914 
915 	device_initialize(&nvmem->dev);
916 
917 	if (!config->ignore_wp)
918 		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
919 						    GPIOD_OUT_HIGH);
920 	if (IS_ERR(nvmem->wp_gpio)) {
921 		rval = PTR_ERR(nvmem->wp_gpio);
922 		nvmem->wp_gpio = NULL;
923 		goto err_put_device;
924 	}
925 
926 	kref_init(&nvmem->refcnt);
927 	INIT_LIST_HEAD(&nvmem->cells);
928 
929 	nvmem->owner = config->owner;
930 	if (!nvmem->owner && config->dev->driver)
931 		nvmem->owner = config->dev->driver->owner;
932 	nvmem->stride = config->stride ?: 1;
933 	nvmem->word_size = config->word_size ?: 1;
934 	nvmem->size = config->size;
935 	nvmem->root_only = config->root_only;
936 	nvmem->priv = config->priv;
937 	nvmem->type = config->type;
938 	nvmem->reg_read = config->reg_read;
939 	nvmem->reg_write = config->reg_write;
940 	nvmem->keepout = config->keepout;
941 	nvmem->nkeepout = config->nkeepout;
942 	if (config->of_node)
943 		nvmem->dev.of_node = config->of_node;
944 	else if (!config->no_of_node)
945 		nvmem->dev.of_node = config->dev->of_node;
946 
947 	switch (config->id) {
948 	case NVMEM_DEVID_NONE:
949 		rval = dev_set_name(&nvmem->dev, "%s", config->name);
950 		break;
951 	case NVMEM_DEVID_AUTO:
952 		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
953 		break;
954 	default:
955 		rval = dev_set_name(&nvmem->dev, "%s%d",
956 			     config->name ? : "nvmem",
957 			     config->name ? config->id : nvmem->id);
958 		break;
959 	}
960 
961 	if (rval)
962 		goto err_put_device;
963 
964 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
965 			   config->read_only || !nvmem->reg_write;
966 
967 #ifdef CONFIG_NVMEM_SYSFS
968 	nvmem->dev.groups = nvmem_dev_groups;
969 #endif
970 
971 	if (nvmem->nkeepout) {
972 		rval = nvmem_validate_keepouts(nvmem);
973 		if (rval)
974 			goto err_put_device;
975 	}
976 
977 	if (config->compat) {
978 		rval = nvmem_sysfs_setup_compat(nvmem, config);
979 		if (rval)
980 			goto err_put_device;
981 	}
982 
983 	/*
984 	 * If the driver supplied a layout by config->layout, the module
985 	 * pointer will be NULL and nvmem_layout_put() will be a noop.
986 	 */
987 	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
988 	if (IS_ERR(nvmem->layout)) {
989 		rval = PTR_ERR(nvmem->layout);
990 		nvmem->layout = NULL;
991 
992 		if (rval == -EPROBE_DEFER)
993 			goto err_teardown_compat;
994 	}
995 
996 	if (config->cells) {
997 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
998 		if (rval)
999 			goto err_remove_cells;
1000 	}
1001 
1002 	rval = nvmem_add_cells_from_table(nvmem);
1003 	if (rval)
1004 		goto err_remove_cells;
1005 
1006 	if (config->add_legacy_fixed_of_cells) {
1007 		rval = nvmem_add_cells_from_legacy_of(nvmem);
1008 		if (rval)
1009 			goto err_remove_cells;
1010 	}
1011 
1012 	rval = nvmem_add_cells_from_fixed_layout(nvmem);
1013 	if (rval)
1014 		goto err_remove_cells;
1015 
1016 	rval = nvmem_add_cells_from_layout(nvmem);
1017 	if (rval)
1018 		goto err_remove_cells;
1019 
1020 	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
1021 
1022 	rval = device_add(&nvmem->dev);
1023 	if (rval)
1024 		goto err_remove_cells;
1025 
1026 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
1027 
1028 	return nvmem;
1029 
1030 err_remove_cells:
1031 	nvmem_device_remove_all_cells(nvmem);
1032 	nvmem_layout_put(nvmem->layout);
1033 err_teardown_compat:
1034 	if (config->compat)
1035 		nvmem_sysfs_remove_compat(nvmem, config);
1036 err_put_device:
1037 	put_device(&nvmem->dev);
1038 
1039 	return ERR_PTR(rval);
1040 }
1041 EXPORT_SYMBOL_GPL(nvmem_register);
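
/*
 * Example (editor's sketch, hypothetical names): a minimal read-only provider
 * fills in an nvmem_config and registers it from its probe() routine:
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "foo-otp",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.read_only	= true,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.size		= 256,
 *		.reg_read	= foo_otp_reg_read,
 *		.priv		= priv,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */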
1042 
1043 static void nvmem_device_release(struct kref *kref)
1044 {
1045 	struct nvmem_device *nvmem;
1046 
1047 	nvmem = container_of(kref, struct nvmem_device, refcnt);
1048 
1049 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
1050 
1051 	if (nvmem->flags & FLAG_COMPAT)
1052 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
1053 
1054 	nvmem_device_remove_all_cells(nvmem);
1055 	nvmem_layout_put(nvmem->layout);
1056 	device_unregister(&nvmem->dev);
1057 }
1058 
1059 /**
1060  * nvmem_unregister() - Unregister previously registered nvmem device
1061  *
1062  * @nvmem: Pointer to previously registered nvmem device.
1063  */
1064 void nvmem_unregister(struct nvmem_device *nvmem)
1065 {
1066 	if (nvmem)
1067 		kref_put(&nvmem->refcnt, nvmem_device_release);
1068 }
1069 EXPORT_SYMBOL_GPL(nvmem_unregister);
1070 
1071 static void devm_nvmem_unregister(void *nvmem)
1072 {
1073 	nvmem_unregister(nvmem);
1074 }
1075 
1076 /**
1077  * devm_nvmem_register() - Register a managed nvmem device for a given
1078  * nvmem_config.
1079  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
1080  *
1081  * @dev: Device that uses the nvmem device.
1082  * @config: nvmem device configuration with which nvmem device is created.
1083  *
1084  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
1085  * on success.
1086  */
1087 struct nvmem_device *devm_nvmem_register(struct device *dev,
1088 					 const struct nvmem_config *config)
1089 {
1090 	struct nvmem_device *nvmem;
1091 	int ret;
1092 
1093 	nvmem = nvmem_register(config);
1094 	if (IS_ERR(nvmem))
1095 		return nvmem;
1096 
1097 	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
1098 	if (ret)
1099 		return ERR_PTR(ret);
1100 
1101 	return nvmem;
1102 }
1103 EXPORT_SYMBOL_GPL(devm_nvmem_register);
1104 
1105 static struct nvmem_device *__nvmem_device_get(void *data,
1106 			int (*match)(struct device *dev, const void *data))
1107 {
1108 	struct nvmem_device *nvmem = NULL;
1109 	struct device *dev;
1110 
1111 	mutex_lock(&nvmem_mutex);
1112 	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
1113 	if (dev)
1114 		nvmem = to_nvmem_device(dev);
1115 	mutex_unlock(&nvmem_mutex);
1116 	if (!nvmem)
1117 		return ERR_PTR(-EPROBE_DEFER);
1118 
1119 	if (!try_module_get(nvmem->owner)) {
1120 		dev_err(&nvmem->dev,
1121 			"could not increase module refcount for cell %s\n",
1122 			nvmem_dev_name(nvmem));
1123 
1124 		put_device(&nvmem->dev);
1125 		return ERR_PTR(-EINVAL);
1126 	}
1127 
1128 	kref_get(&nvmem->refcnt);
1129 
1130 	return nvmem;
1131 }
1132 
1133 static void __nvmem_device_put(struct nvmem_device *nvmem)
1134 {
1135 	put_device(&nvmem->dev);
1136 	module_put(nvmem->owner);
1137 	kref_put(&nvmem->refcnt, nvmem_device_release);
1138 }
1139 
1140 #if IS_ENABLED(CONFIG_OF)
1141 /**
1142  * of_nvmem_device_get() - Get nvmem device from a given id
1143  *
1144  * @np: Device tree node that uses the nvmem device.
1145  * @id: nvmem name from nvmem-names property.
1146  *
1147  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1148  * on success.
1149  */
1150 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
1151 {
1152 
1153 	struct device_node *nvmem_np;
1154 	struct nvmem_device *nvmem;
1155 	int index = 0;
1156 
1157 	if (id)
1158 		index = of_property_match_string(np, "nvmem-names", id);
1159 
1160 	nvmem_np = of_parse_phandle(np, "nvmem", index);
1161 	if (!nvmem_np)
1162 		return ERR_PTR(-ENOENT);
1163 
1164 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1165 	of_node_put(nvmem_np);
1166 	return nvmem;
1167 }
1168 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
1169 #endif
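
/*
 * Example (editor's illustration, hypothetical labels): a consumer node that
 * references a whole nvmem provider looks like this, after which
 * of_nvmem_device_get(np, "config") resolves to the &eeprom device:
 *
 *	foo {
 *		nvmem = <&eeprom>;
 *		nvmem-names = "config";
 *	};
 */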
1170 
1171 /**
1172  * nvmem_device_get() - Get nvmem device from a given id
1173  *
1174  * @dev: Device that uses the nvmem device.
1175  * @dev_name: name of the requested nvmem device.
1176  *
1177  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1178  * on success.
1179  */
1180 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
1181 {
1182 	if (dev->of_node) { /* try dt first */
1183 		struct nvmem_device *nvmem;
1184 
1185 		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
1186 
1187 		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
1188 			return nvmem;
1189 
1190 	}
1191 
1192 	return __nvmem_device_get((void *)dev_name, device_match_name);
1193 }
1194 EXPORT_SYMBOL_GPL(nvmem_device_get);
1195 
1196 /**
1197  * nvmem_device_find() - Find nvmem device with matching function
1198  *
1199  * @data: Data to pass to match function
1200  * @match: Callback function to check device
1201  *
1202  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1203  * on success.
1204  */
1205 struct nvmem_device *nvmem_device_find(void *data,
1206 			int (*match)(struct device *dev, const void *data))
1207 {
1208 	return __nvmem_device_get(data, match);
1209 }
1210 EXPORT_SYMBOL_GPL(nvmem_device_find);
1211 
1212 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
1213 {
1214 	struct nvmem_device **nvmem = res;
1215 
1216 	if (WARN_ON(!nvmem || !*nvmem))
1217 		return 0;
1218 
1219 	return *nvmem == data;
1220 }
1221 
1222 static void devm_nvmem_device_release(struct device *dev, void *res)
1223 {
1224 	nvmem_device_put(*(struct nvmem_device **)res);
1225 }
1226 
1227 /**
1228  * devm_nvmem_device_put() - put an already-obtained nvmem device
1229  *
1230  * @dev: Device that uses the nvmem device.
1231  * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
1232  * that needs to be released.
1233  */
1234 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
1235 {
1236 	int ret;
1237 
1238 	ret = devres_release(dev, devm_nvmem_device_release,
1239 			     devm_nvmem_device_match, nvmem);
1240 
1241 	WARN_ON(ret);
1242 }
1243 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
1244 
1245 /**
1246  * nvmem_device_put() - put an already-obtained nvmem device
1247  *
1248  * @nvmem: pointer to nvmem device that needs to be released.
1249  */
1250 void nvmem_device_put(struct nvmem_device *nvmem)
1251 {
1252 	__nvmem_device_put(nvmem);
1253 }
1254 EXPORT_SYMBOL_GPL(nvmem_device_put);
1255 
1256 /**
1257  * devm_nvmem_device_get() - Get nvmem device from a given id
1258  *
1259  * @dev: Device that requests the nvmem device.
1260  * @id: name id for the requested nvmem device.
1261  *
1262  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1263  * on success.  The nvmem_device will be released automatically once the
1264  * device is freed.
1265  */
1266 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
1267 {
1268 	struct nvmem_device **ptr, *nvmem;
1269 
1270 	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
1271 	if (!ptr)
1272 		return ERR_PTR(-ENOMEM);
1273 
1274 	nvmem = nvmem_device_get(dev, id);
1275 	if (!IS_ERR(nvmem)) {
1276 		*ptr = nvmem;
1277 		devres_add(dev, ptr);
1278 	} else {
1279 		devres_free(ptr);
1280 	}
1281 
1282 	return nvmem;
1283 }
1284 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
1285 
1286 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
1287 					    const char *id, int index)
1288 {
1289 	struct nvmem_cell *cell;
1290 	const char *name = NULL;
1291 
1292 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
1293 	if (!cell)
1294 		return ERR_PTR(-ENOMEM);
1295 
1296 	if (id) {
1297 		name = kstrdup_const(id, GFP_KERNEL);
1298 		if (!name) {
1299 			kfree(cell);
1300 			return ERR_PTR(-ENOMEM);
1301 		}
1302 	}
1303 
1304 	cell->id = name;
1305 	cell->entry = entry;
1306 	cell->index = index;
1307 
1308 	return cell;
1309 }
1310 
1311 static struct nvmem_cell *
1312 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
1313 {
1314 	struct nvmem_cell_entry *cell_entry;
1315 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
1316 	struct nvmem_cell_lookup *lookup;
1317 	struct nvmem_device *nvmem;
1318 	const char *dev_id;
1319 
1320 	if (!dev)
1321 		return ERR_PTR(-EINVAL);
1322 
1323 	dev_id = dev_name(dev);
1324 
1325 	mutex_lock(&nvmem_lookup_mutex);
1326 
1327 	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
1328 		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
1329 		    (strcmp(lookup->con_id, con_id) == 0)) {
1330 			/* This is the right entry. */
1331 			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
1332 						   device_match_name);
1333 			if (IS_ERR(nvmem)) {
1334 				/* Provider may not be registered yet. */
1335 				cell = ERR_CAST(nvmem);
1336 				break;
1337 			}
1338 
1339 			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
1340 								   lookup->cell_name);
1341 			if (!cell_entry) {
1342 				__nvmem_device_put(nvmem);
1343 				cell = ERR_PTR(-ENOENT);
1344 			} else {
1345 				cell = nvmem_create_cell(cell_entry, con_id, 0);
1346 				if (IS_ERR(cell))
1347 					__nvmem_device_put(nvmem);
1348 			}
1349 			break;
1350 		}
1351 	}
1352 
1353 	mutex_unlock(&nvmem_lookup_mutex);
1354 	return cell;
1355 }
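
/*
 * Example (editor's sketch, hypothetical names): on non-DT platforms board
 * code routes a named cell to a consumer with a lookup entry, registered via
 * nvmem_add_cell_lookups() from <linux/nvmem-consumer.h>:
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "foo-net.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */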
1356 
1357 #if IS_ENABLED(CONFIG_OF)
1358 static struct nvmem_cell_entry *
1359 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
1360 {
1361 	struct nvmem_cell_entry *iter, *cell = NULL;
1362 
1363 	mutex_lock(&nvmem_mutex);
1364 	list_for_each_entry(iter, &nvmem->cells, node) {
1365 		if (np == iter->np) {
1366 			cell = iter;
1367 			break;
1368 		}
1369 	}
1370 	mutex_unlock(&nvmem_mutex);
1371 
1372 	return cell;
1373 }
1374 
1375 /**
1376  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1377  *
1378  * @np: Device tree node that uses the nvmem cell.
1379  * @id: nvmem cell name from nvmem-cell-names property, or NULL
1380  *      for the cell at index 0 (the lone cell with no accompanying
1381  *      nvmem-cell-names property).
1382  *
1383  * Return: Will be an ERR_PTR() on error or a valid pointer
1384  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
1385  * nvmem_cell_put().
1386  */
1387 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1388 {
1389 	struct device_node *cell_np, *nvmem_np;
1390 	struct nvmem_device *nvmem;
1391 	struct nvmem_cell_entry *cell_entry;
1392 	struct nvmem_cell *cell;
1393 	struct of_phandle_args cell_spec;
1394 	int index = 0;
1395 	int cell_index = 0;
1396 	int ret;
1397 
1398 	/* if cell name exists, find index to the name */
1399 	if (id)
1400 		index = of_property_match_string(np, "nvmem-cell-names", id);
1401 
1402 	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
1403 						  "#nvmem-cell-cells",
1404 						  index, &cell_spec);
1405 	if (ret)
1406 		return ERR_PTR(-ENOENT);
1407 
1408 	if (cell_spec.args_count > 1)
1409 		return ERR_PTR(-EINVAL);
1410 
1411 	cell_np = cell_spec.np;
1412 	if (cell_spec.args_count)
1413 		cell_index = cell_spec.args[0];
1414 
1415 	nvmem_np = of_get_parent(cell_np);
1416 	if (!nvmem_np) {
1417 		of_node_put(cell_np);
1418 		return ERR_PTR(-EINVAL);
1419 	}
1420 
1421 	/* nvmem layouts produce cells within the nvmem-layout container */
1422 	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
1423 		nvmem_np = of_get_next_parent(nvmem_np);
1424 		if (!nvmem_np) {
1425 			of_node_put(cell_np);
1426 			return ERR_PTR(-EINVAL);
1427 		}
1428 	}
1429 
1430 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1431 	of_node_put(nvmem_np);
1432 	if (IS_ERR(nvmem)) {
1433 		of_node_put(cell_np);
1434 		return ERR_CAST(nvmem);
1435 	}
1436 
1437 	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
1438 	of_node_put(cell_np);
1439 	if (!cell_entry) {
1440 		__nvmem_device_put(nvmem);
1441 		return ERR_PTR(-ENOENT);
1442 	}
1443 
1444 	cell = nvmem_create_cell(cell_entry, id, cell_index);
1445 	if (IS_ERR(cell))
1446 		__nvmem_device_put(nvmem);
1447 
1448 	return cell;
1449 }
1450 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1451 #endif
1452 
1453 /**
1454  * nvmem_cell_get() - Get nvmem cell of device from a given cell name
1455  *
1456  * @dev: Device that requests the nvmem cell.
1457  * @id: nvmem cell name to get (this corresponds with the name from the
1458  *      nvmem-cell-names property for DT systems and with the con_id from
1459  *      the lookup entry for non-DT systems).
1460  *
1461  * Return: Will be an ERR_PTR() on error or a valid pointer
1462  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
1463  * nvmem_cell_put().
1464  */
1465 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1466 {
1467 	struct nvmem_cell *cell;
1468 
1469 	if (dev->of_node) { /* try dt first */
1470 		cell = of_nvmem_cell_get(dev->of_node, id);
1471 		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1472 			return cell;
1473 	}
1474 
1475 	/* NULL cell id only allowed for device tree; invalid otherwise */
1476 	if (!id)
1477 		return ERR_PTR(-EINVAL);
1478 
1479 	return nvmem_cell_get_from_lookup(dev, id);
1480 }
1481 EXPORT_SYMBOL_GPL(nvmem_cell_get);
1482 
1483 static void devm_nvmem_cell_release(struct device *dev, void *res)
1484 {
1485 	nvmem_cell_put(*(struct nvmem_cell **)res);
1486 }
1487 
1488 /**
1489  * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
1490  *
1491  * @dev: Device that requests the nvmem cell.
1492  * @id: nvmem cell name id to get.
1493  *
1494  * Return: Will be an ERR_PTR() on error or a valid pointer
1495  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
1496  * automatically once the device is freed.
1497  */
1498 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1499 {
1500 	struct nvmem_cell **ptr, *cell;
1501 
1502 	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1503 	if (!ptr)
1504 		return ERR_PTR(-ENOMEM);
1505 
1506 	cell = nvmem_cell_get(dev, id);
1507 	if (!IS_ERR(cell)) {
1508 		*ptr = cell;
1509 		devres_add(dev, ptr);
1510 	} else {
1511 		devres_free(ptr);
1512 	}
1513 
1514 	return cell;
1515 }
1516 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1517 
1518 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
1519 {
1520 	struct nvmem_cell **c = res;
1521 
1522 	if (WARN_ON(!c || !*c))
1523 		return 0;
1524 
1525 	return *c == data;
1526 }
1527 
1528 /**
1529  * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1530  * from devm_nvmem_cell_get.
1531  *
1532  * @dev: Device that requests the nvmem cell.
1533  * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1534  */
1535 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1536 {
1537 	int ret;
1538 
1539 	ret = devres_release(dev, devm_nvmem_cell_release,
1540 				devm_nvmem_cell_match, cell);
1541 
1542 	WARN_ON(ret);
1543 }
1544 EXPORT_SYMBOL(devm_nvmem_cell_put);
1545 
1546 /**
1547  * nvmem_cell_put() - Release previously allocated nvmem cell.
1548  *
1549  * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1550  */
1551 void nvmem_cell_put(struct nvmem_cell *cell)
1552 {
1553 	struct nvmem_device *nvmem = cell->entry->nvmem;
1554 
1555 	if (cell->id)
1556 		kfree_const(cell->id);
1557 
1558 	kfree(cell);
1559 	__nvmem_device_put(nvmem);
1560 }
1561 EXPORT_SYMBOL_GPL(nvmem_cell_put);
1562 
1563 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
1564 {
1565 	u8 *p, *b;
1566 	int i, extra, bit_offset = cell->bit_offset;
1567 
1568 	p = b = buf;
1569 	if (bit_offset) {
1570 		/* First shift */
1571 		*b++ >>= bit_offset;
1572 
1573 		/* setup rest of the bytes if any */
1574 		for (i = 1; i < cell->bytes; i++) {
1575 			/* Get bits from next byte and shift them towards msb */
1576 			*p |= *b << (BITS_PER_BYTE - bit_offset);
1577 
1578 			p = b;
1579 			*b++ >>= bit_offset;
1580 		}
1581 	} else {
1582 		/* point to the msb */
1583 		p += cell->bytes - 1;
1584 	}
1585 
1586 	/* result fits in less bytes */
1587 	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1588 	while (--extra >= 0)
1589 		*p-- = 0;
1590 
1591 	/* clear msb bits if any leftover in the last byte */
1592 	if (cell->nbits % BITS_PER_BYTE)
1593 		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
1594 }
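
/*
 * Worked example (editor's illustration): for a cell with bit_offset = 2 and
 * nbits = 10 the raw value spans two bytes.  The loop above shifts each byte
 * right by two, pulls the low two bits of the following byte into the vacated
 * high bits, and the final mask keeps only the two valid bits of the last
 * byte, leaving the 10-bit value right-aligned in the buffer.
 */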
1595 
1596 static int __nvmem_cell_read(struct nvmem_device *nvmem,
1597 			     struct nvmem_cell_entry *cell,
1598 			     void *buf, size_t *len, const char *id, int index)
1599 {
1600 	int rc;
1601 
1602 	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
1603 
1604 	if (rc)
1605 		return rc;
1606 
1607 	/* shift bits in-place */
1608 	if (cell->bit_offset || cell->nbits)
1609 		nvmem_shift_read_buffer_in_place(cell, buf);
1610 
1611 	if (cell->read_post_process) {
1612 		rc = cell->read_post_process(cell->priv, id, index,
1613 					     cell->offset, buf, cell->raw_len);
1614 		if (rc)
1615 			return rc;
1616 	}
1617 
1618 	if (len)
1619 		*len = cell->bytes;
1620 
1621 	return 0;
1622 }
1623 
1624 /**
1625  * nvmem_cell_read() - Read a given nvmem cell
1626  *
1627  * @cell: nvmem cell to be read.
1628  * @len: pointer to length of cell which will be populated on successful read;
1629  *	 can be NULL.
1630  *
1631  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1632  * buffer should be freed by the consumer with a kfree().
1633  */
1634 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1635 {
1636 	struct nvmem_cell_entry *entry = cell->entry;
1637 	struct nvmem_device *nvmem = entry->nvmem;
1638 	u8 *buf;
1639 	int rc;
1640 
1641 	if (!nvmem)
1642 		return ERR_PTR(-EINVAL);
1643 
1644 	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
1645 	if (!buf)
1646 		return ERR_PTR(-ENOMEM);
1647 
1648 	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
1649 	if (rc) {
1650 		kfree(buf);
1651 		return ERR_PTR(rc);
1652 	}
1653 
1654 	return buf;
1655 }
1656 EXPORT_SYMBOL_GPL(nvmem_cell_read);
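
/*
 * Example (editor's sketch, hypothetical cell name): the usual consumer flow
 * pairs nvmem_cell_get()/nvmem_cell_read() with nvmem_cell_put() and kfree():
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	... consume len bytes at mac ...
 *	kfree(mac);
 */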
1657 
1658 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
1659 					     u8 *_buf, int len)
1660 {
1661 	struct nvmem_device *nvmem = cell->nvmem;
1662 	int i, rc, nbits, bit_offset = cell->bit_offset;
1663 	u8 v, *p, *buf, *b, pbyte, pbits;
1664 
1665 	nbits = cell->nbits;
1666 	buf = kzalloc(cell->bytes, GFP_KERNEL);
1667 	if (!buf)
1668 		return ERR_PTR(-ENOMEM);
1669 
1670 	memcpy(buf, _buf, len);
1671 	p = b = buf;
1672 
1673 	if (bit_offset) {
1674 		pbyte = *b;
1675 		*b <<= bit_offset;
1676 
1677 		/* setup the first byte with lsb bits from nvmem */
1678 		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1679 		if (rc)
1680 			goto err;
1681 		*b++ |= GENMASK(bit_offset - 1, 0) & v;
1682 
1683 		/* setup the rest of the bytes if any */
1684 		for (i = 1; i < cell->bytes; i++) {
1685 			/* Get last byte bits and shift them towards lsb */
1686 			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1687 			pbyte = *b;
1688 			p = b;
1689 			*b <<= bit_offset;
1690 			*b++ |= pbits;
1691 		}
1692 	}
1693 
1694 	/* if it does not end on a byte boundary */
1695 	if ((nbits + bit_offset) % BITS_PER_BYTE) {
1696 		/* setup the last byte with msb bits from nvmem */
1697 		rc = nvmem_reg_read(nvmem,
1698 				    cell->offset + cell->bytes - 1, &v, 1);
1699 		if (rc)
1700 			goto err;
1701 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1702 
1703 	}
1704 
1705 	return buf;
1706 err:
1707 	kfree(buf);
1708 	return ERR_PTR(rc);
1709 }
1710 
1711 static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
1712 {
1713 	struct nvmem_device *nvmem = cell->nvmem;
1714 	int rc;
1715 
1716 	if (!nvmem || nvmem->read_only ||
1717 	    (cell->bit_offset == 0 && len != cell->bytes))
1718 		return -EINVAL;
1719 
1720 	/*
1721 	 * Any cells which have a read_post_process hook are read-only because
1722 	 * we cannot reverse the operation and it might affect other cells,
1723 	 * too.
1724 	 */
1725 	if (cell->read_post_process)
1726 		return -EINVAL;
1727 
1728 	if (cell->bit_offset || cell->nbits) {
1729 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1730 		if (IS_ERR(buf))
1731 			return PTR_ERR(buf);
1732 	}
1733 
1734 	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1735 
1736 	/* free the tmp buffer */
1737 	if (cell->bit_offset || cell->nbits)
1738 		kfree(buf);
1739 
1740 	if (rc)
1741 		return rc;
1742 
1743 	return len;
1744 }
1745 
1746 /**
1747  * nvmem_cell_write() - Write to a given nvmem cell
1748  *
1749  * @cell: nvmem cell to be written.
1750  * @buf: Buffer to be written.
1751  * @len: length of buffer to be written to nvmem cell.
1752  *
1753  * Return: length of bytes written or negative on failure.
1754  */
1755 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1756 {
1757 	return __nvmem_cell_entry_write(cell->entry, buf, len);
1758 }
1759 
1760 EXPORT_SYMBOL_GPL(nvmem_cell_write);
1761 
1762 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1763 				  void *val, size_t count)
1764 {
1765 	struct nvmem_cell *cell;
1766 	void *buf;
1767 	size_t len;
1768 
1769 	cell = nvmem_cell_get(dev, cell_id);
1770 	if (IS_ERR(cell))
1771 		return PTR_ERR(cell);
1772 
1773 	buf = nvmem_cell_read(cell, &len);
1774 	if (IS_ERR(buf)) {
1775 		nvmem_cell_put(cell);
1776 		return PTR_ERR(buf);
1777 	}
1778 	if (len != count) {
1779 		kfree(buf);
1780 		nvmem_cell_put(cell);
1781 		return -EINVAL;
1782 	}
1783 	memcpy(val, buf, count);
1784 	kfree(buf);
1785 	nvmem_cell_put(cell);
1786 
1787 	return 0;
1788 }
1789 
1790 /**
1791  * nvmem_cell_read_u8() - Read a cell value as a u8
1792  *
1793  * @dev: Device that requests the nvmem cell.
1794  * @cell_id: Name of nvmem cell to read.
1795  * @val: pointer to output value.
1796  *
1797  * Return: 0 on success or negative errno.
1798  */
1799 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
1800 {
1801 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1802 }
1803 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
1804 
1805 /**
1806  * nvmem_cell_read_u16() - Read a cell value as a u16
1807  *
1808  * @dev: Device that requests the nvmem cell.
1809  * @cell_id: Name of nvmem cell to read.
1810  * @val: pointer to output value.
1811  *
1812  * Return: 0 on success or negative errno.
1813  */
1814 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1815 {
1816 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1817 }
1818 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1819 
1820 /**
1821  * nvmem_cell_read_u32() - Read a cell value as a u32
1822  *
1823  * @dev: Device that requests the nvmem cell.
1824  * @cell_id: Name of nvmem cell to read.
1825  * @val: pointer to output value.
1826  *
1827  * Return: 0 on success or negative errno.
1828  */
1829 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1830 {
1831 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1832 }
1833 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
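
/*
 * Example (editor's sketch, hypothetical cell name): the fixed-width helpers
 * wrap the get/read/put sequence for a whole cell of exactly that size:
 *
 *	u32 calib;
 *	int err;
 *
 *	err = nvmem_cell_read_u32(dev, "calibration", &calib);
 *	if (err)
 *		return err;
 */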
1834 
1835 /**
1836  * nvmem_cell_read_u64() - Read a cell value as a u64
1837  *
1838  * @dev: Device that requests the nvmem cell.
1839  * @cell_id: Name of nvmem cell to read.
1840  * @val: pointer to output value.
1841  *
1842  * Return: 0 on success or negative errno.
1843  */
1844 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
1845 {
1846 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1847 }
1848 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1849 
1850 static const void *nvmem_cell_read_variable_common(struct device *dev,
1851 						   const char *cell_id,
1852 						   size_t max_len, size_t *len)
1853 {
1854 	struct nvmem_cell *cell;
1855 	int nbits;
1856 	void *buf;
1857 
1858 	cell = nvmem_cell_get(dev, cell_id);
1859 	if (IS_ERR(cell))
1860 		return cell;
1861 
1862 	nbits = cell->entry->nbits;
1863 	buf = nvmem_cell_read(cell, len);
1864 	nvmem_cell_put(cell);
1865 	if (IS_ERR(buf))
1866 		return buf;
1867 
1868 	/*
1869 	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
1870 	 * the length of the real data. Throw away the extra junk.
1871 	 */
1872 	if (nbits)
1873 		*len = DIV_ROUND_UP(nbits, 8);
1874 
1875 	if (*len > max_len) {
1876 		kfree(buf);
1877 		return ERR_PTR(-ERANGE);
1878 	}
1879 
1880 	return buf;
1881 }
1882 
1883 /**
1884  * nvmem_cell_read_variable_le_u32() - Read up to 32 bits of data as a little-endian number.
1885  *
1886  * @dev: Device that requests the nvmem cell.
1887  * @cell_id: Name of nvmem cell to read.
1888  * @val: pointer to output value.
1889  *
1890  * Return: 0 on success or negative errno.
1891  */
1892 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
1893 				    u32 *val)
1894 {
1895 	size_t len;
1896 	const u8 *buf;
1897 	int i;
1898 
1899 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1900 	if (IS_ERR(buf))
1901 		return PTR_ERR(buf);
1902 
1903 	/* Copy w/ implicit endian conversion */
1904 	*val = 0;
1905 	for (i = 0; i < len; i++)
1906 		*val |= buf[i] << (8 * i);
1907 
1908 	kfree(buf);
1909 
1910 	return 0;
1911 }
1912 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
1913 
1914 /**
1915  * nvmem_cell_read_variable_le_u64() - Read up to 64 bits of data as a little-endian number.
1916  *
1917  * @dev: Device that requests the nvmem cell.
1918  * @cell_id: Name of nvmem cell to read.
1919  * @val: pointer to output value.
1920  *
1921  * Return: 0 on success or negative errno.
1922  */
1923 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
1924 				    u64 *val)
1925 {
1926 	size_t len;
1927 	const u8 *buf;
1928 	int i;
1929 
1930 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1931 	if (IS_ERR(buf))
1932 		return PTR_ERR(buf);
1933 
1934 	/* Copy w/ implicit endian conversion */
1935 	*val = 0;
1936 	for (i = 0; i < len; i++)
1937 		*val |= (u64)buf[i] << (8 * i);
1938 
1939 	kfree(buf);
1940 
1941 	return 0;
1942 }
1943 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
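
/*
 * Example (illustrative only): unlike the fixed-width helpers, the
 * *_variable_le_* readers accept any cell up to the requested width and
 * assemble the value byte by byte, least significant byte first, so a 2-byte
 * cell still reads back correctly through the u32 variant. The "board-rev"
 * cell name is hypothetical.
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_variable_le_u32(dev, "board-rev", &rev);
 *	if (ret)
 *		return ret;
 *
 * Cells larger than sizeof(*val) are rejected with -ERANGE by
 * nvmem_cell_read_variable_common().
 */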
1944 
1945 /**
1946  * nvmem_device_cell_read() - Read a cell from a given nvmem device
1947  *
1948  * @nvmem: nvmem device to read from.
1949  * @info: nvmem cell info to be read.
1950  * @buf: buffer pointer which will be populated on successful read.
1951  *
1952  * Return: number of bytes read on success and negative
1953  * error code on error.
1954  */
1955 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1956 			   struct nvmem_cell_info *info, void *buf)
1957 {
1958 	struct nvmem_cell_entry cell;
1959 	int rc;
1960 	ssize_t len;
1961 
1962 	if (!nvmem)
1963 		return -EINVAL;
1964 
1965 	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1966 	if (rc)
1967 		return rc;
1968 
1969 	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
1970 	if (rc)
1971 		return rc;
1972 
1973 	return len;
1974 }
1975 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
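
/*
 * Example (illustrative only): callers holding an nvmem_device handle can
 * describe an ad-hoc cell with struct nvmem_cell_info instead of using a
 * registered cell. The name, offset and size are hypothetical.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "serial-number",
 *		.offset	= 0x10,
 *		.bytes	= 8,
 *	};
 *	u8 serial[8];
 *	ssize_t len;
 *
 *	len = nvmem_device_cell_read(nvmem, &info, serial);
 *	if (len < 0)
 *		return len;
 */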
1976 
1977 /**
1978  * nvmem_device_cell_write() - Write cell to a given nvmem device
1979  *
1980  * @nvmem: nvmem device to be written to.
1981  * @info: nvmem cell info to be written.
1982  * @buf: buffer to be written to cell.
1983  *
1984  * Return: number of bytes written or negative error code on failure.
1985  */
1986 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1987 			    struct nvmem_cell_info *info, void *buf)
1988 {
1989 	struct nvmem_cell_entry cell;
1990 	int rc;
1991 
1992 	if (!nvmem)
1993 		return -EINVAL;
1994 
1995 	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1996 	if (rc)
1997 		return rc;
1998 
1999 	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
2000 }
2001 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
2002 
2003 /**
2004  * nvmem_device_read() - Read from a given nvmem device
2005  *
2006  * @nvmem: nvmem device to read from.
2007  * @offset: offset in nvmem device.
2008  * @bytes: number of bytes to read.
2009  * @buf: buffer pointer which will be populated on successful read.
2010  *
2011  * Return: number of bytes read on success and negative
2012  * error code on error.
2013  */
2014 int nvmem_device_read(struct nvmem_device *nvmem,
2015 		      unsigned int offset,
2016 		      size_t bytes, void *buf)
2017 {
2018 	int rc;
2019 
2020 	if (!nvmem)
2021 		return -EINVAL;
2022 
2023 	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
2024 
2025 	if (rc)
2026 		return rc;
2027 
2028 	return bytes;
2029 }
2030 EXPORT_SYMBOL_GPL(nvmem_device_read);
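
/*
 * Example (illustrative only): raw reads bypass the cell layer and access the
 * underlying device directly at a byte offset. The lookup name and offset are
 * hypothetical.
 *
 *	struct nvmem_device *nvmem;
 *	u8 header[16];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "eeprom");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(header), header);
 *	nvmem_device_put(nvmem);
 *	if (ret < 0)
 *		return ret;
 */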
2031 
2032 /**
2033  * nvmem_device_write() - Write data to a given nvmem device
2034  *
2035  * @nvmem: nvmem device to be written to.
2036  * @offset: offset in nvmem device.
2037  * @bytes: number of bytes to write.
2038  * @buf: buffer to be written.
2039  *
2040  * Return: number of bytes written or negative error code on failure.
2041  */
2042 int nvmem_device_write(struct nvmem_device *nvmem,
2043 		       unsigned int offset,
2044 		       size_t bytes, void *buf)
2045 {
2046 	int rc;
2047 
2048 	if (!nvmem)
2049 		return -EINVAL;
2050 
2051 	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
2052 
2053 	if (rc)
2054 		return rc;
2055 
2057 	return bytes;
2058 }
2059 EXPORT_SYMBOL_GPL(nvmem_device_write);
2060 
2061 /**
2062  * nvmem_add_cell_table() - register a table of cell info entries
2063  *
2064  * @table: table of cell info entries
2065  */
2066 void nvmem_add_cell_table(struct nvmem_cell_table *table)
2067 {
2068 	mutex_lock(&nvmem_cell_mutex);
2069 	list_add_tail(&table->node, &nvmem_cell_tables);
2070 	mutex_unlock(&nvmem_cell_mutex);
2071 }
2072 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
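
/*
 * Example (illustrative only): board or provider code can register static
 * cell definitions that are attached to a matching nvmem device by name.
 * The device and cell names, offset and size are hypothetical.
 *
 *	static const struct nvmem_cell_info board_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name	= "at24-0",
 *		.cells		= board_cells,
 *		.ncells		= ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);
 */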
2073 
2074 /**
2075  * nvmem_del_cell_table() - remove a previously registered cell info table
2076  *
2077  * @table: table of cell info entries
2078  */
2079 void nvmem_del_cell_table(struct nvmem_cell_table *table)
2080 {
2081 	mutex_lock(&nvmem_cell_mutex);
2082 	list_del(&table->node);
2083 	mutex_unlock(&nvmem_cell_mutex);
2084 }
2085 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
2086 
2087 /**
2088  * nvmem_add_cell_lookups() - register a list of cell lookup entries
2089  *
2090  * @entries: array of cell lookup entries
2091  * @nentries: number of cell lookup entries in the array
2092  */
2093 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
2094 {
2095 	int i;
2096 
2097 	mutex_lock(&nvmem_lookup_mutex);
2098 	for (i = 0; i < nentries; i++)
2099 		list_add_tail(&entries[i].node, &nvmem_lookup_list);
2100 	mutex_unlock(&nvmem_lookup_mutex);
2101 }
2102 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
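
/*
 * Example (illustrative only): lookup entries map a (consumer device,
 * connection id) pair to a named cell on a named nvmem device, similar to
 * gpiod or PWM lookup tables. All names are hypothetical.
 *
 *	static struct nvmem_cell_lookup board_cell_lookups[] = {
 *		{
 *			.nvmem_name	= "at24-0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "fec1",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_cell_lookups,
 *			       ARRAY_SIZE(board_cell_lookups));
 */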
2103 
2104 /**
2105  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
2106  *                            entries
2107  *
2108  * @entries: array of cell lookup entries
2109  * @nentries: number of cell lookup entries in the array
2110  */
2111 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
2112 {
2113 	int i;
2114 
2115 	mutex_lock(&nvmem_lookup_mutex);
2116 	for (i = 0; i < nentries; i++)
2117 		list_del(&entries[i].node);
2118 	mutex_unlock(&nvmem_lookup_mutex);
2119 }
2120 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
2121 
2122 /**
2123  * nvmem_dev_name() - Get the name of a given nvmem device.
2124  *
2125  * @nvmem: nvmem device.
2126  *
2127  * Return: name of the nvmem device.
2128  */
2129 const char *nvmem_dev_name(struct nvmem_device *nvmem)
2130 {
2131 	return dev_name(&nvmem->dev);
2132 }
2133 EXPORT_SYMBOL_GPL(nvmem_dev_name);
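
/*
 * Example (illustrative only): the name is mainly useful for diagnostics,
 * e.g.
 *
 *	dev_info(dev, "cells provided by %s\n", nvmem_dev_name(nvmem));
 */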
2134 
2135 static int __init nvmem_init(void)
2136 {
2137 	return bus_register(&nvmem_bus_type);
2138 }
2139 
2140 static void __exit nvmem_exit(void)
2141 {
2142 	bus_unregister(&nvmem_bus_type);
2143 }
2144 
2145 subsys_initcall(nvmem_init);
2146 module_exit(nvmem_exit);
2147 
2148 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
2149 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
2150 MODULE_DESCRIPTION("nvmem Driver Core");
2151