xref: /openbmc/linux/drivers/base/devres.c (revision 6d99a79c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/devres.c - device resource management
4  *
5  * Copyright (c) 2006  SUSE Linux Products GmbH
6  * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/percpu.h>
13 
14 #include <asm/sections.h>
15 
16 #include "base.h"
17 
/* Header shared by every managed resource; linked on dev->devres_head. */
struct devres_node {
	struct list_head		entry;		/* link on dev->devres_head */
	dr_release_t			release;	/* callback invoked on release */
#ifdef CONFIG_DEBUG_DEVRES
	const char			*name;		/* debug label for devres_log() */
	size_t				size;		/* debug size for devres_log() */
#endif
};

/* A plain managed allocation: node header followed by the payload. */
struct devres {
	struct devres_node		node;
	/* -- 3 pointers */
	unsigned long long		data[];	/* guarantee ull alignment */
};

/*
 * A group is a pair of marker nodes (open/close) bracketing a span of
 * devres entries so they can be released or removed as a unit.
 */
struct devres_group {
	struct devres_node		node[2];	/* [0] open marker, [1] close marker */
	void				*id;		/* group id; defaults to the group itself */
	int				color;		/* scratch state used by remove_nodes() */
	/* -- 8 pointers */
};
39 
40 #ifdef CONFIG_DEBUG_DEVRES
41 static int log_devres = 0;
42 module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
43 
/* Record the debug name/size shown by devres_log() in @node. */
static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

/* Log a devres operation @op (e.g. "ADD", "REM", "REL") when the "log"
 * module parameter is set.
 */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
58 #else /* CONFIG_DEBUG_DEVRES */
59 #define set_node_dbginfo(node, n, s)	do {} while (0)
60 #define devres_log(dev, node, op)	do {} while (0)
61 #endif /* CONFIG_DEBUG_DEVRES */
62 
/*
 * Release functions for devres group.  These callbacks are used only
 * for identification: node_to_group() and find_group() compare
 * node->release against their addresses to recognize group markers.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}
76 
77 static struct devres_group * node_to_group(struct devres_node *node)
78 {
79 	if (node->release == &group_open_release)
80 		return container_of(node, struct devres_group, node[0]);
81 	if (node->release == &group_close_release)
82 		return container_of(node, struct devres_group, node[1]);
83 	return NULL;
84 }
85 
/*
 * Allocate a devres with room for @size payload bytes on NUMA node @nid
 * and initialize its node header with @release.  Returns the devres or
 * NULL on overflow/allocation failure.  __always_inline so the kmalloc
 * caller tracing attributes the allocation to alloc_dr()'s caller.
 */
static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* zero only the header; the payload is zeroed via __GFP_ZERO if asked */
	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}
107 
/*
 * Link @node onto @dev's resource list.  Caller must hold
 * dev->devres_lock; adding a node that is already linked is a bug.
 */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}
114 
115 #ifdef CONFIG_DEBUG_DEVRES
/*
 * Debug build variant of devres_alloc_node(): same behaviour, plus the
 * @name label recorded on the node for devres_log() output.
 */
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
		      const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	/* callers only ever see the payload, never struct devres itself */
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
128 #else
/**
 * devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	/* __GFP_ZERO provides the zeroed payload documented above */
	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
153 #endif
154 
/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.  Both @match and @fn are invoked
 * with dev->devres_lock held, so they must not sleep.
 *
 * RETURNS:
 * 	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			dr_match_t match, void *match_data,
			void (*fn)(struct device *, void *, void *),
			void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	/* reverse walk: most recently added resources are visited first */
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
196 
197 /**
198  * devres_free - Free device resource data
199  * @res: Pointer to devres data to free
200  *
201  * Free devres created with devres_alloc().
202  */
203 void devres_free(void *res)
204 {
205 	if (res) {
206 		struct devres *dr = container_of(res, struct devres, data);
207 
208 		BUG_ON(!list_empty(&dr->node.entry));
209 		kfree(dr);
210 	}
211 }
212 EXPORT_SYMBOL_GPL(devres_free);
213 
/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 * Registering the same devres twice is a bug (see add_dr()).
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
233 
234 static struct devres *find_dr(struct device *dev, dr_release_t release,
235 			      dr_match_t match, void *match_data)
236 {
237 	struct devres_node *node;
238 
239 	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
240 		struct devres *dr = container_of(node, struct devres, node);
241 
242 		if (node->release != release)
243 			continue;
244 		if (match && !match(dev, dr->data, match_data))
245 			continue;
246 		return dr;
247 	}
248 
249 	return NULL;
250 }
251 
252 /**
253  * devres_find - Find device resource
254  * @dev: Device to lookup resource from
255  * @release: Look for resources associated with this release function
256  * @match: Match function (optional)
257  * @match_data: Data for the match function
258  *
259  * Find the latest devres of @dev which is associated with @release
260  * and for which @match returns 1.  If @match is NULL, it's considered
261  * to match all.
262  *
263  * RETURNS:
264  * Pointer to found devres, NULL if not found.
265  */
266 void * devres_find(struct device *dev, dr_release_t release,
267 		   dr_match_t match, void *match_data)
268 {
269 	struct devres *dr;
270 	unsigned long flags;
271 
272 	spin_lock_irqsave(&dev->devres_lock, flags);
273 	dr = find_dr(dev, release, match, match_data);
274 	spin_unlock_irqrestore(&dev->devres_lock, flags);
275 
276 	if (dr)
277 		return dr->data;
278 	return NULL;
279 }
280 EXPORT_SYMBOL_GPL(devres_find);
281 
/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match return 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;		/* consumed; must not be freed below */
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);		/* no-op when @new_res was added */

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
316 
317 /**
318  * devres_remove - Find a device resource and remove it
319  * @dev: Device to find resource from
320  * @release: Look for resources associated with this release function
321  * @match: Match function (optional)
322  * @match_data: Data for the match function
323  *
324  * Find the latest devres of @dev associated with @release and for
325  * which @match returns 1.  If @match is NULL, it's considered to
326  * match all.  If found, the resource is removed atomically and
327  * returned.
328  *
329  * RETURNS:
330  * Pointer to removed devres on success, NULL if not found.
331  */
332 void * devres_remove(struct device *dev, dr_release_t release,
333 		     dr_match_t match, void *match_data)
334 {
335 	struct devres *dr;
336 	unsigned long flags;
337 
338 	spin_lock_irqsave(&dev->devres_lock, flags);
339 	dr = find_dr(dev, release, match, match_data);
340 	if (dr) {
341 		list_del_init(&dr->node.entry);
342 		devres_log(dev, &dr->node, "REM");
343 	}
344 	spin_unlock_irqrestore(&dev->devres_lock, flags);
345 
346 	if (dr)
347 		return dr->data;
348 	return NULL;
349 }
350 EXPORT_SYMBOL_GPL(devres_remove);
351 
352 /**
353  * devres_destroy - Find a device resource and destroy it
354  * @dev: Device to find resource from
355  * @release: Look for resources associated with this release function
356  * @match: Match function (optional)
357  * @match_data: Data for the match function
358  *
359  * Find the latest devres of @dev associated with @release and for
360  * which @match returns 1.  If @match is NULL, it's considered to
361  * match all.  If found, the resource is removed atomically and freed.
362  *
363  * Note that the release function for the resource will not be called,
364  * only the devres-allocated data will be freed.  The caller becomes
365  * responsible for freeing any other data.
366  *
367  * RETURNS:
368  * 0 if devres is found and freed, -ENOENT if not found.
369  */
370 int devres_destroy(struct device *dev, dr_release_t release,
371 		   dr_match_t match, void *match_data)
372 {
373 	void *res;
374 
375 	res = devres_remove(dev, release, match, match_data);
376 	if (unlikely(!res))
377 		return -ENOENT;
378 
379 	devres_free(res);
380 	return 0;
381 }
382 EXPORT_SYMBOL_GPL(devres_destroy);
383 
384 
385 /**
386  * devres_release - Find a device resource and destroy it, calling release
387  * @dev: Device to find resource from
388  * @release: Look for resources associated with this release function
389  * @match: Match function (optional)
390  * @match_data: Data for the match function
391  *
392  * Find the latest devres of @dev associated with @release and for
393  * which @match returns 1.  If @match is NULL, it's considered to
394  * match all.  If found, the resource is removed atomically, the
395  * release function called and the resource freed.
396  *
397  * RETURNS:
398  * 0 if devres is found and freed, -ENOENT if not found.
399  */
400 int devres_release(struct device *dev, dr_release_t release,
401 		   dr_match_t match, void *match_data)
402 {
403 	void *res;
404 
405 	res = devres_remove(dev, release, match, match_data);
406 	if (unlikely(!res))
407 		return -ENOENT;
408 
409 	(*release)(dev, res);
410 	devres_free(res);
411 	return 0;
412 }
413 EXPORT_SYMBOL_GPL(devres_release);
414 
/*
 * Unlink the devres entries in [@first, @end) onto @todo for later
 * release, and unlink any group whose markers lie wholly inside the
 * range.  Caller must hold dev->devres_lock.
 *
 * Returns the number of regular (non-group) entries moved to @todo.
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;	/* keep @first valid across the move */
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end).  That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		/* only group markers remain after the first pass */
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		/* one increment per marker seen; open groups get a free one */
		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
483 
/*
 * Remove the nodes in [@first, @end) and invoke their release
 * callbacks.  Called with dev->devres_lock held (@flags from the
 * caller's spin_lock_irqsave()); the lock is dropped before the
 * callbacks run so they do not execute under the spinlock.
 *
 * Returns the number of released non-group resources.
 */
static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}
507 
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.  Takes dev->devres_lock; release_nodes()
 * drops it before invoking the release callbacks.
 *
 * RETURNS:
 * Number of released non-group resources, -ENODEV if @dev's devres
 * list was never initialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
526 
527 /**
528  * devres_open_group - Open a new devres group
529  * @dev: Device to open devres group for
530  * @id: Separator ID
531  * @gfp: Allocation flags
532  *
533  * Open a new devres group for @dev with @id.  For @id, using a
534  * pointer to an object which won't be used for another group is
535  * recommended.  If @id is NULL, address-wise unique ID is created.
536  *
537  * RETURNS:
538  * ID of the new group, NULL on failure.
539  */
540 void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
541 {
542 	struct devres_group *grp;
543 	unsigned long flags;
544 
545 	grp = kmalloc(sizeof(*grp), gfp);
546 	if (unlikely(!grp))
547 		return NULL;
548 
549 	grp->node[0].release = &group_open_release;
550 	grp->node[1].release = &group_close_release;
551 	INIT_LIST_HEAD(&grp->node[0].entry);
552 	INIT_LIST_HEAD(&grp->node[1].entry);
553 	set_node_dbginfo(&grp->node[0], "grp<", 0);
554 	set_node_dbginfo(&grp->node[1], "grp>", 0);
555 	grp->id = grp;
556 	if (id)
557 		grp->id = id;
558 
559 	spin_lock_irqsave(&dev->devres_lock, flags);
560 	add_dr(dev, &grp->node[0]);
561 	spin_unlock_irqrestore(&dev->devres_lock, flags);
562 	return grp->id;
563 }
564 EXPORT_SYMBOL_GPL(devres_open_group);
565 
/* Find devres group with ID @id.  If @id is NULL, look for the latest
 * open group (its close marker node[1] is still unlinked).  Caller
 * must hold dev->devres_lock.
 */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		/* only open markers identify groups on the list */
		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}
588 
589 /**
590  * devres_close_group - Close a devres group
591  * @dev: Device to close devres group for
592  * @id: ID of target group, can be NULL
593  *
594  * Close the group identified by @id.  If @id is NULL, the latest open
595  * group is selected.
596  */
597 void devres_close_group(struct device *dev, void *id)
598 {
599 	struct devres_group *grp;
600 	unsigned long flags;
601 
602 	spin_lock_irqsave(&dev->devres_lock, flags);
603 
604 	grp = find_group(dev, id);
605 	if (grp)
606 		add_dr(dev, &grp->node[1]);
607 	else
608 		WARN_ON(1);
609 
610 	spin_unlock_irqrestore(&dev->devres_lock, flags);
611 }
612 EXPORT_SYMBOL_GPL(devres_close_group);
613 
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		/* unlink both markers; list_del_init is safe on the
		 * close marker even when the group was never closed
		 */
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* kfree(NULL) is a no-op when the group wasn't found */
	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
643 
/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		/* for a closed group, stop just past the close marker */
		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		/* release_nodes() drops devres_lock on this path */
		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
681 
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */
686 
/* Payload for devm_add_action(): the callback and the argument it gets. */
struct action_devres {
	void *data;		/* passed verbatim to @action */
	void (*action)(void *);	/* run during resource teardown */
};
691 
692 static int devm_action_match(struct device *dev, void *res, void *p)
693 {
694 	struct action_devres *devres = res;
695 	struct action_devres *target = p;
696 
697 	return devres->action == target->action &&
698 	       devres->data == target->data;
699 }
700 
/* devres release: invoke the recorded action on its recorded data. */
static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}
707 
/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the devres could not be allocated.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
733 
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 * Warns if no matching entry is found; @action is NOT invoked.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	/* stack-local key used only for matching */
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));

}
EXPORT_SYMBOL_GPL(devm_remove_action);
755 
/*
 * Managed kmalloc/kfree
 */
/* devres release: nothing to do, the payload is freed with the devres. */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}
763 
/* A devm_kmalloc() chunk matches iff @data points at the chunk itself. */
static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	if (res == data)
		return 1;
	return 0;
}
768 
/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	/* register so the chunk is freed automatically on driver detach */
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
800 
801 /**
802  * devm_kstrdup - Allocate resource managed space and
803  *                copy an existing string into that.
804  * @dev: Device to allocate memory for
805  * @s: the string to duplicate
806  * @gfp: the GFP mask used in the devm_kmalloc() call when
807  *       allocating memory
808  * RETURNS:
809  * Pointer to allocated string on success, NULL on failure.
810  */
811 char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
812 {
813 	size_t size;
814 	char *buf;
815 
816 	if (!s)
817 		return NULL;
818 
819 	size = strlen(s) + 1;
820 	buf = devm_kmalloc(dev, size, gfp);
821 	if (buf)
822 		memcpy(buf, s, size);
823 	return buf;
824 }
825 EXPORT_SYMBOL_GPL(devm_kstrdup);
826 
827 /**
828  * devm_kstrdup_const - resource managed conditional string duplication
829  * @dev: device for which to duplicate the string
830  * @s: the string to duplicate
831  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
832  *
833  * Strings allocated by devm_kstrdup_const will be automatically freed when
834  * the associated device is detached.
835  *
836  * RETURNS:
837  * Source string if it is in .rodata section otherwise it falls back to
838  * devm_kstrdup.
839  */
840 const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
841 {
842 	if (is_kernel_rodata((unsigned long)s))
843 		return s;
844 
845 	return devm_kstrdup(dev, s, gfp);
846 }
847 EXPORT_SYMBOL_GPL(devm_kstrdup_const);
848 
/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	/* the format is consumed twice: once to size, once to print,
	 * so work on a copy for the sizing pass
	 */
	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);	/* +1 for the NUL */
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);
880 
881 /**
882  * devm_kasprintf - Allocate resource managed space and format a string
883  *		    into that.
884  * @dev: Device to allocate memory for
885  * @gfp: the GFP mask used in the devm_kmalloc() call when
886  *       allocating memory
887  * @fmt: The printf()-style format string
888  * @...: Arguments for the format string
889  * RETURNS:
890  * Pointer to allocated string on success, NULL on failure.
891  */
892 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
893 {
894 	va_list ap;
895 	char *p;
896 
897 	va_start(ap, fmt);
898 	p = devm_kvasprintf(dev, gfp, fmt, ap);
899 	va_end(ap);
900 
901 	return p;
902 }
903 EXPORT_SYMBOL_GPL(devm_kasprintf);
904 
/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().  Warns when @p was not
 * allocated for @dev via devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special case: pointer to a string in .rodata returned by
	 * devm_kstrdup_const().
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p)))
		return;

	/* release callback is a noop, so this just unlinks and frees */
	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
928 
929 /**
930  * devm_kmemdup - Resource-managed kmemdup
931  * @dev: Device this memory belongs to
932  * @src: Memory region to duplicate
933  * @len: Memory region length
934  * @gfp: GFP mask to use
935  *
936  * Duplicate region of a memory using resource managed kmalloc
937  */
938 void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
939 {
940 	void *p;
941 
942 	p = devm_kmalloc(dev, len, gfp);
943 	if (p)
944 		memcpy(p, src, len);
945 
946 	return p;
947 }
948 EXPORT_SYMBOL_GPL(devm_kmemdup);
949 
/* Bookkeeping for devm_get_free_pages(): what to hand to free_pages(). */
struct pages_devres {
	unsigned long addr;	/* base address returned by __get_free_pages() */
	unsigned int order;	/* allocation order, needed by free_pages() */
};
954 
955 static int devm_pages_match(struct device *dev, void *res, void *p)
956 {
957 	struct pages_devres *devres = res;
958 	struct pages_devres *target = p;
959 
960 	return devres->addr == target->addr;
961 }
962 
/* devres release: return the pages recorded by devm_get_free_pages(). */
static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}
969 
/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */

unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	/* the devres holds only bookkeeping, not the pages themselves */
	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		/* no way to track them - give the pages back immediately */
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
1008 
/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.  Warns when no matching
 * allocation is found on @dev.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	/* key for devm_pages_match(); only .addr is compared */
	struct pages_devres devres = { .addr = addr };

	/* devres_release (not _destroy): devm_pages_release() must run
	 * so the pages are actually freed
	 */
	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
1025 
/* devres release: the payload holds one pointer to the percpu area. */
static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

/* Match when the percpu pointer stored in the devres payload equals @p. */
static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}
1040 
/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
		size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	/* the devres payload stores just the percpu pointer */
	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		/* cannot track it - undo the percpu allocation */
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
1076 
/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().  Warns when @pdata
 * does not match a managed percpu allocation of @dev.
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	/* devres_destroy suffices: the release would only free_percpu(),
	 * which devm_percpu_release() does via the stored pointer; here
	 * the caller-visible semantic is simply "stop managing and free"
	 */
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
1090