xref: /openbmc/linux/drivers/gpu/drm/drm_managed.c (revision c6603c740e0e3492c9c95fdab833375bf7117b6b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually because userspace still holds open files and other handles that
 * reference driver resources.
 */
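
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * driver can hang allocations and cleanup actions off its &drm_device and
 * have both torn down automatically once the last reference is dropped. The
 * names my_state and my_cleanup are made up for the example:
 *
 *	static void my_cleanup(struct drm_device *drm, void *data)
 *	{
 *		struct my_state *state = data;
 *
 *		... teardown that must wait until the drm_device goes away ...
 *	}
 *
 *	state = drmm_kmalloc(drm, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *
 *	ret = drmm_add_action(drm, my_cleanup, state);
 *	if (ret)
 *		return ret;
 */
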
struct drmres_node {
	struct list_head	entry;
	drmres_release_t	release;
	const char		*name;
	size_t			size;
};

struct drmres {
	struct drmres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

/*
 * drm_managed_release - unwind all managed resources of @dev
 *
 * Called once the last reference to @dev is gone: runs every registered
 * release action in reverse order of registration and then frees the
 * backing allocations.
 */
void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

/**
 * drmm_add_final_kfree - add release action for the final kfree()
 * @dev: DRM device
 * @container: pointer to the kmalloc allocation containing @dev
 *
 * The allocation embedding the struct &drm_device cannot itself be a managed
 * resource, since it must exist before the managed-resource machinery is set
 * up. Drivers therefore allocate it themselves and register it here, so that
 * it is kfree()'d only after all managed resources have been released. The
 * WARN_ONs are a sanity check that @dev really lies within @container.
 */
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 >=
		(struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}
EXPORT_SYMBOL(drmm_add_final_kfree);
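
/*
 * Illustrative sketch (hypothetical driver code, not part of this file) of
 * the pattern drmm_add_final_kfree() is meant for, with my_device and
 * my_driver standing in for a real driver's structures:
 *
 *	struct my_device *md;
 *	int ret;
 *
 *	md = kzalloc(sizeof(*md), GFP_KERNEL);
 *	if (!md)
 *		return -ENOMEM;
 *
 *	ret = drm_dev_init(&md->drm, &my_driver, parent);
 *	if (ret) {
 *		kfree(md);
 *		return ret;
 *	}
 *	drmm_add_final_kfree(&md->drm, md);
 *
 * After this, md is kfree()'d automatically once the last reference to
 * md->drm is dropped.
 */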

/*
 * Backend for the drmm_add_action() macro in drm_managed.h: registers
 * @action to be called with (@dev, @data) when the managed resources of
 * @dev are released.
 */
int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void *) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * The allocated memory is freed automatically after the final reference to
 * @dev is dropped; it can also be freed earlier with drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to release, may be NULL
 *
 * Releases memory obtained from drmm_kmalloc() before the device itself is
 * released.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);