/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

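/*
 * Matching helpers, one per V4L2_ASYNC_MATCH_* type: each compares a
 * registered subdevice against an async subdevice descriptor provided
 * by the bridge driver's notifier.
 */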
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
}

static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->fwnode == asd->match.fwnode.fwnode;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

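/*
 * Subdevices that have not yet matched any notifier sit on subdev_list;
 * registered notifiers (including completed ones) sit on notifier_list.
 * Both lists are protected by list_lock.
 */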
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

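/*
 * Walk the notifier's waiting list and return the first async subdevice
 * descriptor that matches @sd, or NULL if none does.
 */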
static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
						    struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

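/*
 * Bind a matched subdevice to the notifier: call the bound() callback,
 * register the subdevice with the V4L2 device, move it from the waiting
 * list to the notifier's done list and, once nothing is left waiting,
 * invoke the complete() callback.
 */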
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}

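/*
 * Undo the binding: unregister the subdevice from its V4L2 device and
 * detach it from whatever async list it is on, clearing its async state.
 */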
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

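/*
 * Register a bridge driver's notifier: validate its match entries, queue
 * them on the waiting list, bind any already-registered subdevices that
 * match, then publish the notifier so later subdevice registrations can
 * bind against it.
 */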
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!v4l2_dev || !notifier->num_subdevs ||
	    notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_FWNODE:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* Keep completed notifiers on the list as well */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

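/*
 * Tear a notifier down: unbind every subdevice on its done list, release
 * the subdevice drivers and re-probe them so they can register themselves
 * again. Device references are cached so that put_device() and
 * device_attach() can run after list_lock has been dropped.
 */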
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
	struct device **dev;
	int i = 0;

	if (!notifier->v4l2_dev)
		return;

	dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

	mutex_lock(&list_lock);

	list_del(&notifier->list);

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		/*
		 * Grab asd now: v4l2_async_cleanup() clears sd->asd, and the
		 * unbind callback below still needs the matched descriptor.
		 */
		struct v4l2_async_subdev *asd = sd->asd;
		struct device *d;

		d = get_device(sd->dev);

		v4l2_async_cleanup(sd);

		/* If we handled USB devices, we'd have to lock the parent too */
		device_release_driver(d);

		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);

		/*
		 * Store the device in the device cache so that put_device()
		 * can be called on it as the final step.
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
	}

	mutex_unlock(&list_lock);

	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
	kvfree(dev);

	notifier->v4l2_dev = NULL;

	/*
	 * Don't care about the waiting list, it is initialised and populated
	 * upon notifier registration.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

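/*
 * Register a subdevice for asynchronous binding: try every registered
 * notifier for a match and bind immediately on success, otherwise park
 * the subdevice on the global list until a matching notifier shows up.
 */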
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not
	 * exist independently of the device at any point in time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);

		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);

			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

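/*
 * Unregister a subdevice: if it is bound to a notifier, return its async
 * descriptor to that notifier's waiting list, undo the binding and tell
 * the bridge driver through the unbind() callback.
 */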
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier = sd->notifier;
	/* v4l2_async_cleanup() clears sd->asd, so keep a local copy for unbind() */
	struct v4l2_async_subdev *asd = sd->asd;

	if (!asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
		return;
	}

	mutex_lock(&list_lock);

	list_add(&asd->list, &notifier->waiting);

	v4l2_async_cleanup(sd);

	if (notifier->unbind)
		notifier->unbind(notifier, sd, asd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);