/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
}

static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->of_node == asd->match.of.node;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}
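
/*
 * Example of the match data the helpers above compare against (sketch only,
 * not code from this file; the values are illustrative): an I2C-based match
 * is described by adapter number and client address,
 *
 *	asd->match_type = V4L2_ASYNC_MATCH_I2C;
 *	asd->match.i2c.adapter_id = 1;
 *	asd->match.i2c.address = 0x3c;
 *
 * while a device-name match only needs the device name,
 *
 *	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
 *	asd->match.device_name.name = "1-003c";
 */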

/* Subdevices registered but not yet claimed by any notifier */
static LIST_HEAD(subdev_list);
/* All registered notifiers, including completed ones */
static LIST_HEAD(notifier_list);
/* Protects the two lists above and the notifiers' waiting/done lists */
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
						    struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_OF:
			match = match_of;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}
	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	ret = v4l2_subdev_call(sd, core, registered_async);
	if (ret < 0 && ret != -ENOIOCTLCMD) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}
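
/*
 * Sketch of how the notifier callbacks used above are typically implemented
 * (hypothetical bridge driver code; my_bridge is a made-up structure, only
 * v4l2_device_register_subdev_nodes() is a real helper): .bound is a natural
 * place to set up the link to the new subdevice, and .complete usually
 * creates the device nodes once every expected subdevice has been bound:
 *
 *	static int my_bridge_complete(struct v4l2_async_notifier *notifier)
 *	{
 *		struct my_bridge *bridge =
 *			container_of(notifier, struct my_bridge, notifier);
 *
 *		return v4l2_device_register_subdev_nodes(&bridge->v4l2_dev);
 *	}
 */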

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

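/*
 * Usage sketch for the notifier side (not part of this file; the my_bridge
 * names below are made up): a bridge driver fills an array of asd pointers,
 * points its notifier at that array and registers the notifier. Subdevices
 * that are already registered are matched immediately, later arrivals are
 * picked up from v4l2_async_register_subdev().
 *
 *	bridge->asd.match_type = V4L2_ASYNC_MATCH_OF;
 *	bridge->asd.match.of.node = sensor_node;
 *	bridge->asds[0] = &bridge->asd;
 *
 *	bridge->notifier.subdevs = bridge->asds;
 *	bridge->notifier.num_subdevs = 1;
 *	bridge->notifier.bound = my_bridge_bound;
 *	bridge->notifier.unbind = my_bridge_unbind;
 *	bridge->notifier.complete = my_bridge_complete;
 *
 *	ret = v4l2_async_notifier_register(&bridge->v4l2_dev, &bridge->notifier);
 */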
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_OF:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	/* Keep completed notifiers on the list as well */
	list_add(&notifier->list, &notifier_list);

	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
	struct device **dev;
	int i = 0;

	if (!notifier->v4l2_dev)
		return;

	dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

	mutex_lock(&list_lock);

	list_del(&notifier->list);

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_subdev *asd = sd->asd;
		struct device *d;

		d = get_device(sd->dev);

		/*
		 * Clean up first: this clears sd->asd, so that the subdevice
		 * driver's remove path (triggered by device_release_driver()
		 * below) takes the early return in
		 * v4l2_async_unregister_subdev() instead of trying to take
		 * list_lock, which we are holding.
		 */
		v4l2_async_cleanup(sd);

		/* If we handled USB devices, we'd have to lock the parent too */
		device_release_driver(d);

		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);

		/*
		 * Store the device in the device cache, in order to call
		 * put_device() on the final step
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
	}

	mutex_unlock(&list_lock);

	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
	kfree(dev);

	notifier->v4l2_dev = NULL;

	/*
	 * No need to clean up the waiting list here: it is re-initialised
	 * and re-populated when the notifier is registered again.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

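/*
 * Usage sketch for the subdevice side (hypothetical sensor driver; only
 * v4l2_i2c_subdev_init() and the async calls below are real APIs): the
 * subdevice driver initialises its struct v4l2_subdev as usual and then
 * hands it over to the async framework from probe():
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &my_sensor_subdev_ops);
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *	if (ret)
 *		goto err_cleanup;
 *
 * and undoes the registration from remove():
 *
 *	v4l2_async_unregister_subdev(&sensor->sd);
 */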
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not
	 * exist independently of the device at any point in time.
	 */
	if (!sd->of_node && sd->dev)
		sd->of_node = sd->dev->of_node;

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);

		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);

			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier = sd->notifier;
	struct v4l2_async_subdev *asd = sd->asd;

	if (!asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
		return;
	}

	mutex_lock(&list_lock);

	/* Hand the match entry back to the notifier for a future re-bind */
	list_add(&asd->list, &notifier->waiting);

	/* Clears sd->asd, hence the cached asd above */
	v4l2_async_cleanup(sd);

	if (notifier->unbind)
		notifier->unbind(notifier, sd, asd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);