/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

/*
 * Map a logical index (0 == oldest queued event) to a physical slot in
 * the subscription's circular event buffer.
 */
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
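/*
 * Worked example (illustrative numbers, not taken from any driver): with
 * elems == 4 and first == 3, logical index 0 maps to slot 3 while logical
 * index 1 wraps around to slot 0, since (3 + 1) - 4 == 0. A single
 * subtraction is enough because callers never pass idx > elems and
 * first < elems, so idx + first < 2 * elems always holds.
 */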

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	event->timestamp = ns_to_timespec(kev->ts);
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
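
/*
 * Usage sketch (hedged): drivers do not normally call this directly; the
 * V4L2 ioctl core dispatches VIDIOC_DQEVENT to it, deriving nonblocking
 * from the file's O_NONBLOCK flag, roughly like this (handler name and
 * signature are illustrative):
 *
 *	static int foo_dqevent(struct file *file, struct v4l2_fh *fh,
 *			       struct v4l2_event *ev)
 *	{
 *		return v4l2_event_dequeue(fh, ev,
 *					  file->f_flags & O_NONBLOCK);
 *	}
 */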

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
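
/*
 * Overflow illustration (hedged, using the source-change ops defined at
 * the end of this file and assuming the fh has subscribed via
 * v4l2_src_change_event_subscribe()): with elems == 1, queueing two
 * V4L2_EVENT_SOURCE_CHANGE events in a row makes the second replace the
 * first, while ops->replace() ORs the old changes bitmask into the new
 * payload:
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_event_queue_fh(fh, &ev);	(fills the only slot)
 *	v4l2_event_queue_fh(fh, &ev);	(replace: changes are OR'd)
 *
 * With elems > 1 the oldest event is dropped instead and ops->merge()
 * folds its changes into the second-oldest event.
 */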

void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
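
/*
 * Usage sketch (hedged; struct foo_dev and its fields are made up): a
 * driver typically raises an event from interrupt or completion context,
 * which is safe here because fh_lock is taken with irqsave:
 *
 *	static void foo_signal_eos(struct foo_dev *dev)
 *	{
 *		static const struct v4l2_event ev = {
 *			.type = V4L2_EVENT_EOS,
 *		};
 *
 *		v4l2_event_queue(&dev->vdev, &ev);
 *	}
 */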

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
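
/*
 * Usage sketch (hedged; foo_poll() is illustrative): a driver's poll()
 * handler checks this after registering on fh->wait, so that EPOLLPRI is
 * reported only when an event is actually queued:
 *
 *	static __poll_t foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct v4l2_fh *fh = file->private_data;
 *		__poll_t res = 0;
 *
 *		poll_wait(file, &fh->wait, wait);
 *		if (v4l2_event_pending(fh))
 *			res |= EPOLLPRI;
 *		return res;
 *	}
 */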

static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
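
/*
 * Usage sketch (hedged; the foo_ names are made up): a driver exposes
 * subscription through its ioctl ops, dispatching on the event type and
 * choosing a queue depth (elems) that matches the expected event rate:
 *
 *	static int foo_subscribe_event(struct v4l2_fh *fh,
 *				       const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_CTRL:
 *			return v4l2_ctrl_subscribe_event(fh, sub);
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 2, NULL);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */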

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
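
/*
 * Usage sketch (hedged): since V4L2_EVENT_ALL is handled above, most
 * drivers can plug this helper straight into their ioctl ops next to
 * their subscribe handler:
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		...
 *		.vidioc_subscribe_event = foo_subscribe_event,
 *		.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 *	};
 */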

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

/*
 * Replace the pending source-change event with the new one, keeping the
 * union of both changes bitmasks so no change notification is lost.
 */
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

/* Fold the dropped (oldest) event's changes into the next-oldest one. */
static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
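
/*
 * Design note plus usage sketch (hedged; foo_subscribe_event() is
 * illustrative): elems is deliberately passed as 0, which
 * v4l2_event_subscribe() clamps to 1, so only a single source-change
 * event is queued per subscription; the replace op above ORs together
 * the changes bitmask of any event that would otherwise be lost. A
 * decoder driver would forward the type from its subscribe handler:
 *
 *	static int foo_subscribe_event(struct v4l2_fh *fh,
 *				       const struct v4l2_event_subscription *sub)
 *	{
 *		if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		return v4l2_ctrl_subscribe_event(fh, sub);
 *	}
 */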

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);