/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues.
 */
#define DST_QUEUE_OFF_BASE	(1 << 30)

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - remove a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
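
/*
 * Illustrative sketch (not part of this framework): a driver's device_run()
 * callback will typically peek at the next ready source and destination
 * buffers with the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() helpers
 * from the header and hand them to the hardware. The names my_ctx, its fh
 * member and my_hw_start() are hypothetical driver-side code.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		my_hw_start(ctx, &src->vb2_buf, &dst->vb2_buf);
 *	}
 *
 * The buffers stay on the ready lists here; they are only removed (with
 * v4l2_m2m_src_buf_remove()/v4l2_m2m_dst_buf_remove()) once the hardware is
 * done with them, see the completion sketch near v4l2_m2m_job_finish().
 */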

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
				   struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that returns 1
 * if the instance is ready. An example would be an instance that requires
 * more than one src/dst buffer per transaction; a sketch of such a callback
 * follows this function.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
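
/*
 * Illustrative sketch (not part of this framework): a job_ready() callback
 * for an instance that needs at least two source buffers per transaction, as
 * mentioned in the v4l2_m2m_try_schedule() description above. The type
 * my_ctx and its fh member are hypothetical driver-side names.
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
 *	}
 *
 * The callback is plugged into struct v4l2_m2m_ops (see the probe-time sketch
 * near v4l2_m2m_init()) and is consulted by v4l2_m2m_try_schedule() before
 * the instance is put on the job queue.
 */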

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * Called on streamoff or release of a context:
 * 1) if the context is currently running, job_abort() is called and we wait
 *    until the job completes,
 * 2) if the context is only queued, it is removed from the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback itself, though; a typical completion handler
 * is sketched after this function.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
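
/*
 * Illustrative sketch (not part of this framework): a completion path, e.g.
 * an interrupt handler, will usually give the processed buffers back to
 * videobuf2 and then call v4l2_m2m_job_finish(). Everything named my_* is a
 * hypothetical driver-side identifier.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 *
 * job_spinlock is taken with irqsave throughout this file, so calling
 * v4l2_m2m_job_finish() from interrupt context like this is fine.
 */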

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/*
	 * If count == 0, then the owner has released all buffers and is no
	 * longer the owner of the queue. Otherwise we have an owner.
	 */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/*
	 * Drop the queue, since streamoff returns the device to the same state
	 * as after calling reqbufs.
	 */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll helper covering both the source and destination queues
 *
 * Call from the driver's poll() function. Polls both queues: a buffer that can
 * be dequeued (with dqbuf) from the source queue makes the file writable,
 * while one ready on the destination queue makes it readable.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, i.e.
	 * a buffer that is either already owned by the driver or waiting for
	 * the driver to claim it and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | POLLIN | POLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Handles mmap() for both queues
 * seamlessly for videobuf2, which receives normal per-queue offsets and
 * proper vb2 queue pointers. The differentiation is made outside videobuf2
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
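
/*
 * Illustrative sketch (not part of this framework): probe-time setup in a
 * driver. The my_* names are hypothetical; device_run and job_abort are
 * mandatory here, job_ready is optional.
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 * and in the driver's probe():
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *
 * v4l2_m2m_release() undoes this, usually from the driver's remove().
 */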

/**
 * v4l2_m2m_release() - cleans up and frees an m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 * @m2m_dev:	a previously initialized m2m_dev struct
 * @drv_priv:	driver's instance private data
 * @queue_init:	a callback for queue type-specific initialization function to
 *		be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it to the m2m context as
	 * the common buffer queue lock. This lock is used by the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
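
/*
 * Illustrative sketch (not part of this framework): a queue_init callback and
 * the open() path of a driver. my_ctx, my_vb2_ops and the chosen mem_ops are
 * hypothetical; the essential points are that the OUTPUT queue is the source,
 * the CAPTURE queue is the destination, and buf_struct_size must cover
 * struct v4l2_m2m_buffer so that v4l2_m2m_buf_queue() can be used.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &my_vb2_ops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->mutex;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		(remaining fields set as above)
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * and in the driver's open():
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */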

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
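
/*
 * Illustrative sketch (not part of this framework): a driver's release()
 * usually drops the m2m context before freeing its own state. my_release,
 * my_ctx and the embedded fh member are hypothetical.
 *
 *	static int my_release(struct file *file)
 *	{
 *		struct v4l2_fh *fh = file->private_data;
 *		struct my_ctx *ctx = container_of(fh, struct my_ctx, fh);
 *
 *		v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 *		v4l2_fh_del(&ctx->fh);
 *		v4l2_fh_exit(&ctx->fh);
 *		kfree(ctx);
 *		return 0;
 *	}
 */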

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the buf_queue() callback in struct vb2_ops.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
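
/*
 * Illustrative sketch (not part of this framework): the vb2 buf_queue
 * callback of an m2m driver usually just forwards the buffer here. Note that
 * this requires the queue's buf_struct_size to cover struct v4l2_m2m_buffer
 * (see the container_of() above). my_buf_queue and my_ctx are hypothetical.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */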

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
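
/*
 * Illustrative sketch (not part of this framework): these helpers assume
 * file->private_data points to a struct v4l2_fh with a valid m2m_ctx, so a
 * driver can plug them straight into its v4l2_ioctl_ops. my_ioctl_ops and
 * the elided format handlers are hypothetical.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *		...
 *	};
 */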

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for both the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
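
/*
 * Illustrative sketch (not part of this framework): together with the ioctl
 * helpers above, a driver's v4l2_file_operations can delegate mmap() and
 * poll() to this framework. my_fops, my_open and my_release are hypothetical.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */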