/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("vb2-v4l2: [%s] %s: " fmt,		      \
				(q)->name, __func__, ## arg);		      \
	} while (0)

/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
			       ? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR ||
			  b->memory == VB2_MEMORY_DMABUF)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

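/*
 * vb2_fill_vb2_v4l2_buffer() - copy the userspace-provided plane information
 * and flags from a struct v4l2_buffer into the corresponding vb2_v4l2_buffer,
 * after validating the length/bytesused values and the 'field' setting.
 * Called before a buffer is prepared or queued.
 */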
static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when the API starts
			 * accepting a variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use the planes array,
		 * so fill in the relevant v4l2_buffer struct fields instead.
		 * Internally vb2 uses the plane array for single-planar
		 * buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so it needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

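/*
 * set_buffer_cache_hints() - decide whether ->prepare()/->finish() cache
 * syncs are needed for this buffer, honouring the userspace cache hint
 * flags when the queue allows them.
 */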
static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	/*
	 * The DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->need_cache_sync_on_finish = 0;
		vb->need_cache_sync_on_prepare = 0;
		return;
	}

	/*
	 * Cache sync/invalidation flags are set by default in order to
	 * preserve existing behaviour for old apps/drivers.
	 */
	vb->need_cache_sync_on_prepare = 1;
	vb->need_cache_sync_on_finish = 1;

	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if the queue does not support
		 * userspace hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when the queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->need_cache_sync_on_prepare = 0;
}

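/*
 * vb2_queue_or_prepare_buf() - common validation for VIDIOC_QBUF and
 * VIDIOC_PREPARE_BUF: check the buffer type, index, memory model and planes
 * array, fill the vb2_v4l2_buffer from userspace data if the buffer was not
 * prepared yet, and, when queueing, resolve and sanity-check the request (if
 * any) that the buffer should be queued to.
 */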
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by the userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;
	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important in order to
	 * correctly validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	if (b->request_fd < 0) {
		dprintk(q, 1, "%s: request_fd < 0\n", opname);
		return -EINVAL;
	}

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in the plane array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer's plane information from the data
 * previously copied from the struct v4l2_buffer by vb2_fill_vb2_v4l2_buffer().
 * Verification of the user-provided planes array is done separately in
 * __verify_planes_array().
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

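/*
 * vb2_find_timestamp() - find the index of the first buffer, starting at
 * start_idx, whose timestamp was copied from userspace and matches the given
 * timestamp. Returns -1 if no such buffer exists.
 */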
int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);

/*
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

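/*
 * fill_buf_caps() - report the V4L2_BUF_CAP_* capabilities supported by this
 * queue, based on the queue's io_modes and feature flags.
 */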
static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

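/*
 * vb2_reqbufs() - VIDIOC_REQBUFS helper: verify the memory type, report the
 * buffer capabilities and forward the request to vb2_core_reqbufs().
 */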
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	fill_buf_caps(q, &req->capabilities);
	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

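/*
 * vb2_create_bufs() - VIDIOC_CREATE_BUFS helper: validate the requested
 * format type, derive the per-plane sizes from the format and forward the
 * request to vb2_core_create_bufs().
 */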
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

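/*
 * vb2_qbuf() - VIDIOC_QBUF helper: validate the buffer (and its request, if
 * V4L2_BUF_FLAG_REQUEST_FD is set) and queue it to the vb2 core.
 */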
int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

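/*
 * vb2_dqbuf() - VIDIOC_DQBUF helper: dequeue a filled buffer from the vb2
 * core, track the 'last buffer' state for capture queues and clear the DONE
 * flag before the buffer is returned to userspace.
 */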
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After VIDIOC_DQBUF the V4L2_BUF_FLAG_DONE flag must be cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
				eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

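/*
 * vb2_queue_init_name() - validate the queue configuration provided by the
 * driver, plug in the V4L2 buffer ops and initialize the vb2 core queue with
 * an optional name used in debug messages.
 *
 * A rough sketch of typical driver usage (names prefixed with "my_" are
 * hypothetical placeholders, not part of this framework):
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->drv_priv = my_dev;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_vb2_ops;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &my_dev->mutex;
 *	ret = vb2_queue_init_name(q, "my-capture-queue");
 */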
int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn if the vb2_memory enum values do not match the v4l2_memory ones */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

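/*
 * vb2_poll() - poll helper that combines the vb2 core poll result with any
 * pending V4L2 events on the file handle, so a single poll() covers both
 * buffers and events.
 */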
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */

/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

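/*
 * These helpers can be plugged straight into a driver's v4l2_ioctl_ops.
 * A rough sketch of how a driver might wire them up ("my_ioctl_ops" is a
 * hypothetical name):
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 */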
int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	fill_buf_caps(vdev->queue, &p->capabilities);
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is no
	 * longer the owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
			p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

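/*
 * Likewise, the fop helpers below can be used directly in a driver's
 * v4l2_file_operations. A rough sketch ("my_fops" is a hypothetical name):
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */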
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be
	 * using it and should write your own instead.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

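/*
 * These two unlock/relock the queue lock around blocking waits in the vb2
 * core. In a driver's vb2_ops they are typically hooked up as:
 *
 *	.wait_prepare	= vb2_ops_wait_prepare,
 *	.wait_finish	= vb2_ops_wait_finish,
 */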
void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);

/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");