// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			which controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
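 *
 * Note that this only puts the context on the job_queue; the caller is
 * responsible for actually running the queued job afterwards, e.g. via
 * v4l2_m2m_try_run(). v4l2_m2m_try_schedule() combines both steps.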
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_ctx->has_stopped) {
		dprintk("Device has stopped\n");
		goto job_unlock;
	}

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
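 *
 * Scheduled from v4l2_m2m_schedule_next_job(), which may be called in atomic
 * context, while jobs themselves must be run in non-atomic context.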
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}

/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}
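
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * typical flow is that .device_run() starts the hardware and returns, and
 * the driver's completion interrupt returns the buffers and finishes the
 * job so that the next one can be scheduled:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *priv)
 *	{
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(my_dev->m2m_dev);
 *
 *		v4l2_m2m_buf_done(v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx),
 *				  VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx),
 *				  VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(my_dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */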

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	/*
	 * If the request API is being used, returning the OUTPUT
	 * (src) buffer will wake-up any process waiting on the
	 * request file descriptor.
	 *
	 * Therefore, return the CAPTURE (dst) buffer first,
	 * to avoid signalling the request file descriptor
	 * before the CAPTURE buffer is done.
	 */
	v4l2_m2m_buf_done(src_buf, state);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	 * no longer owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{
	vbuf->flags |= V4L2_BUF_FLAG_LAST;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);

/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *next_dst_buf;

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		return 0;

	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
	m2m_ctx->is_draining = true;

	/*
	 * The processing of the last output buffer queued before
	 * the STOP command is expected to mark the buffer management
	 * state as stopped with v4l2_m2m_mark_stopped().
	 */
	if (m2m_ctx->last_src_buf)
		return 0;

	/*
	 * In case the output queue is empty, try to mark the last capture
	 * buffer as LAST.
	 */
	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
	if (!next_dst_buf) {
		/*
		 * Wait for the next queued one in encoder/decoder driver
		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
		 * streaming.
		 */
		m2m_ctx->next_buf_last = true;
		return 0;
	}

	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);

	return 0;
}

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from the encoder/decoder driver's start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{
	/* If start streaming again, untag the last output buffer */
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from the encoder/decoder driver's stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * If in draining state, either mark next dst buffer as
		 * done or flag next one to be marked as done either
		 * in encoder/decoder driver buf_queue() callback using
		 * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf()
		 * if encoder/decoder is not yet streaming
		 */
		if (m2m_ctx->is_draining) {
			struct vb2_v4l2_buffer *next_dst_buf;

			m2m_ctx->last_src_buf = NULL;
			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
			if (!next_dst_buf)
				m2m_ctx->next_buf_last = true;
			else
				v4l2_m2m_last_buffer_done(m2m_ctx,
							  next_dst_buf);
		}
	} else {
		v4l2_m2m_clear_state(m2m_ctx);
	}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);

static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vbuf;
	unsigned int i;

	if (WARN_ON(q->is_output))
		return;
	if (list_empty(&q->queued_list))
		return;

	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);

	/*
	 * Since the buffer hasn't been queued to the ready queue,
	 * mark it active and owned before marking it LAST and DONE
	 */
	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->field = V4L2_FIELD_NONE;

	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}

	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/*
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device yet and the device was asked to stop, mark the
	 * previously queued buffer as DONE with the LAST flag since it
	 * won't be queued on the device.
	 */
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	    (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list)))
		return EPOLLERR;

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
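
/*
 * CAPTURE buffers are reported with their mmap offsets shifted by
 * DST_QUEUE_OFF_BASE (see v4l2_m2m_querybuf() above); map such offsets back
 * to the destination queue here.
 */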
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/*
	 * A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity. The DMA engine entities are linked to
	 * a V4L interface.
	 */
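	/*
	 * Resulting topology (sketch):
	 *
	 *	source (I/O) ---> proc (function) ---> sink (I/O)
	 *	      \                                   /
	 *	       +-------- V4L interface ----------+
	 */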

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
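
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * typically creates one v4l2_m2m_dev in its probe() and one m2m context per
 * open file handle:
 *
 *	// probe():
 *	my_dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *
 *	// open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(my_dev->m2m_dev, ctx,
 *					    my_queue_init);
 *
 *	// release() / remove():
 *	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 *	v4l2_m2m_release(my_dev->m2m_dev);
 */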

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);

void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
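
/*
 * Illustrative sketch (hypothetical driver): these helpers assume that
 * file->private_data points to a struct v4l2_fh with a valid m2m_ctx, and
 * are usually plugged straight into the driver's v4l2_ioctl_ops, e.g.:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *		...
 *	};
 */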

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
 * Should be called from the encoder driver's encoder_cmd() callback
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	if (ec->cmd == V4L2_ENC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
 * Should be called from the decoder driver's decoder_cmd() callback
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	if (dc->cmd == V4L2_DEC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);

int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);