// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and vb2.
 *
 * Helper functions for devices that use vb2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* The job queue is not running new jobs */
#define QUEUE_PAUSED		(1 << 0)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			which controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;
	unsigned long		job_queue_flags;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Running new jobs is paused\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
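
/*
 * Example (illustrative sketch only, not part of this framework): a
 * minimal .device_run callback as a driver might implement it. The
 * "my_ctx", "my_dev" and my_hw_start() names, and keeping the m2m
 * context in ctx->fh.m2m_ctx, are hypothetical driver-side choices;
 * the only framework calls used are v4l2_m2m_next_src_buf(),
 * v4l2_m2m_next_dst_buf() and, on completion, v4l2_m2m_job_finish().
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		// Program the hardware with both buffers and start it.
 *		// v4l2_m2m_job_finish() is then called from the driver's
 *		// completion interrupt, not from here.
 *		my_hw_start(ctx->dev, src, dst);
 *	}
 */
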
/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dprintk("Timestamp mismatch, returning held capture buffer\n");
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_ctx->has_stopped) {
		dprintk("Device has stopped\n");
		goto job_unlock;
	}

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}

/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	/*
	 * If the request API is being used, returning the OUTPUT
	 * (src) buffer will wake-up any process waiting on the
	 * request file descriptor.
	 *
	 * Therefore, return the CAPTURE (dst) buffer first,
	 * to avoid signalling the request file descriptor
	 * before the CAPTURE buffer is done.
	 */
	v4l2_m2m_buf_done(src_buf, state);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
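
/*
 * Example (illustrative sketch; the "my_*" names and keeping the m2m
 * context in ctx->fh.m2m_ctx are hypothetical driver-side choices): a
 * typical completion path, e.g. run from a driver's interrupt handler,
 * that returns both buffers and lets the framework schedule the next
 * job. Drivers that support held capture buffers would call
 * v4l2_m2m_buf_done_and_job_finish() instead.
 *
 *	static void my_job_done(struct my_dev *dev)
 *	{
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *	}
 */
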
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	struct v4l2_m2m_ctx *curr_ctx;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
	curr_ctx = m2m_dev->curr_ctx;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (curr_ctx)
		wait_event(curr_ctx->finished,
			   !(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);

void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is no
	 * longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{
	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			unsigned int i;

			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}
}

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{
	vbuf->flags |= V4L2_BUF_FLAG_LAST;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);

/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *next_dst_buf;

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		return 0;

	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
	m2m_ctx->is_draining = true;

	/*
	 * The processing of the last output buffer queued before
	 * the STOP command is expected to mark the buffer management
	 * state as stopped with v4l2_m2m_mark_stopped().
	 */
	if (m2m_ctx->last_src_buf)
		return 0;

	/*
	 * In case the output queue is empty, try to mark the last capture
	 * buffer as LAST.
	 */
	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
	if (!next_dst_buf) {
		/*
		 * Wait for the next queued one in encoder/decoder driver
		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
		 * streaming.
		 */
		m2m_ctx->next_buf_last = true;
		return 0;
	}

	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);

	return 0;
}

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder drivers start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{
	/* If streaming is started again, untag the last output buffer */
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder driver stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * If in draining state, either mark the next dst buffer as
		 * done or flag the next one to be marked as done, either in
		 * the encoder/decoder driver buf_queue() callback using the
		 * v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf()
		 * if the encoder/decoder is not yet streaming.
		 */
		if (m2m_ctx->is_draining) {
			struct vb2_v4l2_buffer *next_dst_buf;

			m2m_ctx->last_src_buf = NULL;
			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
			if (!next_dst_buf)
				m2m_ctx->next_buf_last = true;
			else
				v4l2_m2m_last_buffer_done(m2m_ctx,
							  next_dst_buf);
		}
	} else {
		v4l2_m2m_clear_state(m2m_ctx);
	}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
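
/*
 * Example (sketch; the "my_*" names and per-driver details are
 * hypothetical): how a stateful codec driver's buf_queue() callback can
 * honour the draining state set up above, tagging the next queued
 * capture buffer as LAST when v4l2_m2m_dst_buf_is_last() says so.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
 *		    vb2_is_streaming(vb->vb2_queue) &&
 *		    v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
 *			unsigned int i;
 *
 *			for (i = 0; i < vb->num_planes; i++)
 *				vb2_set_plane_payload(vb, i, 0);
 *			v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
 *			return;
 *		}
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */
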
static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vbuf;
	unsigned int i;

	if (WARN_ON(q->is_output))
		return;
	if (list_empty(&q->queued_list))
		return;

	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);

	/*
	 * Since the buffer hasn't been queued to the ready queue,
	 * mark it active and owned before marking it LAST and DONE
	 */
	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->field = V4L2_FIELD_NONE;

	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}

	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	/*
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device yet and the device was asked to stop, mark the
	 * previously queued buffer as DONE with the LAST flag since it
	 * won't be queued on the device.
	 */
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	    (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list,
	 * which means the buffer is either in the driver already or is
	 * waiting for the driver to claim it and start processing.
	 */
	if ((!vb2_is_streaming(src_q) || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!vb2_is_streaming(dst_q) || dst_q->error ||
	     (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
		return EPOLLERR;

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	/*
	 * If the last buffer was dequeued from the capture queue, signal
	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
	 */
	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in
	 * their events during this first call. Failure to do so will result
	 * in the queue's events being ignored because the poll_table won't
	 * be capable of adding new wait queues thereafter.
	 */
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct v4l2_fh *fh = file->private_data;
	unsigned long offset = pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
		pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
#endif

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret) {
		kfree(entity->name);
		entity->name = NULL;
		return ret;
	}
	ret = media_device_register_entity(mdev, entity);
	if (ret) {
		kfree(entity->name);
		entity->name = NULL;
		return ret;
	}

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
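
/*
 * Example (sketch; "my_m2m_ops", "dev" and "vfd" are hypothetical
 * driver-side names and error unwinding is omitted for brevity): a
 * typical probe()-time sequence that creates the m2m device and, once
 * the video device is registered, registers the media topology above.
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *
 *	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
 *	if (ret)
 *		return ret;
 *
 *	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
 *					MEDIA_ENT_F_PROC_VIDEO_SCALER);
 *	if (ret)
 *		return ret;
 */
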
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
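
/*
 * Example (sketch; the "my_*" names, dev_mutex, my_qops and the use of
 * the dma-contig allocator are hypothetical driver-side choices): a
 * queue_init callback as passed to v4l2_m2m_ctx_init() from a driver's
 * open(). Both queues must share the same lock, as checked above, and
 * use struct v4l2_m2m_buffer for the per-buffer state so that
 * v4l2_m2m_buf_queue() can be used.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->dev_mutex;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		dst_vq->drv_priv = ctx;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &my_qops;
 *		dst_vq->mem_ops = &vb2_dma_contig_memops;
 *		dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		dst_vq->lock = &ctx->dev->dev_mutex;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * In open() the context is then created with
 * ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &my_queue_init);
 * and torn down in release() with v4l2_m2m_ctx_release(ctx->fh.m2m_ctx).
 */
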
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);

void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
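
/*
 * Example (sketch; "my_ioctl_ops" is a hypothetical driver name): the
 * ioctl helpers in this file are meant to be plugged straight into a
 * driver's v4l2_ioctl_ops when the driver stores its m2m context in the
 * v4l2_fh found in file->private_data. Format handling and other
 * driver-specific ioctls are omitted.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */
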
int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
 * Should be called from the encoder driver encoder_cmd() callback
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	if (ec->cmd == V4L2_ENC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
 * Should be called from the decoder driver decoder_cmd() callback
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	if (dc->cmd == V4L2_DEC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);

int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
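
/*
 * Example (sketch; "my_fops", my_open() and my_release() are
 * hypothetical driver names): the file-operation helpers above plug
 * directly into a driver's v4l2_file_operations; open() and release()
 * stay driver specific since they create and destroy the m2m context.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */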