/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			which controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job, flags_out, flags_cap;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto out_unlock;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto cap_unlock;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
	return;

cap_unlock:
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then the job_abort callback
 *    will be called and we will wait until the job completes
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/* We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is no
	 * longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}
	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
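
/*
 * Usage sketch (illustrative only, not part of this framework): a mem2mem
 * driver supplies a struct v4l2_m2m_ops whose .device_run callback picks up
 * the next source/destination buffer pair and starts the hardware; when the
 * hardware finishes (typically in the driver's interrupt handler), the
 * driver marks the buffers done and calls v4l2_m2m_job_finish() so the next
 * queued job can be scheduled. The foo_* names and struct foo_ctx/foo_dev
 * below are hypothetical driver symbols, and ctx->fh is assumed to be a
 * struct v4l2_fh embedded in the driver context.
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		(program the hardware with src and dst, then start it)
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *dev = data;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *	};
 *
 * and in probe():
 *
 *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */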
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
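
/*
 * Usage sketch (illustrative only, not part of this framework): the driver's
 * open() creates a context, initializes a struct v4l2_fh embedded in it and
 * sets fh.m2m_ctx with v4l2_m2m_ctx_init(); the queue_init callback
 * configures both vb2 queues. Note that both queues must share the same
 * mutex (see the WARN_ON above) and buf_struct_size must be
 * sizeof(struct v4l2_m2m_buffer). The foo_* names, foo_qops and the
 * vb2_vmalloc_memops/dev_mutex choices are hypothetical example values.
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &foo_qops;
 *		src_vq->mem_ops = &vb2_vmalloc_memops;
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->dev_mutex;
 *
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		dst_vq->drv_priv = ctx;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &foo_qops;
 *		dst_vq->mem_ops = &vb2_vmalloc_memops;
 *		dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		dst_vq->lock = &ctx->dev->dev_mutex;
 *
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * and in the driver's open():
 *
 *	v4l2_fh_init(&ctx->fh, video_devdata(file));
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 *	file->private_data = &ctx->fh;
 *	v4l2_fh_add(&ctx->fh);
 */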
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);

void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
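
/*
 * Usage sketch (illustrative only, not part of this framework): a driver
 * whose file handle embeds a struct v4l2_fh with fh.m2m_ctx set up in open()
 * can plug the v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers above directly
 * into its ops tables. The foo_* names are hypothetical.
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */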