/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @source:		source side media entity (CONFIG_MEDIA_CONTROLLER only)
 * @source_pad:		pad of the source entity
 * @sink:		sink side media entity
 * @sink_pad:		pad of the sink entity
 * @proc:		processing entity sitting between source and sink
 * @proc_pads:		pads (sink, source) of the processing entity
 * @intf_devnode:	media interface for the video device node
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
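/*
 * Example (not built): a minimal sketch of how a driver can use
 * v4l2_m2m_get_vq() to look up the vb2 queue that backs a given buffer
 * type, e.g. to refuse S_FMT while that queue has buffers allocated.
 * struct my_ctx and the fh_to_ctx() helper are hypothetical.
 */
#if 0
static int my_vidioc_s_fmt(struct file *file, void *priv,
			   struct v4l2_format *f)
{
	struct my_ctx *ctx = fh_to_ctx(priv);	/* hypothetical driver context */
	struct vb2_queue *vq;

	/* Pick the queue matching the requested buffer type. */
	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_busy(vq))
		return -EBUSY;

	/* ... apply the format to the driver's per-queue state ... */
	return 0;
}
#endif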
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
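/*
 * Example (not built): a sketch of a driver's device_run callback pulling
 * the next ready source and destination buffers, using the
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() wrappers from
 * <media/v4l2-mem2mem.h>. struct my_ctx and my_hw_start() are hypothetical.
 */
#if 0
static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;	/* drv_priv passed to v4l2_m2m_ctx_init() */
	struct vb2_v4l2_buffer *src, *dst;

	/*
	 * v4l2_m2m_try_schedule() only queued this job once both queues had
	 * a ready buffer (unless a queue was marked buffered).
	 */
	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Program the hardware and start the transaction. */
	my_hw_start(ctx, &src->vb2_buf, &dst->vb2_buf);
}
#endif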
/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
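/*
 * Example (not built): a sketch of an optional job_ready callback for a
 * driver that needs two queued source buffers before it can start a job
 * (e.g. for temporal filtering). struct my_ctx is hypothetical; the
 * v4l2_m2m_num_src_bufs_ready() helper comes from <media/v4l2-mem2mem.h>.
 */
#if 0
static int my_job_ready(void *priv)
{
	struct my_ctx *ctx = priv;

	/*
	 * Returning 0 keeps this context off the job_queue for now;
	 * v4l2_m2m_try_schedule() runs again on the next QBUF/STREAMON.
	 */
	return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
}
#endif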
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
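/*
 * Example (not built): a sketch of a driver's completion path (typically
 * called from its interrupt handler) returning both buffers to vb2 and
 * letting the framework pick the next job. struct my_dev and struct my_ctx
 * are hypothetical; the v4l2_m2m_src_buf_remove()/v4l2_m2m_dst_buf_remove()
 * wrappers come from <media/v4l2-mem2mem.h>.
 */
#if 0
static void my_job_done(struct my_dev *dev, struct my_ctx *ctx,
			enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src, *dst;

	/* Take the processed buffers off the ready queues... */
	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	/* ...hand them back to userspace... */
	vb2_buffer_done(&src->vb2_buf, state);
	vb2_buffer_done(&dst->vb2_buf, state);

	/* ...and let the core schedule the next queued job, if any. */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
}
#endif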
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and he
	   is no longer owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		else if (req_events & EPOLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities
	 * and one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
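/*
 * Example (not built): a sketch of the probe-time setup a driver performs
 * around v4l2_m2m_init() and v4l2_m2m_register_media_controller().
 * struct my_dev, my_device_run/my_job_ready/my_job_abort and the registered
 * video_device (dev->vfd) are hypothetical.
 */
#if 0
static const struct v4l2_m2m_ops my_m2m_ops = {
	.device_run	= my_device_run,	/* mandatory */
	.job_ready	= my_job_ready,		/* optional */
	.job_abort	= my_job_abort,		/* optional */
};

static int my_probe_m2m(struct my_dev *dev)
{
	int ret;

	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
	if (IS_ERR(dev->m2m_dev))
		return PTR_ERR(dev->m2m_dev);

	/* Expose the source/proc/sink topology once the video node exists. */
	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, dev->vfd,
						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
	if (ret) {
		v4l2_m2m_release(dev->m2m_dev);
		return ret;
	}
	return 0;
}
#endif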
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
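/*
 * Example (not built): a sketch of the open() path pairing
 * v4l2_m2m_ctx_init() with a queue_init callback, and of a vb2 buf_queue
 * op feeding buffers to v4l2_m2m_buf_queue(). struct my_ctx, my_qops, the
 * shared ctx->dev->mutex and the file-handle plumbing are hypothetical;
 * vb2_dma_contig_memops would need <media/videobuf2-dma-contig.h>.
 */
#if 0
static void my_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* Put the buffer on the right rdy_queue; scheduling happens on QBUF. */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->ops = &my_qops;			/* includes my_vb2_buf_queue */
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->mutex;	/* shared by both queues */
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	/* dst_vq is set up the same way with V4L2_BUF_TYPE_VIDEO_CAPTURE. */
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	return vb2_queue_init(dst_vq);
}

static int my_open_m2m(struct my_ctx *ctx)
{
	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(ctx->dev->m2m_dev, ctx,
					    my_queue_init);
	return PTR_ERR_OR_ZERO(ctx->fh.m2m_ctx);
}
#endif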
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
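/*
 * Example (not built): how a driver typically wires the ioctl and file
 * operation helpers above into its ops tables. my_open/my_release and the
 * remaining driver-specific ioctls are hypothetical.
 */
#if 0
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
	/* ... formats, controls, etc. are driver specific ... */
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.release	= my_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};
#endif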