/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
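
/*
 * Example: a driver's device_run() callback typically starts by peeking at
 * the next ready buffer on each queue before programming the hardware.
 * This is only an illustrative sketch: the foo_* names are hypothetical,
 * while v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() are the wrappers
 * from <media/v4l2-mem2mem.h> around v4l2_m2m_next_buf() above.
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		foo_hw_program(ctx, &src->vb2_buf, &dst->vb2_buf);
 *		foo_hw_start(ctx);
 *	}
 */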
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
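
/*
 * Example: the optional job_ready() callback lets a driver veto scheduling
 * until it has everything a transaction needs, e.g. a decoder that wants
 * several bitstream buffers queued up front. Illustrative sketch only:
 * foo_ctx and FOO_MIN_SRC_BUFS are hypothetical, and
 * v4l2_m2m_num_src_bufs_ready() comes from <media/v4l2-mem2mem.h>.
 *
 *	static int foo_job_ready(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >=
 *			FOO_MIN_SRC_BUFS;
 *	}
 */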
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context:
 * 1] If the context is currently running, the driver's job_abort will be
 *    called and we wait for the job to finish;
 * 2] If the context is queued, the context will be removed from
 *    the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
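
/*
 * Example: a typical completion path (interrupt handler or bottom half)
 * removes the just-processed buffers from the ready queues, hands them back
 * to videobuf2 and then calls v4l2_m2m_job_finish() so that the next job can
 * be scheduled. Sketch only; the foo_* names are hypothetical and the
 * v4l2_m2m_*_buf_remove() wrappers come from <media/v4l2-mem2mem.h>.
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *dev = priv;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *		return IRQ_HANDLED;
 *	}
 */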
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	 * no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
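
/*
 * Example: seen from user space the offset shift is transparent. The offset
 * that VIDIOC_QUERYBUF returns for a CAPTURE buffer already includes
 * DST_QUEUE_OFF_BASE, and passing it unchanged to mmap() lets
 * v4l2_m2m_mmap() below route the mapping to the capture queue.
 * Hypothetical user-space snippet:
 *
 *	struct v4l2_buffer buf = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory	= V4L2_MEMORY_MMAP,
 *		.index	= 0,
 *	};
 *
 *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *	p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		 fd, buf.m.offset);
 */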
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
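
/*
 * Example: v4l2_m2m_streamoff() resets the framework's ready queue, but the
 * buffers themselves are given back by the driver's vb2 stop_streaming()
 * callback, which vb2_streamoff() invokes. A common pattern looks roughly
 * like this (sketch; foo_ctx is hypothetical):
 *
 *	static void foo_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct foo_ctx *ctx = vb2_get_drv_priv(q);
 *		struct vb2_v4l2_buffer *vbuf;
 *
 *		for (;;) {
 *			if (V4L2_TYPE_IS_OUTPUT(q->type))
 *				vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *			else
 *				vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *			if (!vbuf)
 *				return;
 *			vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 */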
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | POLLIN | POLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
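
/*
 * Example: the callbacks passed to v4l2_m2m_init() at probe time.
 * device_run and job_abort are mandatory (see the WARN_ONs above),
 * job_ready is optional. Sketch with hypothetical foo_* symbols:
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_ready	= foo_job_ready,
 *		.job_abort	= foo_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */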
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it to the m2m context
	 * as the common buffer queue lock. This lock is used by the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
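
/*
 * Example: with a struct v4l2_fh embedded in the driver context and
 * fh->m2m_ctx set up at open() time, the helpers above can be plugged
 * straight into the ioctl table. Sketch; the format handlers and other
 * driver-specific ioctls are omitted here:
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */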
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
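
/*
 * Example: how the pieces typically come together at open() time. The
 * queue_init callback points both vb2 queues at the same mutex so that
 * m2m_ctx->q_lock gets set and v4l2_m2m_fop_poll() can serialize against
 * the ioctls; buf_struct_size must be sizeof(struct v4l2_m2m_buffer) for
 * v4l2_m2m_buf_queue() to work. Sketch only; every foo_* symbol is
 * hypothetical, the memory ops are just one possible choice, and foo_open()
 * is expected to store the result of v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 * foo_queue_init) in ctx->fh.m2m_ctx.
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &foo_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->dev_mutex;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		dst_vq->drv_priv = ctx;
 *		dst_vq->ops = &foo_qops;
 *		dst_vq->mem_ops = &vb2_dma_contig_memops;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		dst_vq->lock = &ctx->dev->dev_mutex;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */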