/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
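
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver's device_run callback typically peeks the next ready source and
 * destination buffers with the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf()
 * wrappers around v4l2_m2m_next_buf() and programs the hardware with them.
 * The names my_ctx and my_device_start() below are placeholders.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		my_device_start(ctx, &src->vb2_buf, &dst->vb2_buf);
 *	}
 */
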
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
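
/*
 * Illustrative sketch (hypothetical driver code): a typical interrupt or
 * completion handler looks up the running instance with
 * v4l2_m2m_get_curr_priv(), returns the processed buffers with
 * vb2_buffer_done(), and then lets the framework pick the next job via
 * v4l2_m2m_job_finish() (defined further below). The names my_dev and
 * my_ctx are placeholders.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		if (!ctx)
 *			return IRQ_HANDLED;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */
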
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
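
/*
 * Illustrative sketch (hypothetical driver code): the scheduler above drives
 * a driver through its struct v4l2_m2m_ops. device_run() must start the
 * hardware for one transaction and, from its completion path, end the job
 * with v4l2_m2m_job_finish(); job_ready() (optional) reports whether enough
 * buffers are queued to run; job_abort() asks the driver to stop as soon as
 * it safely can. The my_* callbacks are placeholders.
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 */
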
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	 * no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
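
/*
 * Note on the offset adjustment above: DST_QUEUE_OFF_BASE is 1 << 30
 * (0x40000000). As a worked example, if a capture (destination) buffer's
 * real vb2 mmap offset happens to be 0x2000, QUERYBUF reports 0x40002000
 * to userspace, while an output (source) buffer keeps its unmodified
 * offset. v4l2_m2m_mmap() below uses the same constant to route the mmap()
 * call to the right queue and to restore the original offset before
 * calling vb2_mmap().
 */
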
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list,
	 * which means it is either in the driver already or waiting for the
	 * driver to claim it and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | POLLIN | POLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
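
/*
 * Illustrative sketch (hypothetical application code): with the helper above,
 * a mem2mem device can be driven from userspace by an ordinary poll() loop.
 * POLLOUT/POLLWRNORM means a source (OUTPUT) buffer can be dequeued and
 * recycled, POLLIN/POLLRDNORM means a processed destination (CAPTURE) buffer
 * is ready, and POLLPRI signals a pending V4L2 event. The names video_fd,
 * out_buf and cap_buf are placeholders.
 *
 *	struct pollfd pfd = {
 *		.fd = video_fd,
 *		.events = POLLIN | POLLOUT | POLLPRI,
 *	};
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLOUT)
 *			ioctl(video_fd, VIDIOC_DQBUF, &out_buf);
 *		if (pfd.revents & POLLIN)
 *			ioctl(video_fd, VIDIOC_DQBUF, &cap_buf);
 *	}
 */
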
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it as the common buffer
	 * queues lock of the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
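
/*
 * Illustrative sketch (hypothetical driver code): a driver typically calls
 * v4l2_m2m_ctx_init() from its file open() handler and passes a queue_init
 * callback that configures the OUTPUT (source) and CAPTURE (destination)
 * vb2 queues. The my_* names and the vb2_dma_contig_memops choice are
 * placeholders; the buffer struct must embed struct v4l2_m2m_buffer so that
 * v4l2_m2m_buf_queue() can be used from the driver's buf_queue op.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->mutex;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		... same setup for the destination queue ...
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * and in open():
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 */
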
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
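
/*
 * Illustrative sketch (hypothetical driver code): drivers whose file handle
 * embeds a struct v4l2_fh with a valid m2m_ctx can wire the helpers above
 * straight into their ioctl table; only the format handling and other
 * device-specific ioctls remain to be implemented. The my_* entries are
 * placeholders.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */
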
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
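
/*
 * Illustrative sketch (hypothetical driver code): together with an open()
 * handler that sets up a struct v4l2_fh and its m2m_ctx, the two helpers
 * above keep a driver's file operations minimal. my_open and my_release
 * are placeholders.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */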