// SPDX-License-Identifier: GPL-2.0
/*
 * Hantro VPU codec driver
 *
 * Copyright (C) 2018 Collabora, Ltd.
 * Copyright 2018 Google LLC.
 *	Tomasz Figa <tfiga@chromium.org>
 *
 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

#include "hantro_v4l2.h"
#include "hantro.h"
#include "hantro_hw.h"

#define DRIVER_NAME "hantro-vpu"

int hantro_debug;
module_param_named(debug, hantro_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "Debug level - higher value produces more verbose messages");

void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
	return ctrl ? ctrl->p_cur.p : NULL;
}

dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
	struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
	struct vb2_buffer *buf;

	buf = vb2_find_buffer(q, ts);
	if (!buf)
		return 0;
	return hantro_get_dec_buf_addr(ctx, buf);
}

static const struct v4l2_event hantro_eos_event = {
	.type = V4L2_EVENT_EOS
};

static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
				    struct hantro_ctx *ctx,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
		dst->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
		v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}

static void hantro_job_finish(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      enum vb2_buffer_state result)
{
	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);

	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	hantro_job_finish_no_pm(vpu, ctx, result);
}

void hantro_irq_done(struct hantro_dev *vpu,
		     enum vb2_buffer_state result)
{
	struct hantro_ctx *ctx =
		v4l2_m2m_get_curr_priv(vpu->m2m_dev);

	/*
	 * If cancel_delayed_work returns false
	 * the timeout expired. The watchdog is running,
	 * and will take care of finishing the job.
	 */
	if (cancel_delayed_work(&vpu->watchdog_work)) {
		if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
			ctx->codec_ops->done(ctx);
		hantro_job_finish(vpu, ctx, result);
	}
}
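
/*
 * Watchdog armed in hantro_end_prepare_run(); it only fires if the hardware
 * did not signal completion in time (see hantro_irq_done()), in which case
 * the codec is reset and the job is finished with an error.
 */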
void hantro_watchdog(struct work_struct *work)
{
	struct hantro_dev *vpu;
	struct hantro_ctx *ctx;

	vpu = container_of(to_delayed_work(work),
			   struct hantro_dev, watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
	if (ctx) {
		vpu_err("frame processing timed out!\n");
		if (ctx->codec_ops->reset)
			ctx->codec_ops->reset(ctx);
		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
	}
}

void hantro_start_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
				&ctx->ctrl_handler);

	if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}
}

void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
				   &ctx->ctrl_handler);

	/* Kick the watchdog. */
	schedule_delayed_work(&ctx->dev->watchdog_work,
			      msecs_to_jiffies(2000));
}
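
/*
 * Mem2mem device_run hook: power up the VPU, enable its clocks, copy the
 * source buffer metadata to the destination and hand the job over to the
 * codec-specific ->run() callback. Completion is signalled either from the
 * codec interrupt handler through hantro_irq_done() or, on timeout, from
 * hantro_watchdog().
 */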
static void device_run(void *priv)
{
	struct hantro_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;
	int ret;

	src = hantro_get_src_buf(ctx);
	dst = hantro_get_dst_buf(ctx);

	ret = pm_runtime_resume_and_get(ctx->dev->dev);
	if (ret < 0)
		goto err_cancel_job;

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;

	v4l2_m2m_buf_copy_metadata(src, dst, true);

	if (ctx->codec_ops->run(ctx))
		goto err_cancel_job;

	return;

err_cancel_job:
	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = device_run,
};

static int
queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct hantro_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &hantro_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->vpu_mutex;
	src_vq->dev = ctx->dev->v4l2_dev.dev;
	src_vq->supports_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
	/*
	 * The Kernel needs access to the JPEG destination buffer for the
	 * JPEG encoder to fill in the JPEG headers.
	 */
	if (!ctx->is_encoder)
		dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &hantro_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->vpu_mutex;
	dst_vq->dev = ctx->dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}
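
/*
 * Validate stateless codec headers before they are applied, rejecting
 * bitstream parameters (chroma format, bit depth, profile) that the
 * hardware cannot handle.
 */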
static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
{
	if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
		const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;

		if (sps->chroma_format_idc > 1)
			/* Only 4:0:0 and 4:2:0 are supported */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
			/* Luma and chroma bit depth mismatch */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != 0)
			/* Only 8-bit is supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;

		if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
			/* Only 8-bit and 10-bit are supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
		const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;

		/* We only support profile 0 */
		if (dec_params->profile != 0)
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE) {
		const struct v4l2_ctrl_av1_sequence *sequence = ctrl->p_new.p_av1_sequence;

		if (sequence->bit_depth != 8 && sequence->bit_depth != 10)
			return -EINVAL;
	}

	return 0;
}

static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		ctx->jpeg_quality = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_VP9_FRAME: {
		int bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;

		if (ctx->bit_depth == bit_depth)
			return 0;

		return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_HEVC_SPS: {
		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
		int bit_depth = sps->bit_depth_luma_minus8 + 8;

		if (ctx->bit_depth == bit_depth)
			return 0;

		return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_av1_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_AV1_SEQUENCE:
	{
		int bit_depth = ctrl->p_new.p_av1_sequence->bit_depth;
		bool need_postproc = HANTRO_AUTO_POSTPROC;

		if (ctrl->p_new.p_av1_sequence->flags
		    & V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT)
			need_postproc = HANTRO_FORCE_POSTPROC;

		if (ctx->bit_depth == bit_depth &&
		    ctx->need_postproc == need_postproc)
			return 0;

		return hantro_reset_raw_fmt(ctx, bit_depth, need_postproc);
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
};

static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
	.s_ctrl = hantro_jpeg_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
	.s_ctrl = hantro_vp9_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
	.s_ctrl = hantro_hevc_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_av1_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
	.s_ctrl = hantro_av1_s_ctrl,
};

#define HANTRO_JPEG_ACTIVE_MARKERS	(V4L2_JPEG_ACTIVE_MARKER_APP0 | \
					 V4L2_JPEG_ACTIVE_MARKER_COM | \
					 V4L2_JPEG_ACTIVE_MARKER_DQT | \
					 V4L2_JPEG_ACTIVE_MARKER_DHT)
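
/*
 * Control definitions for all supported codecs. hantro_ctrls_setup()
 * registers only the entries whose .codec mask matches the codecs
 * supported by the opened encoder or decoder function.
 */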
static const struct hantro_ctrl controls[] = {
	{
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.min = 5,
			.max = 100,
			.step = 1,
			.def = 50,
			.ops = &hantro_jpeg_ctrl_ops,
		},
	}, {
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_ACTIVE_MARKER,
			.max = HANTRO_JPEG_ACTIVE_MARKERS,
			.def = HANTRO_JPEG_ACTIVE_MARKERS,
			/*
			 * Changing the set of active markers/segments also
			 * messes up the alignment of the JPEG header, which
			 * is needed to allow the hardware to write directly
			 * to the output buffer. Implementing this introduces
			 * a lot of complexity for little gain, as the markers
			 * enabled is already the minimum required set.
			 */
			.flags = V4L2_CTRL_FLAG_READ_ONLY,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
		},
	}, {
		.codec = HANTRO_VP8_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP8_FRAME,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_PPS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
			.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_START_CODE,
			.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
			.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
		}
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
			.min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_START_CODE,
			.min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
			.min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
			.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
			.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
			.min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
			.max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SPS,
			.ops = &hantro_hevc_ctrl_ops,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_PPS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_FRAME,
			.ops = &hantro_vp9_ctrl_ops,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_FRAME,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
			.dims = { V4L2_AV1_MAX_TILE_COUNT },
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_SEQUENCE,
			.ops = &hantro_av1_ctrl_ops,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
		},
	},
};

static int hantro_ctrls_setup(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      int allowed_codecs)
{
	int i, num_ctrls = ARRAY_SIZE(controls);

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);

	for (i = 0; i < num_ctrls; i++) {
		if (!(allowed_codecs & controls[i].codec))
			continue;

		v4l2_ctrl_new_custom(&ctx->ctrl_handler,
				     &controls[i].cfg, NULL);
		if (ctx->ctrl_handler.error) {
			vpu_err("Adding control (%d) failed %d\n",
				controls[i].cfg.id,
				ctx->ctrl_handler.error);
			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
			return ctx->ctrl_handler.error;
		}
	}
	return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
}

/*
 * V4L2 file operations.
 */
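
/*
 * Allocate a per-file context, bind it to the encoder or decoder function
 * owning the opened video node, and set up its mem2mem context and controls.
 */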
static int hantro_open(struct file *filp)
{
	struct hantro_dev *vpu = video_drvdata(filp);
	struct video_device *vdev = video_devdata(filp);
	struct hantro_func *func = hantro_vdev_to_func(vdev);
	struct hantro_ctx *ctx;
	int allowed_codecs, ret;

	/*
	 * We do not need any extra locking here, because we operate only
	 * on local data here, except reading few fields from dev, which
	 * do not change through device's lifetime (which is guaranteed by
	 * reference on module from open()) and V4L2 internal objects (such
	 * as vdev and ctx->fh), which have proper locking done in respective
	 * helper functions used here.
	 */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = vpu;
	if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
		ctx->is_encoder = true;
	} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
		ctx->is_encoder = false;
	} else {
		ret = -ENODEV;
		goto err_ctx_free;
	}

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_ctx_free;
	}

	v4l2_fh_init(&ctx->fh, vdev);
	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	hantro_reset_fmts(ctx);

	ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
	if (ret) {
		vpu_err("Failed to set up controls\n");
		goto err_fh_free;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;

	return 0;

err_fh_free:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
err_ctx_free:
	kfree(ctx);
	return ret;
}

static int hantro_release(struct file *filp)
{
	struct hantro_ctx *ctx =
		container_of(filp->private_data, struct hantro_ctx, fh);

	/*
	 * No need for extra locking because this was the last reference
	 * to this file.
	 */
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations hantro_fops = {
	.owner = THIS_MODULE,
	.open = hantro_open,
	.release = hantro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};

static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
	{ .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
	{ .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
	{ .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
	{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
	{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
	{ .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
	{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
	{ .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
	{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SUNXI
	{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);

static int hantro_register_entity(struct media_device *mdev,
				  struct media_entity *entity,
				  const char *entity_name,
				  struct media_pad *pads, int num_pads,
				  int function, struct video_device *vdev)
{
	char *name;
	int ret;

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (function == MEDIA_ENT_F_IO_V4L) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}

	name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
			      entity_name);
	if (!name)
		return -ENOMEM;

	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;

	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}
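
/*
 * Expose one codec function in the media controller graph as a source I/O
 * entity, a processing entity and a sink I/O entity, linked source -> proc
 * -> sink, plus an interface node for the video devnode.
 */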
static int hantro_attach_func(struct hantro_dev *vpu,
			      struct hantro_func *func)
{
	struct media_device *mdev = &vpu->mdev;
	struct media_link *link;
	int ret;

	/* Create the three encoder entities with their pads */
	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		return ret;

	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->proc, "proc",
				     func->proc_pads, 2, func->id,
				     &func->vdev);
	if (ret)
		goto err_rel_entity0;

	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
	ret = hantro_register_entity(mdev, &func->sink, "sink",
				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
						  0, VIDEO_MAJOR,
						  func->vdev.minor);
	if (!func->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(&func->vdev.entity,
				      &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}
	return 0;

err_rm_devnode:
	media_devnode_remove(func->intf_devnode);

err_rm_links1:
	media_entity_remove_links(&func->sink);

err_rm_links0:
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);

err_rel_entity2:
	media_device_unregister_entity(&func->sink);

err_rel_entity1:
	media_device_unregister_entity(&func->proc);

err_rel_entity0:
	media_device_unregister_entity(&func->vdev.entity);
	return ret;
}

static void hantro_detach_func(struct hantro_func *func)
{
	media_devnode_remove(func->intf_devnode);
	media_entity_remove_links(&func->sink);
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);
	media_device_unregister_entity(&func->sink);
	media_device_unregister_entity(&func->proc);
	media_device_unregister_entity(&func->vdev.entity);
}

static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
{
	const struct of_device_id *match;
	struct hantro_func *func;
	struct video_device *vfd;
	int ret;

	match = of_match_node(of_hantro_match, vpu->dev->of_node);
	func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
	if (!func) {
		v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
		return -ENOMEM;
	}

	func->id = funcid;

	vfd = &func->vdev;
	vfd->fops = &hantro_fops;
	vfd->release = video_device_release_empty;
	vfd->lock = &vpu->vpu_mutex;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	vfd->ioctl_ops = &hantro_ioctl_ops;
	snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
		 funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		vpu->encoder = func;
		v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
		v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
	} else {
		vpu->decoder = func;
		v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
		v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
	}

	video_set_drvdata(vfd, vpu);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
		return ret;
	}

	ret = hantro_attach_func(vpu, func);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev,
			 "Failed to attach functionality to the media device\n");
		goto err_unreg_dev;
	}

	v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
		  vfd->num);

	return 0;

err_unreg_dev:
	video_unregister_device(vfd);
	return ret;
}

static int hantro_add_enc_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->enc_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static int hantro_add_dec_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->dec_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static void hantro_remove_func(struct hantro_dev *vpu,
			       unsigned int funcid)
{
	struct hantro_func *func;

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
		func = vpu->encoder;
	else
		func = vpu->decoder;

	if (!func)
		return;

	hantro_detach_func(func);
	video_unregister_device(&func->vdev);
}

static void hantro_remove_enc_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static void hantro_remove_dec_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static const struct media_device_ops hantro_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};
static int hantro_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct hantro_dev *vpu;
	int num_bases;
	int i, ret;

	vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
	if (!vpu)
		return -ENOMEM;

	vpu->dev = &pdev->dev;
	vpu->pdev = pdev;
	mutex_init(&vpu->vpu_mutex);
	spin_lock_init(&vpu->irqlock);

	match = of_match_node(of_hantro_match, pdev->dev.of_node);
	vpu->variant = match->data;

	/*
	 * Support for nxp,imx8mq-vpu is kept for backwards compatibility
	 * but it's deprecated. Please update your DTS file to use
	 * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
	 */
	if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
		dev_warn(&pdev->dev, "%s compatible is deprecated\n",
			 match->compatible);

	INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);

	vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
				   sizeof(*vpu->clocks), GFP_KERNEL);
	if (!vpu->clocks)
		return -ENOMEM;

	if (vpu->variant->num_clocks > 1) {
		for (i = 0; i < vpu->variant->num_clocks; i++)
			vpu->clocks[i].id = vpu->variant->clk_names[i];

		ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
					vpu->clocks);
		if (ret)
			return ret;
	} else {
		/*
		 * If the driver has a single clk, chances are there will be no
		 * actual name in the DT bindings.
		 */
		vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(vpu->clocks[0].clk))
			return PTR_ERR(vpu->clocks[0].clk);
	}

	vpu->resets = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(vpu->resets))
		return PTR_ERR(vpu->resets);

	num_bases = vpu->variant->num_regs ?: 1;
	vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
				      sizeof(*vpu->reg_bases), GFP_KERNEL);
	if (!vpu->reg_bases)
		return -ENOMEM;

	for (i = 0; i < num_bases; i++) {
		vpu->reg_bases[i] = vpu->variant->reg_names ?
		      devm_platform_ioremap_resource_byname(pdev, vpu->variant->reg_names[i]) :
		      devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(vpu->reg_bases[i]))
			return PTR_ERR(vpu->reg_bases[i]);
	}
	vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
	vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;

	/**
	 * TODO: Eventually allow taking advantage of full 64-bit address space.
	 * Until then we assume the MSB portion of buffers' base addresses is
	 * always 0 due to this masking operation.
	 */
	ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}
	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	for (i = 0; i < vpu->variant->num_irqs; i++) {
		const char *irq_name;
		int irq;

		if (!vpu->variant->irqs[i].handler)
			continue;

		if (vpu->variant->num_irqs > 1) {
			irq_name = vpu->variant->irqs[i].name;
			irq = platform_get_irq_byname(vpu->pdev, irq_name);
		} else {
			/*
			 * If the driver has a single IRQ, chances are there
			 * will be no actual name in the DT bindings.
			 */
			irq_name = "default";
			irq = platform_get_irq(vpu->pdev, 0);
		}
		if (irq < 0)
			return irq;

		ret = devm_request_irq(vpu->dev, irq,
				       vpu->variant->irqs[i].handler, 0,
				       dev_name(vpu->dev), vpu);
		if (ret) {
			dev_err(vpu->dev, "Could not request %s IRQ.\n",
				irq_name);
			return ret;
		}
	}

	if (vpu->variant->init) {
		ret = vpu->variant->init(vpu);
		if (ret) {
			dev_err(&pdev->dev, "Failed to init VPU hardware\n");
			return ret;
		}
	}

	pm_runtime_set_autosuspend_delay(vpu->dev, 100);
	pm_runtime_use_autosuspend(vpu->dev);
	pm_runtime_enable(vpu->dev);

	ret = reset_control_deassert(vpu->resets);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert resets\n");
		goto err_pm_disable;
	}

	ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clocks\n");
		goto err_rst_assert;
	}

	ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		goto err_clk_unprepare;
	}
	platform_set_drvdata(pdev, vpu);

	vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(vpu->m2m_dev)) {
		v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(vpu->m2m_dev);
		goto err_v4l2_unreg;
	}

	vpu->mdev.dev = vpu->dev;
	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
	media_device_init(&vpu->mdev);
	vpu->mdev.ops = &hantro_m2m_media_ops;
	vpu->v4l2_dev.mdev = &vpu->mdev;

	ret = hantro_add_enc_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register encoder\n");
		goto err_m2m_rel;
	}

	ret = hantro_add_dec_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register decoder\n");
		goto err_rm_enc_func;
	}

	ret = media_device_register(&vpu->mdev);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
		goto err_rm_dec_func;
	}

	return 0;

err_rm_dec_func:
	hantro_remove_dec_func(vpu);
err_rm_enc_func:
	hantro_remove_enc_func(vpu);
err_m2m_rel:
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
	v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
err_rst_assert:
	reset_control_assert(vpu->resets);
err_pm_disable:
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return ret;
}

static void hantro_remove(struct platform_device *pdev)
{
	struct hantro_dev *vpu = platform_get_drvdata(pdev);

	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);

	media_device_unregister(&vpu->mdev);
	hantro_remove_dec_func(vpu);
	hantro_remove_enc_func(vpu);
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
	v4l2_device_unregister(&vpu->v4l2_dev);
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
	reset_control_assert(vpu->resets);
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
}
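
/*
 * Runtime PM: the hardware is powered up on demand from device_run() and
 * autosuspended 100 ms after the last completed job (see hantro_probe() and
 * hantro_job_finish()).
 */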
#ifdef CONFIG_PM
static int hantro_runtime_resume(struct device *dev)
{
	struct hantro_dev *vpu = dev_get_drvdata(dev);

	if (vpu->variant->runtime_resume)
		return vpu->variant->runtime_resume(vpu);

	return 0;
}
#endif

static const struct dev_pm_ops hantro_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
};

static struct platform_driver hantro_driver = {
	.probe = hantro_probe,
	.remove_new = hantro_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_hantro_match,
		.pm = &hantro_pm_ops,
	},
};
module_platform_driver(hantro_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_DESCRIPTION("Hantro VPU codec driver");