// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip Video Decoder driver
 *
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Based on rkvdec driver by Google LLC. (Tomasz Figa <tfiga@chromium.org>)
 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

#include "rkvdec.h"
#include "rkvdec-regs.h"

static int rkvdec_try_ctrl(struct v4l2_ctrl *ctrl)
{
	if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
		const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
		/*
		 * TODO: The hardware supports 10-bit and 4:2:2 profiles,
		 * but support for them is currently broken in the driver.
		 * Reject them for now, until this is fixed.
		 */
		if (sps->chroma_format_idc > 1)
			/* Only 4:0:0 and 4:2:0 are supported */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
			/* Luma and chroma bit depth mismatch */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != 0)
			/* Only 8-bit is supported */
			return -EINVAL;
	}
	return 0;
}

static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
	.try_ctrl = rkvdec_try_ctrl,
};

static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
	{
		.mandatory = true,
		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
	},
	{
		.mandatory = true,
		.cfg.id = V4L2_CID_STATELESS_H264_SPS,
		.cfg.ops = &rkvdec_ctrl_ops,
	},
	{
		.mandatory = true,
		.cfg.id = V4L2_CID_STATELESS_H264_PPS,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
		.cfg.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		.cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		.cfg.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
		.cfg.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		.cfg.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		.cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
		.cfg.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
		.cfg.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
		.cfg.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
		.cfg.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
		.cfg.min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
		.cfg.max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
	},
};

static const struct rkvdec_ctrls rkvdec_h264_ctrls = {
	.ctrls = rkvdec_h264_ctrl_descs,
	.num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
};

static const u32 rkvdec_h264_decoded_fmts[] = {
	V4L2_PIX_FMT_NV12,
};

static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 48,
			.max_width = 4096,
			.step_width = 16,
			.min_height = 48,
			.max_height = 2304,
			.step_height = 16,
		},
		.ctrls = &rkvdec_h264_ctrls,
		.ops = &rkvdec_h264_fmt_ops,
		.num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_decoded_fmts),
		.decoded_fmts = rkvdec_h264_decoded_fmts,
	}
};

static const struct rkvdec_coded_fmt_desc *
rkvdec_find_coded_fmt_desc(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
		if (rkvdec_coded_fmts[i].fourcc == fourcc)
			return &rkvdec_coded_fmts[i];
	}

	return NULL;
}

static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
			     u32 fourcc)
{
	memset(f, 0, sizeof(*f));
	f->fmt.pix_mp.pixelformat = fourcc;
	f->fmt.pix_mp.field = V4L2_FIELD_NONE;
	f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
	f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
	f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
}

static void rkvdec_reset_coded_fmt(struct rkvdec_ctx *ctx)
{
	struct v4l2_format *f = &ctx->coded_fmt;

	ctx->coded_fmt_desc = &rkvdec_coded_fmts[0];
	rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->fourcc);

	f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	f->fmt.pix_mp.width = ctx->coded_fmt_desc->frmsize.min_width;
	f->fmt.pix_mp.height = ctx->coded_fmt_desc->frmsize.min_height;

	if (ctx->coded_fmt_desc->ops->adjust_fmt)
		ctx->coded_fmt_desc->ops->adjust_fmt(ctx, f);
}

static void rkvdec_reset_decoded_fmt(struct rkvdec_ctx *ctx)
{
	struct v4l2_format *f = &ctx->decoded_fmt;

	rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->decoded_fmts[0]);
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	v4l2_fill_pixfmt_mp(&f->fmt.pix_mp,
			    ctx->coded_fmt_desc->decoded_fmts[0],
			    ctx->coded_fmt.fmt.pix_mp.width,
			    ctx->coded_fmt.fmt.pix_mp.height);
	f->fmt.pix_mp.plane_fmt[0].sizeimage += 128 *
		DIV_ROUND_UP(f->fmt.pix_mp.width, 16) *
		DIV_ROUND_UP(f->fmt.pix_mp.height, 16);
}

static int rkvdec_enum_framesizes(struct file *file, void *priv,
				  struct v4l2_frmsizeenum *fsize)
{
	const struct rkvdec_coded_fmt_desc *fmt;

	if (fsize->index != 0)
		return -EINVAL;

	fmt = rkvdec_find_coded_fmt_desc(fsize->pixel_format);
	if (!fmt)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise = fmt->frmsize;
	return 0;
}

static int rkvdec_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	struct rkvdec_dev *rkvdec = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);

	strscpy(cap->driver, rkvdec->dev->driver->name,
		sizeof(cap->driver));
	strscpy(cap->card, vdev->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 rkvdec->dev->driver->name);
	return 0;
}

static int rkvdec_try_capture_fmt(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	const struct rkvdec_coded_fmt_desc *coded_desc;
	unsigned int i;

	/*
	 * The codec context should always point to a coded format desc;
	 * if the format on the coded end has not been set yet, it should
	 * point to the default one.
225 */ 226 coded_desc = ctx->coded_fmt_desc; 227 if (WARN_ON(!coded_desc)) 228 return -EINVAL; 229 230 for (i = 0; i < coded_desc->num_decoded_fmts; i++) { 231 if (coded_desc->decoded_fmts[i] == pix_mp->pixelformat) 232 break; 233 } 234 235 if (i == coded_desc->num_decoded_fmts) 236 pix_mp->pixelformat = coded_desc->decoded_fmts[0]; 237 238 /* Always apply the frmsize constraint of the coded end. */ 239 v4l2_apply_frmsize_constraints(&pix_mp->width, 240 &pix_mp->height, 241 &coded_desc->frmsize); 242 243 v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat, 244 pix_mp->width, pix_mp->height); 245 pix_mp->plane_fmt[0].sizeimage += 246 128 * 247 DIV_ROUND_UP(pix_mp->width, 16) * 248 DIV_ROUND_UP(pix_mp->height, 16); 249 pix_mp->field = V4L2_FIELD_NONE; 250 251 return 0; 252 } 253 254 static int rkvdec_try_output_fmt(struct file *file, void *priv, 255 struct v4l2_format *f) 256 { 257 struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; 258 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 259 const struct rkvdec_coded_fmt_desc *desc; 260 261 desc = rkvdec_find_coded_fmt_desc(pix_mp->pixelformat); 262 if (!desc) { 263 pix_mp->pixelformat = rkvdec_coded_fmts[0].fourcc; 264 desc = &rkvdec_coded_fmts[0]; 265 } 266 267 v4l2_apply_frmsize_constraints(&pix_mp->width, 268 &pix_mp->height, 269 &desc->frmsize); 270 271 pix_mp->field = V4L2_FIELD_NONE; 272 /* All coded formats are considered single planar for now. */ 273 pix_mp->num_planes = 1; 274 275 if (desc->ops->adjust_fmt) { 276 int ret; 277 278 ret = desc->ops->adjust_fmt(ctx, f); 279 if (ret) 280 return ret; 281 } 282 283 return 0; 284 } 285 286 static int rkvdec_s_fmt(struct file *file, void *priv, 287 struct v4l2_format *f, 288 int (*try_fmt)(struct file *, void *, 289 struct v4l2_format *)) 290 { 291 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 292 struct vb2_queue *vq; 293 294 if (!try_fmt) 295 return -EINVAL; 296 297 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); 298 if (vb2_is_busy(vq)) 299 return -EBUSY; 300 301 return try_fmt(file, priv, f); 302 } 303 304 static int rkvdec_s_capture_fmt(struct file *file, void *priv, 305 struct v4l2_format *f) 306 { 307 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 308 int ret; 309 310 ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_capture_fmt); 311 if (ret) 312 return ret; 313 314 ctx->decoded_fmt = *f; 315 return 0; 316 } 317 318 static int rkvdec_s_output_fmt(struct file *file, void *priv, 319 struct v4l2_format *f) 320 { 321 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 322 struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; 323 const struct rkvdec_coded_fmt_desc *desc; 324 struct v4l2_format *cap_fmt; 325 struct vb2_queue *peer_vq; 326 int ret; 327 328 /* 329 * Since format change on the OUTPUT queue will reset the CAPTURE 330 * queue, we can't allow doing so when the CAPTURE queue has buffers 331 * allocated. 332 */ 333 peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); 334 if (vb2_is_busy(peer_vq)) 335 return -EBUSY; 336 337 ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_output_fmt); 338 if (ret) 339 return ret; 340 341 desc = rkvdec_find_coded_fmt_desc(f->fmt.pix_mp.pixelformat); 342 if (!desc) 343 return -EINVAL; 344 ctx->coded_fmt_desc = desc; 345 ctx->coded_fmt = *f; 346 347 /* 348 * Current decoded format might have become invalid with newly 349 * selected codec, so reset it to default just to be safe and 350 * keep internal driver state sane. User is mandated to set 351 * the decoded format again after we return, so we don't need 352 * anything smarter. 
353 * 354 * Note that this will propagates any size changes to the decoded format. 355 */ 356 rkvdec_reset_decoded_fmt(ctx); 357 358 /* Propagate colorspace information to capture. */ 359 cap_fmt = &ctx->decoded_fmt; 360 cap_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace; 361 cap_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func; 362 cap_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; 363 cap_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization; 364 365 return 0; 366 } 367 368 static int rkvdec_g_output_fmt(struct file *file, void *priv, 369 struct v4l2_format *f) 370 { 371 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 372 373 *f = ctx->coded_fmt; 374 return 0; 375 } 376 377 static int rkvdec_g_capture_fmt(struct file *file, void *priv, 378 struct v4l2_format *f) 379 { 380 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 381 382 *f = ctx->decoded_fmt; 383 return 0; 384 } 385 386 static int rkvdec_enum_output_fmt(struct file *file, void *priv, 387 struct v4l2_fmtdesc *f) 388 { 389 if (f->index >= ARRAY_SIZE(rkvdec_coded_fmts)) 390 return -EINVAL; 391 392 f->pixelformat = rkvdec_coded_fmts[f->index].fourcc; 393 return 0; 394 } 395 396 static int rkvdec_enum_capture_fmt(struct file *file, void *priv, 397 struct v4l2_fmtdesc *f) 398 { 399 struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv); 400 401 if (WARN_ON(!ctx->coded_fmt_desc)) 402 return -EINVAL; 403 404 if (f->index >= ctx->coded_fmt_desc->num_decoded_fmts) 405 return -EINVAL; 406 407 f->pixelformat = ctx->coded_fmt_desc->decoded_fmts[f->index]; 408 return 0; 409 } 410 411 static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = { 412 .vidioc_querycap = rkvdec_querycap, 413 .vidioc_enum_framesizes = rkvdec_enum_framesizes, 414 415 .vidioc_try_fmt_vid_cap_mplane = rkvdec_try_capture_fmt, 416 .vidioc_try_fmt_vid_out_mplane = rkvdec_try_output_fmt, 417 .vidioc_s_fmt_vid_out_mplane = rkvdec_s_output_fmt, 418 .vidioc_s_fmt_vid_cap_mplane = rkvdec_s_capture_fmt, 419 .vidioc_g_fmt_vid_out_mplane = rkvdec_g_output_fmt, 420 .vidioc_g_fmt_vid_cap_mplane = rkvdec_g_capture_fmt, 421 .vidioc_enum_fmt_vid_out = rkvdec_enum_output_fmt, 422 .vidioc_enum_fmt_vid_cap = rkvdec_enum_capture_fmt, 423 424 .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, 425 .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, 426 .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, 427 .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, 428 .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, 429 .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, 430 .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, 431 432 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, 433 .vidioc_unsubscribe_event = v4l2_event_unsubscribe, 434 435 .vidioc_streamon = v4l2_m2m_ioctl_streamon, 436 .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, 437 }; 438 439 static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, 440 unsigned int *num_planes, unsigned int sizes[], 441 struct device *alloc_devs[]) 442 { 443 struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq); 444 struct v4l2_format *f; 445 unsigned int i; 446 447 if (V4L2_TYPE_IS_OUTPUT(vq->type)) 448 f = &ctx->coded_fmt; 449 else 450 f = &ctx->decoded_fmt; 451 452 if (*num_planes) { 453 if (*num_planes != f->fmt.pix_mp.num_planes) 454 return -EINVAL; 455 456 for (i = 0; i < f->fmt.pix_mp.num_planes; i++) { 457 if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage) 458 return -EINVAL; 459 } 460 } else { 461 *num_planes = f->fmt.pix_mp.num_planes; 462 for (i = 0; i < f->fmt.pix_mp.num_planes; i++) 463 sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage; 464 } 465 466 return 0; 467 } 468 469 
static int rkvdec_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
	struct v4l2_format *f;
	unsigned int i;

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		f = &ctx->coded_fmt;
	else
		f = &ctx->decoded_fmt;

	for (i = 0; i < f->fmt.pix_mp.num_planes; ++i) {
		u32 sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;

		if (vb2_plane_size(vb, i) < sizeimage)
			return -EINVAL;
	}
	vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
	return 0;
}

static void rkvdec_buf_queue(struct vb2_buffer *vb)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static int rkvdec_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;
	return 0;
}

static void rkvdec_buf_request_complete(struct vb2_buffer *vb)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
}

static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
	const struct rkvdec_coded_fmt_desc *desc;
	int ret;

	if (V4L2_TYPE_IS_CAPTURE(q->type))
		return 0;

	desc = ctx->coded_fmt_desc;
	if (WARN_ON(!desc))
		return -EINVAL;

	if (desc->ops->start) {
		ret = desc->ops->start(ctx);
		if (ret)
			return ret;
	}

	return 0;
}

static void rkvdec_queue_cleanup(struct vb2_queue *vq, u32 state)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);

	while (true) {
		struct vb2_v4l2_buffer *vbuf;

		if (V4L2_TYPE_IS_OUTPUT(vq->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		if (!vbuf)
			break;

		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
					   &ctx->ctrl_hdl);
		v4l2_m2m_buf_done(vbuf, state);
	}
}

static void rkvdec_stop_streaming(struct vb2_queue *q)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);

	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;

		if (WARN_ON(!desc))
			return;

		if (desc->ops->stop)
			desc->ops->stop(ctx);
	}

	rkvdec_queue_cleanup(q, VB2_BUF_STATE_ERROR);
}

static const struct vb2_ops rkvdec_queue_ops = {
	.queue_setup = rkvdec_queue_setup,
	.buf_prepare = rkvdec_buf_prepare,
	.buf_queue = rkvdec_buf_queue,
	.buf_out_validate = rkvdec_buf_out_validate,
	.buf_request_complete = rkvdec_buf_request_complete,
	.start_streaming = rkvdec_start_streaming,
	.stop_streaming = rkvdec_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

static int rkvdec_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	const struct rkvdec_ctrls *ctrls;
	struct v4l2_ctrl_handler *hdl;
	struct rkvdec_ctx *ctx = NULL;
	unsigned int count, i;
	int ret;

	list_for_each_entry(obj, &req->objects, list) {
		if (vb2_request_object_is_buffer(obj)) {
			struct vb2_buffer *vb;

			vb = container_of(obj, struct vb2_buffer, req_obj);
			ctx = vb2_get_drv_priv(vb->vb2_queue);
			break;
		}
	}

	if (!ctx)
		return -EINVAL;

	count = vb2_request_buffer_cnt(req);
	if (!count)
		return -ENOENT;
	else if (count > 1)
		return -EINVAL;

	hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
	if (!hdl)
		return -ENOENT;

	ret = 0;
	ctrls = ctx->coded_fmt_desc->ctrls;
	for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
		u32 id = ctrls->ctrls[i].cfg.id;
		struct v4l2_ctrl *ctrl;

		if (!ctrls->ctrls[i].mandatory)
			continue;

		ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
		if (!ctrl) {
			ret = -ENOENT;
			break;
		}
	}

	v4l2_ctrl_request_hdl_put(hdl);

	if (ret)
		return ret;

	return vb2_request_validate(req);
}

static const struct media_device_ops rkvdec_media_ops = {
	.req_validate = rkvdec_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};

static void rkvdec_job_finish_no_pm(struct rkvdec_ctx *ctx,
				    enum vb2_buffer_state result)
{
	if (ctx->coded_fmt_desc->ops->done) {
		struct vb2_v4l2_buffer *src_buf, *dst_buf;

		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
		ctx->coded_fmt_desc->ops->done(ctx, src_buf, dst_buf, result);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}

static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
			      enum vb2_buffer_state result)
{
	struct rkvdec_dev *rkvdec = ctx->dev;

	pm_runtime_mark_last_busy(rkvdec->dev);
	pm_runtime_put_autosuspend(rkvdec->dev);
	rkvdec_job_finish_no_pm(ctx, result);
}

void rkvdec_run_preamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
{
	struct media_request *src_req;

	memset(run, 0, sizeof(*run));

	run->bufs.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	run->bufs.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/*
	 * Apply request controls if needed.
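	 *
	 * In the stateless API, each OUTPUT (bitstream) buffer is submitted
	 * through a media request that also carries the codec controls
	 * (SPS, PPS, decode params, ...) for that frame, so the controls
	 * must be applied before the decode operation is configured.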
	 */
	src_req = run->bufs.src->vb2_buf.req_obj.req;
	if (src_req)
		v4l2_ctrl_request_setup(src_req, &ctx->ctrl_hdl);

	v4l2_m2m_buf_copy_metadata(run->bufs.src, run->bufs.dst, true);
}

void rkvdec_run_postamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
{
	struct media_request *src_req = run->bufs.src->vb2_buf.req_obj.req;

	if (src_req)
		v4l2_ctrl_request_complete(src_req, &ctx->ctrl_hdl);
}

static void rkvdec_device_run(void *priv)
{
	struct rkvdec_ctx *ctx = priv;
	struct rkvdec_dev *rkvdec = ctx->dev;
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
	int ret;

	if (WARN_ON(!desc))
		return;

	ret = pm_runtime_get_sync(rkvdec->dev);
	if (ret < 0) {
		rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
		return;
	}

	ret = desc->ops->run(ctx);
	if (ret)
		rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
}

static struct v4l2_m2m_ops rkvdec_m2m_ops = {
	.device_run = rkvdec_device_run,
};

static int rkvdec_queue_init(void *priv,
			     struct vb2_queue *src_vq,
			     struct vb2_queue *dst_vq)
{
	struct rkvdec_ctx *ctx = priv;
	struct rkvdec_dev *rkvdec = ctx->dev;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &rkvdec_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * The driver does mostly sequential access, so sacrifice TLB
	 * efficiency for faster allocation. Also, no CPU access is done
	 * on the source queue, so no kernel mapping is needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &rkvdec->vdev_lock;
	src_vq->dev = rkvdec->v4l2_dev.dev;
	src_vq->supports_requests = true;
	src_vq->requires_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &rkvdec_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct rkvdec_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &rkvdec->vdev_lock;
	dst_vq->dev = rkvdec->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

static int rkvdec_add_ctrls(struct rkvdec_ctx *ctx,
			    const struct rkvdec_ctrls *ctrls)
{
	unsigned int i;

	for (i = 0; i < ctrls->num_ctrls; i++) {
		const struct v4l2_ctrl_config *cfg = &ctrls->ctrls[i].cfg;

		v4l2_ctrl_new_custom(&ctx->ctrl_hdl, cfg, ctx);
		if (ctx->ctrl_hdl.error)
			return ctx->ctrl_hdl.error;
	}

	return 0;
}

static int rkvdec_init_ctrls(struct rkvdec_ctx *ctx)
{
	unsigned int i, nctrls = 0;
	int ret;

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++)
		nctrls += rkvdec_coded_fmts[i].ctrls->num_ctrls;

	v4l2_ctrl_handler_init(&ctx->ctrl_hdl, nctrls);

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
		ret = rkvdec_add_ctrls(ctx, rkvdec_coded_fmts[i].ctrls);
		if (ret)
			goto err_free_handler;
	}

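	/* Apply the initial value of every control just registered. */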
	ret = v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
	if (ret)
		goto err_free_handler;

	ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
	return 0;

err_free_handler:
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
	return ret;
}

static int rkvdec_open(struct file *filp)
{
	struct rkvdec_dev *rkvdec = video_drvdata(filp);
	struct rkvdec_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = rkvdec;
	rkvdec_reset_coded_fmt(ctx);
	rkvdec_reset_decoded_fmt(ctx);
	v4l2_fh_init(&ctx->fh, video_devdata(filp));

	ret = rkvdec_init_ctrls(ctx);
	if (ret)
		goto err_free_ctx;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rkvdec->m2m_dev, ctx,
					    rkvdec_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_cleanup_ctrls;
	}

	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	return 0;

err_cleanup_ctrls:
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);

err_free_ctx:
	kfree(ctx);
	return ret;
}

static int rkvdec_release(struct file *filp)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(filp->private_data);

	v4l2_fh_del(&ctx->fh);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations rkvdec_fops = {
	.owner = THIS_MODULE,
	.open = rkvdec_open,
	.release = rkvdec_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};

static int rkvdec_v4l2_init(struct rkvdec_dev *rkvdec)
{
	int ret;

	ret = v4l2_device_register(rkvdec->dev, &rkvdec->v4l2_dev);
	if (ret) {
		dev_err(rkvdec->dev, "Failed to register V4L2 device\n");
		return ret;
	}

	rkvdec->m2m_dev = v4l2_m2m_init(&rkvdec_m2m_ops);
	if (IS_ERR(rkvdec->m2m_dev)) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(rkvdec->m2m_dev);
		goto err_unregister_v4l2;
	}

	rkvdec->mdev.dev = rkvdec->dev;
	strscpy(rkvdec->mdev.model, "rkvdec", sizeof(rkvdec->mdev.model));
	strscpy(rkvdec->mdev.bus_info, "platform:rkvdec",
		sizeof(rkvdec->mdev.bus_info));
	media_device_init(&rkvdec->mdev);
	rkvdec->mdev.ops = &rkvdec_media_ops;
	rkvdec->v4l2_dev.mdev = &rkvdec->mdev;

	rkvdec->vdev.lock = &rkvdec->vdev_lock;
	rkvdec->vdev.v4l2_dev = &rkvdec->v4l2_dev;
	rkvdec->vdev.fops = &rkvdec_fops;
	rkvdec->vdev.release = video_device_release_empty;
	rkvdec->vdev.vfl_dir = VFL_DIR_M2M;
	rkvdec->vdev.device_caps = V4L2_CAP_STREAMING |
				   V4L2_CAP_VIDEO_M2M_MPLANE;
	rkvdec->vdev.ioctl_ops = &rkvdec_ioctl_ops;
	video_set_drvdata(&rkvdec->vdev, rkvdec);
	strscpy(rkvdec->vdev.name, "rkvdec", sizeof(rkvdec->vdev.name));

	ret = video_register_device(&rkvdec->vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to register video device\n");
		goto err_cleanup_mc;
	}

	ret = v4l2_m2m_register_media_controller(rkvdec->m2m_dev, &rkvdec->vdev,
						 MEDIA_ENT_F_PROC_VIDEO_DECODER);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev,
			 "Failed to initialize V4L2 M2M media controller\n");
		goto err_unregister_vdev;
	}

	ret = media_device_register(&rkvdec->mdev);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to register media device\n");
		goto err_unregister_mc;
	}

	return 0;

err_unregister_mc:
	v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);

err_unregister_vdev:
	video_unregister_device(&rkvdec->vdev);

err_cleanup_mc:
	media_device_cleanup(&rkvdec->mdev);
	v4l2_m2m_release(rkvdec->m2m_dev);

err_unregister_v4l2:
	v4l2_device_unregister(&rkvdec->v4l2_dev);
	return ret;
}

static void rkvdec_v4l2_cleanup(struct rkvdec_dev *rkvdec)
{
	media_device_unregister(&rkvdec->mdev);
	v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);
	video_unregister_device(&rkvdec->vdev);
	media_device_cleanup(&rkvdec->mdev);
	v4l2_m2m_release(rkvdec->m2m_dev);
	v4l2_device_unregister(&rkvdec->v4l2_dev);
}

static irqreturn_t rkvdec_irq_handler(int irq, void *priv)
{
	struct rkvdec_dev *rkvdec = priv;
	enum vb2_buffer_state state;
	u32 status;

	status = readl(rkvdec->regs + RKVDEC_REG_INTERRUPT);
	state = (status & RKVDEC_RDY_STA) ?
		VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;

	writel(0, rkvdec->regs + RKVDEC_REG_INTERRUPT);
	if (cancel_delayed_work(&rkvdec->watchdog_work)) {
		struct rkvdec_ctx *ctx;

		ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
		rkvdec_job_finish(ctx, state);
	}

	return IRQ_HANDLED;
}

static void rkvdec_watchdog_func(struct work_struct *work)
{
	struct rkvdec_dev *rkvdec;
	struct rkvdec_ctx *ctx;

	rkvdec = container_of(to_delayed_work(work), struct rkvdec_dev,
			      watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
	if (ctx) {
		dev_err(rkvdec->dev, "Frame processing timed out!\n");
		writel(RKVDEC_IRQ_DIS, rkvdec->regs + RKVDEC_REG_INTERRUPT);
		writel(0, rkvdec->regs + RKVDEC_REG_SYSCTRL);
		rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
	}
}

static const struct of_device_id of_rkvdec_match[] = {
	{ .compatible = "rockchip,rk3399-vdec" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rkvdec_match);

static const char * const rkvdec_clk_names[] = {
	"axi", "ahb", "cabac", "core"
};

static int rkvdec_probe(struct platform_device *pdev)
{
	struct rkvdec_dev *rkvdec;
	struct resource *res;
	unsigned int i;
	int ret, irq;

	rkvdec = devm_kzalloc(&pdev->dev, sizeof(*rkvdec), GFP_KERNEL);
	if (!rkvdec)
		return -ENOMEM;

	platform_set_drvdata(pdev, rkvdec);
	rkvdec->dev = &pdev->dev;
	mutex_init(&rkvdec->vdev_lock);
	INIT_DELAYED_WORK(&rkvdec->watchdog_work, rkvdec_watchdog_func);

	rkvdec->clocks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
				      sizeof(*rkvdec->clocks), GFP_KERNEL);
	if (!rkvdec->clocks)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(rkvdec_clk_names); i++)
		rkvdec->clocks[i].id = rkvdec_clk_names[i];

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
				rkvdec->clocks);
	if (ret)
		return ret;

	/*
	 * Bump ACLK up to the maximum possible frequency (500 MHz) to
	 * improve performance when playing back 4K video.
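	 *
	 * This is best effort: the return value is not checked, so a
	 * failed rate change only results in slower decoding.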
	 */
	clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rkvdec->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(rkvdec->regs))
		return PTR_ERR(rkvdec->regs);

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}

	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -ENXIO;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					rkvdec_irq_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), rkvdec);
	if (ret) {
		dev_err(&pdev->dev, "Could not request vdec IRQ\n");
		return ret;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = rkvdec_v4l2_init(rkvdec);
	if (ret)
		goto err_disable_runtime_pm;

	return 0;

err_disable_runtime_pm:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rkvdec_remove(struct platform_device *pdev)
{
	struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);

	rkvdec_v4l2_cleanup(rkvdec);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int rkvdec_runtime_resume(struct device *dev)
{
	struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);

	return clk_bulk_prepare_enable(ARRAY_SIZE(rkvdec_clk_names),
				       rkvdec->clocks);
}

static int rkvdec_runtime_suspend(struct device *dev)
{
	struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(rkvdec_clk_names),
				   rkvdec->clocks);
	return 0;
}
#endif

static const struct dev_pm_ops rkvdec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkvdec_runtime_suspend, rkvdec_runtime_resume, NULL)
};

static struct platform_driver rkvdec_driver = {
	.probe = rkvdec_probe,
	.remove = rkvdec_remove,
	.driver = {
		.name = "rkvdec",
		.of_match_table = of_rkvdec_match,
		.pm = &rkvdec_pm_ops,
	},
};
module_platform_driver(rkvdec_driver);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@collabora.com>");
MODULE_DESCRIPTION("Rockchip Video Decoder driver");
MODULE_LICENSE("GPL v2");