/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	YoungJun Cho <yj44.cho@samsung.com>
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm.h"
#include "exynos_drm_ipp.h"

/*
 * Rotator supports image crop/rotation and input/output DMA operations.
 * input DMA reads image data from memory.
 * output DMA writes image data to memory.
 *
 * M2M operation : supports crop/rotation/flip and so on.
 * Memory ----> Rotator H/W ----> Memory.
 */

/*
 * TODO
 * 1. check suspend/resume api if needed.
 * 2. need to check use case platform_device_id.
 * 3. check src/dst size width, height.
 * 4. need to add supported list in prop_list.
 */
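
/*
 * Rough per-frame programming sequence, as wired through rot_src_ops,
 * rot_dst_ops and rotator_ippdrv_start() below (the exact ordering is
 * driven by the IPP core, so treat this as a sketch rather than a
 * contract):
 *
 *	rotator_src_set_fmt()    - select NV12 or XRGB8888 in ROT_CONTROL
 *	rotator_src_set_size()   - source buffer size and crop window
 *	rotator_src_set_addr()   - source DMA addresses (Y/CB planes)
 *	rotator_dst_set_transf() - rotation degree and flip
 *	rotator_dst_set_size()   - destination buffer size and position
 *	rotator_dst_set_addr()   - destination DMA addresses
 *	rotator_ippdrv_start()   - enable the IRQ and set ROT_CONTROL_START
 *
 * Completion is reported by rotator_irq_handler(), which queues the IPP
 * event work with the current destination buffer id.
 */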

#define get_rot_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
					struct rot_context, ippdrv)
#define rot_read(offset)	readl(rot->regs + (offset))
#define rot_write(cfg, offset)	writel(cfg, rot->regs + (offset))

enum rot_irq_status {
	ROT_IRQ_STATUS_COMPLETE	= 8,
	ROT_IRQ_STATUS_ILLEGAL	= 9,
};

/*
 * A structure of limitation.
 *
 * @min_w: minimum width.
 * @min_h: minimum height.
 * @max_w: maximum width.
 * @max_h: maximum height.
 * @align: align size.
 */
struct rot_limit {
	u32	min_w;
	u32	min_h;
	u32	max_w;
	u32	max_h;
	u32	align;
};

/*
 * A structure of limitation table.
 *
 * @ycbcr420_2p: case of YUV.
 * @rgb888: case of RGB.
 */
struct rot_limit_table {
	struct rot_limit	ycbcr420_2p;
	struct rot_limit	rgb888;
};

/*
 * A structure of rotator context.
 *
 * @ippdrv: prepare initialization using ippdrv.
 * @regs_res: register resources.
 * @regs: memory mapped io registers.
 * @clock: rotator gate clock.
 * @limit_tbl: limitation of rotator.
 * @irq: irq number.
 * @cur_buf_id: current operation buffer id.
 * @suspended: suspended state.
 */
struct rot_context {
	struct exynos_drm_ippdrv	ippdrv;
	struct resource			*regs_res;
	void __iomem			*regs;
	struct clk			*clock;
	struct rot_limit_table		*limit_tbl;
	int				irq;
	int				cur_buf_id[EXYNOS_DRM_OPS_MAX];
	bool				suspended;
};

static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
{
	u32 val = rot_read(ROT_CONFIG);

	if (enable)
		val |= ROT_CONFIG_IRQ;
	else
		val &= ~ROT_CONFIG_IRQ;

	rot_write(val, ROT_CONFIG);
}

static u32 rotator_reg_get_fmt(struct rot_context *rot)
{
	u32 val = rot_read(ROT_CONTROL);

	val &= ROT_CONTROL_FMT_MASK;

	return val;
}

static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
{
	u32 val = rot_read(ROT_STATUS);

	val = ROT_STATUS_IRQ(val);

	if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
		return ROT_IRQ_STATUS_COMPLETE;

	return ROT_IRQ_STATUS_ILLEGAL;
}

static irqreturn_t rotator_irq_handler(int irq, void *arg)
{
	struct rot_context *rot = arg;
	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
	enum rot_irq_status irq_status;
	u32 val;

	/* Get execution result */
	irq_status = rotator_reg_get_irq_status(rot);

	/* clear status */
	val = rot_read(ROT_STATUS);
	val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
	rot_write(val, ROT_STATUS);

	if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
		event_work->ippdrv = ippdrv;
		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
			rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
		queue_work(ippdrv->event_workq,
			(struct work_struct *)event_work);
	} else {
		DRM_ERROR("the SFR is set illegally\n");
	}

	return IRQ_HANDLED;
}
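
/*
 * rotator_align_size() below rounds the requested width/height to the
 * hardware alignment and clamps the result into [min, max] from the
 * limit table.  A purely illustrative example (the numbers are not from
 * a datasheet): with the rgb888 limits used by this driver, align = 2
 * gives mask = ~((1 << 2) - 1) = ~3, so sizes are kept on 4-pixel
 * boundaries; assuming ROT_ALIGN() rounds to the nearest aligned value,
 * as the "rounding to nearest" comment in the helper suggests, a
 * requested width of 1023 would become 1024, while anything below min_w
 * or above max_w is clamped via ROT_MIN()/ROT_MAX().
 */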

static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
		u32 *vsize)
{
	struct rot_limit_table *limit_tbl = rot->limit_tbl;
	struct rot_limit *limit;
	u32 mask, val;

	/* Get size limit */
	if (fmt == ROT_CONTROL_FMT_RGB888)
		limit = &limit_tbl->rgb888;
	else
		limit = &limit_tbl->ycbcr420_2p;

	/* Get mask for rounding to nearest aligned val */
	mask = ~((1 << limit->align) - 1);

	/* Set aligned width */
	val = ROT_ALIGN(*hsize, limit->align, mask);
	if (val < limit->min_w)
		*hsize = ROT_MIN(limit->min_w, mask);
	else if (val > limit->max_w)
		*hsize = ROT_MAX(limit->max_w, mask);
	else
		*hsize = val;

	/* Set aligned height */
	val = ROT_ALIGN(*vsize, limit->align, mask);
	if (val < limit->min_h)
		*vsize = ROT_MIN(limit->min_h, mask);
	else if (val > limit->max_h)
		*vsize = ROT_MAX(limit->max_h, mask);
	else
		*vsize = val;
}

static int rotator_src_set_fmt(struct device *dev, u32 fmt)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	u32 val;

	val = rot_read(ROT_CONTROL);
	val &= ~ROT_CONTROL_FMT_MASK;

	switch (fmt) {
	case DRM_FORMAT_NV12:
		val |= ROT_CONTROL_FMT_YCBCR420_2P;
		break;
	case DRM_FORMAT_XRGB8888:
		val |= ROT_CONTROL_FMT_RGB888;
		break;
	default:
		DRM_ERROR("invalid image format\n");
		return -EINVAL;
	}

	rot_write(val, ROT_CONTROL);

	return 0;
}

static inline bool rotator_check_reg_fmt(u32 fmt)
{
	if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
	    (fmt == ROT_CONTROL_FMT_RGB888))
		return true;

	return false;
}

static int rotator_src_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos,
		struct drm_exynos_sz *sz)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	u32 fmt, hsize, vsize;
	u32 val;

	/* Get format */
	fmt = rotator_reg_get_fmt(rot);
	if (!rotator_check_reg_fmt(fmt)) {
		DRM_ERROR("%s:invalid format.\n", __func__);
		return -EINVAL;
	}

	/* Align buffer size */
	hsize = sz->hsize;
	vsize = sz->vsize;
	rotator_align_size(rot, fmt, &hsize, &vsize);

	/* Set buffer size configuration */
	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
	rot_write(val, ROT_SRC_BUF_SIZE);

	/* Set crop image position configuration */
	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
	rot_write(val, ROT_SRC_CROP_POS);
	val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
	rot_write(val, ROT_SRC_CROP_SIZE);

	return 0;
}

static int rotator_src_set_addr(struct device *dev,
		struct drm_exynos_ipp_buf_info *buf_info,
		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
	u32 val, fmt, hsize, vsize;
	int i;

	/* Set current buf_id */
	rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		/* Set address configuration */
		for_each_ipp_planar(i)
			addr[i] = buf_info->base[i];

		/* Get format */
		fmt = rotator_reg_get_fmt(rot);
		if (!rotator_check_reg_fmt(fmt)) {
			DRM_ERROR("%s:invalid format.\n", __func__);
			return -EINVAL;
		}

		/* Re-set cb planar for NV12 format */
		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
		    !addr[EXYNOS_DRM_PLANAR_CB]) {

			val = rot_read(ROT_SRC_BUF_SIZE);
			hsize = ROT_GET_BUF_SIZE_W(val);
			vsize = ROT_GET_BUF_SIZE_H(val);

			/* Set cb planar */
			addr[EXYNOS_DRM_PLANAR_CB] =
				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
		}

		for_each_ipp_planar(i)
			rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
		break;
	case IPP_BUF_DEQUEUE:
		for_each_ipp_planar(i)
			rot_write(0x0, ROT_SRC_BUF_ADDR(i));
		break;
	default:
		/* Nothing to do */
		break;
	}

	return 0;
}

static int rotator_dst_set_transf(struct device *dev,
		enum drm_exynos_degree degree,
		enum drm_exynos_flip flip, bool *swap)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	u32 val;

	/* Set transform configuration */
	val = rot_read(ROT_CONTROL);
	val &= ~ROT_CONTROL_FLIP_MASK;

	switch (flip) {
	case EXYNOS_DRM_FLIP_VERTICAL:
		val |= ROT_CONTROL_FLIP_VERTICAL;
		break;
	case EXYNOS_DRM_FLIP_HORIZONTAL:
		val |= ROT_CONTROL_FLIP_HORIZONTAL;
		break;
	default:
		/* Flip None */
		break;
	}

	val &= ~ROT_CONTROL_ROT_MASK;

	switch (degree) {
	case EXYNOS_DRM_DEGREE_90:
		val |= ROT_CONTROL_ROT_90;
		break;
	case EXYNOS_DRM_DEGREE_180:
		val |= ROT_CONTROL_ROT_180;
		break;
	case EXYNOS_DRM_DEGREE_270:
		val |= ROT_CONTROL_ROT_270;
		break;
	default:
		/* Rotation 0 Degree */
		break;
	}

	rot_write(val, ROT_CONTROL);

	/* Check degree for setting buffer size swap */
	if ((degree == EXYNOS_DRM_DEGREE_90) ||
	    (degree == EXYNOS_DRM_DEGREE_270))
		*swap = true;
	else
		*swap = false;

	return 0;
}

static int rotator_dst_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos,
		struct drm_exynos_sz *sz)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	u32 val, fmt, hsize, vsize;

	/* Get format */
	fmt = rotator_reg_get_fmt(rot);
	if (!rotator_check_reg_fmt(fmt)) {
		DRM_ERROR("%s:invalid format.\n", __func__);
		return -EINVAL;
	}

	/* Align buffer size */
	hsize = sz->hsize;
	vsize = sz->vsize;
	rotator_align_size(rot, fmt, &hsize, &vsize);

	/* Set buffer size configuration */
	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
	rot_write(val, ROT_DST_BUF_SIZE);

	/* Set crop image position configuration */
	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
	rot_write(val, ROT_DST_CROP_POS);

	return 0;
}

static int rotator_dst_set_addr(struct device *dev,
		struct drm_exynos_ipp_buf_info *buf_info,
		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
	u32 val, fmt, hsize, vsize;
	int i;

	/* Set current buf_id */
	rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		/* Set address configuration */
		for_each_ipp_planar(i)
			addr[i] = buf_info->base[i];

		/* Get format */
		fmt = rotator_reg_get_fmt(rot);
		if (!rotator_check_reg_fmt(fmt)) {
			DRM_ERROR("%s:invalid format.\n", __func__);
			return -EINVAL;
		}

		/* Re-set cb planar for NV12 format */
		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
		    !addr[EXYNOS_DRM_PLANAR_CB]) {
			/* Get buf size */
			val = rot_read(ROT_DST_BUF_SIZE);

			hsize = ROT_GET_BUF_SIZE_W(val);
			vsize = ROT_GET_BUF_SIZE_H(val);

			/* Set cb planar */
			addr[EXYNOS_DRM_PLANAR_CB] =
				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
		}

		for_each_ipp_planar(i)
			rot_write(addr[i], ROT_DST_BUF_ADDR(i));
		break;
	case IPP_BUF_DEQUEUE:
		for_each_ipp_planar(i)
			rot_write(0x0, ROT_DST_BUF_ADDR(i));
		break;
	default:
		/* Nothing to do */
		break;
	}

	return 0;
}

static struct exynos_drm_ipp_ops rot_src_ops = {
	.set_fmt	= rotator_src_set_fmt,
	.set_size	= rotator_src_set_size,
	.set_addr	= rotator_src_set_addr,
};

static struct exynos_drm_ipp_ops rot_dst_ops = {
	.set_transf	= rotator_dst_set_transf,
	.set_size	= rotator_dst_set_size,
	.set_addr	= rotator_dst_set_addr,
};

static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
	struct drm_exynos_ipp_prop_list *prop_list;

	DRM_DEBUG_KMS("%s\n", __func__);

	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
	if (!prop_list) {
		DRM_ERROR("failed to alloc property list.\n");
		return -ENOMEM;
	}

	prop_list->version = 1;
	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
				(1 << EXYNOS_DRM_DEGREE_90) |
				(1 << EXYNOS_DRM_DEGREE_180) |
				(1 << EXYNOS_DRM_DEGREE_270);
	prop_list->csc = 0;
	prop_list->crop = 0;
	prop_list->scale = 0;

	ippdrv->prop_list = prop_list;

	return 0;
}

static inline bool rotator_check_drm_fmt(u32 fmt)
{
	switch (fmt) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_NV12:
		return true;
	default:
		DRM_DEBUG_KMS("%s:not support format\n", __func__);
		return false;
	}
}

static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
{
	switch (flip) {
	case EXYNOS_DRM_FLIP_NONE:
	case EXYNOS_DRM_FLIP_VERTICAL:
	case EXYNOS_DRM_FLIP_HORIZONTAL:
	case EXYNOS_DRM_FLIP_BOTH:
		return true;
	default:
		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
		return false;
	}
}
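
/*
 * rotator_ippdrv_check_property() below rejects anything the block
 * cannot do: format conversion (src and dst formats must match),
 * source-side rotation/flip, and scaling.  As an illustration of the
 * 90/270 degree case (the buffer sizes here are examples only, not
 * hardware limits): a 1280x720 source crop rotated by 90 degrees must
 * be paired with a 720x1280 destination window, i.e. src w/h must equal
 * dst h/w once the swap flag is set.
 */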

static int rotator_ippdrv_check_property(struct device *dev,
		struct drm_exynos_ipp_property *property)
{
	struct drm_exynos_ipp_config *src_config =
					&property->config[EXYNOS_DRM_OPS_SRC];
	struct drm_exynos_ipp_config *dst_config =
					&property->config[EXYNOS_DRM_OPS_DST];
	struct drm_exynos_pos *src_pos = &src_config->pos;
	struct drm_exynos_pos *dst_pos = &dst_config->pos;
	struct drm_exynos_sz *src_sz = &src_config->sz;
	struct drm_exynos_sz *dst_sz = &dst_config->sz;
	bool swap = false;

	/* Check format configuration */
	if (src_config->fmt != dst_config->fmt) {
		DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
		return -EINVAL;
	}

	if (!rotator_check_drm_fmt(dst_config->fmt)) {
		DRM_DEBUG_KMS("%s:invalid format\n", __func__);
		return -EINVAL;
	}

	/* Check transform configuration */
	if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
		DRM_DEBUG_KMS("%s:not support source-side rotation\n",
			__func__);
		return -EINVAL;
	}

	switch (dst_config->degree) {
	case EXYNOS_DRM_DEGREE_90:
	case EXYNOS_DRM_DEGREE_270:
		swap = true;
		/* fall through */
	case EXYNOS_DRM_DEGREE_0:
	case EXYNOS_DRM_DEGREE_180:
		/* No problem */
		break;
	default:
		DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
		return -EINVAL;
	}

	if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
		DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
		return -EINVAL;
	}

	if (!rotator_check_drm_flip(dst_config->flip)) {
		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
		return -EINVAL;
	}

	/* Check size configuration */
	if ((src_pos->x + src_pos->w > src_sz->hsize) ||
		(src_pos->y + src_pos->h > src_sz->vsize)) {
		DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
		return -EINVAL;
	}

	if (swap) {
		if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
			(dst_pos->y + dst_pos->w > dst_sz->hsize)) {
			DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
				__func__);
			return -EINVAL;
		}

		if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
			DRM_DEBUG_KMS("%s:not support scale feature\n",
				__func__);
			return -EINVAL;
		}
	} else {
		if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
			(dst_pos->y + dst_pos->h > dst_sz->vsize)) {
			DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
				__func__);
			return -EINVAL;
		}

		if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
			DRM_DEBUG_KMS("%s:not support scale feature\n",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct rot_context *rot = dev_get_drvdata(dev);
	u32 val;

	if (rot->suspended) {
		DRM_ERROR("suspended state\n");
		return -EPERM;
	}

	if (cmd != IPP_CMD_M2M) {
		DRM_ERROR("not support cmd: %d\n", cmd);
		return -EINVAL;
	}

	/* Set interrupt enable */
	rotator_reg_set_irq(rot, true);

	val = rot_read(ROT_CONTROL);
	val |= ROT_CONTROL_START;

	rot_write(val, ROT_CONTROL);

	return 0;
}

static int rotator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rot_context *rot;
	struct exynos_drm_ippdrv *ippdrv;
	int ret;

	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
	if (!rot) {
		dev_err(dev, "failed to allocate rot\n");
		return -ENOMEM;
	}

	rot->limit_tbl = (struct rot_limit_table *)
				platform_get_device_id(pdev)->driver_data;

	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rot->regs = devm_ioremap_resource(dev, rot->regs_res);
	if (IS_ERR(rot->regs))
		return PTR_ERR(rot->regs);

	rot->irq = platform_get_irq(pdev, 0);
	if (rot->irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return rot->irq;
	}

	ret = devm_request_threaded_irq(dev, rot->irq, NULL,
			rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
	if (ret < 0) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	rot->clock = devm_clk_get(dev, "rotator");
	if (IS_ERR(rot->clock)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(rot->clock);
	}

	pm_runtime_enable(dev);

	ippdrv = &rot->ippdrv;
	ippdrv->dev = dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
	ippdrv->check_property = rotator_ippdrv_check_property;
	ippdrv->start = rotator_ippdrv_start;
	ret = rotator_init_prop_list(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to init property list.\n");
		goto err_ippdrv_register;
	}

	DRM_DEBUG_KMS("%s:ippdrv[%p]\n", __func__, ippdrv);

	platform_set_drvdata(pdev, rot);

	ret = exynos_drm_ippdrv_register(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm rotator device\n");
		goto err_ippdrv_register;
	}

	dev_info(dev, "The exynos rotator is probed successfully\n");

	return 0;

err_ippdrv_register:
	pm_runtime_disable(dev);
	return ret;
}

static int rotator_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rot_context *rot = dev_get_drvdata(dev);
	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;

	exynos_drm_ippdrv_unregister(ippdrv);

	pm_runtime_disable(dev);

	return 0;
}

static struct rot_limit_table rot_limit_tbl = {
	.ycbcr420_2p = {
		.min_w = 32,
		.min_h = 32,
		.max_w = SZ_32K,
		.max_h = SZ_32K,
		.align = 3,
	},
	.rgb888 = {
		.min_w = 8,
		.min_h = 8,
		.max_w = SZ_8K,
		.max_h = SZ_8K,
		.align = 2,
	},
};

static struct platform_device_id rotator_driver_ids[] = {
	{
		.name		= "exynos-rot",
		.driver_data	= (unsigned long)&rot_limit_tbl,
	},
	{},
};

static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (enable) {
		clk_enable(rot->clock);
		rot->suspended = false;
	} else {
		clk_disable(rot->clock);
		rot->suspended = true;
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int rotator_suspend(struct device *dev)
{
	struct rot_context *rot = dev_get_drvdata(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return rotator_clk_ctrl(rot, false);
}

static int rotator_resume(struct device *dev)
{
	struct rot_context *rot = dev_get_drvdata(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return rotator_clk_ctrl(rot, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int rotator_runtime_suspend(struct device *dev)
{
	struct rot_context *rot = dev_get_drvdata(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return rotator_clk_ctrl(rot, false);
}

static int rotator_runtime_resume(struct device *dev)
{
	struct rot_context *rot = dev_get_drvdata(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return rotator_clk_ctrl(rot, true);
}
#endif

static const struct dev_pm_ops rotator_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
	SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
									NULL)
};

struct platform_driver rotator_driver = {
	.probe		= rotator_probe,
	.remove		= rotator_remove,
	.id_table	= rotator_driver_ids,
	.driver		= {
		.name	= "exynos-rot",
		.owner	= THIS_MODULE,
		.pm	= &rotator_pm_ops,
	},
};
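
/*
 * rotator_driver is intentionally non-static and no module_platform_driver()
 * call is made in this file, so the driver is assumed to be registered by
 * the Exynos DRM core (e.g. from exynos_drm_drv.c) rather than on its own.
 */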