1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 4 * Author: James.Qian.Wang <james.qian.wang@arm.com> 5 * 6 */ 7 8 #include <drm/drm_print.h> 9 #include <linux/clk.h> 10 #include "komeda_dev.h" 11 #include "komeda_kms.h" 12 #include "komeda_pipeline.h" 13 #include "komeda_framebuffer.h" 14 15 static inline bool is_switching_user(void *old, void *new) 16 { 17 if (!old || !new) 18 return false; 19 20 return old != new; 21 } 22 23 static struct komeda_pipeline_state * 24 komeda_pipeline_get_state(struct komeda_pipeline *pipe, 25 struct drm_atomic_state *state) 26 { 27 struct drm_private_state *priv_st; 28 29 priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj); 30 if (IS_ERR(priv_st)) 31 return ERR_CAST(priv_st); 32 33 return priv_to_pipe_st(priv_st); 34 } 35 36 struct komeda_pipeline_state * 37 komeda_pipeline_get_old_state(struct komeda_pipeline *pipe, 38 struct drm_atomic_state *state) 39 { 40 struct drm_private_state *priv_st; 41 42 priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj); 43 if (priv_st) 44 return priv_to_pipe_st(priv_st); 45 return NULL; 46 } 47 48 static struct komeda_pipeline_state * 49 komeda_pipeline_get_new_state(struct komeda_pipeline *pipe, 50 struct drm_atomic_state *state) 51 { 52 struct drm_private_state *priv_st; 53 54 priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj); 55 if (priv_st) 56 return priv_to_pipe_st(priv_st); 57 return NULL; 58 } 59 60 /* Assign pipeline for crtc */ 61 static struct komeda_pipeline_state * 62 komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe, 63 struct drm_atomic_state *state, 64 struct drm_crtc *crtc) 65 { 66 struct komeda_pipeline_state *st; 67 68 st = komeda_pipeline_get_state(pipe, state); 69 if (IS_ERR(st)) 70 return st; 71 72 if (is_switching_user(crtc, st->crtc)) { 73 DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n", 74 drm_crtc_index(crtc), pipe->id); 75 return ERR_PTR(-EBUSY); 76 } 77 78 /* pipeline only can be disabled when the it is free or unused */ 79 if (!crtc && st->active_comps) { 80 DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id); 81 return ERR_PTR(-EBUSY); 82 } 83 84 st->crtc = crtc; 85 86 if (crtc) { 87 struct komeda_crtc_state *kcrtc_st; 88 89 kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state, 90 crtc)); 91 92 kcrtc_st->active_pipes |= BIT(pipe->id); 93 kcrtc_st->affected_pipes |= BIT(pipe->id); 94 } 95 return st; 96 } 97 98 static struct komeda_component_state * 99 komeda_component_get_state(struct komeda_component *c, 100 struct drm_atomic_state *state) 101 { 102 struct drm_private_state *priv_st; 103 104 WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock)); 105 106 priv_st = drm_atomic_get_private_obj_state(state, &c->obj); 107 if (IS_ERR(priv_st)) 108 return ERR_CAST(priv_st); 109 110 return priv_to_comp_st(priv_st); 111 } 112 113 static struct komeda_component_state * 114 komeda_component_get_old_state(struct komeda_component *c, 115 struct drm_atomic_state *state) 116 { 117 struct drm_private_state *priv_st; 118 119 priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj); 120 if (priv_st) 121 return priv_to_comp_st(priv_st); 122 return NULL; 123 } 124 125 /** 126 * komeda_component_get_state_and_set_user() 127 * 128 * @c: component to get state and set user 129 * @state: global atomic state 130 * @user: direct user, the binding user 131 * @crtc: the CRTC user, the big boss :) 132 * 133 * This function accepts two users: 134 * - The direct user: can be plane/crtc/wb_connector depends on component 135 * - The big boss (CRTC) 136 * CRTC is the big boss (the final user), because all component resources 137 * eventually will be assigned to CRTC, like the layer will be binding to 138 * kms_plane, but kms plane will be binding to a CRTC eventually. 139 * 140 * The big boss (CRTC) is for pipeline assignment, since &komeda_component isn't 141 * independent and can be assigned to CRTC freely, but belongs to a specific 142 * pipeline, only pipeline can be shared between crtc, and pipeline as a whole 143 * (include all the internal components) assigned to a specific CRTC. 144 * 145 * So when set a user to komeda_component, need first to check the status of 146 * component->pipeline to see if the pipeline is available on this specific 147 * CRTC. if the pipeline is busy (assigned to another CRTC), even the required 148 * component is free, the component still cannot be assigned to the direct user. 149 */ 150 static struct komeda_component_state * 151 komeda_component_get_state_and_set_user(struct komeda_component *c, 152 struct drm_atomic_state *state, 153 void *user, 154 struct drm_crtc *crtc) 155 { 156 struct komeda_pipeline_state *pipe_st; 157 struct komeda_component_state *st; 158 159 /* First check if the pipeline is available */ 160 pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline, 161 state, crtc); 162 if (IS_ERR(pipe_st)) 163 return ERR_CAST(pipe_st); 164 165 st = komeda_component_get_state(c, state); 166 if (IS_ERR(st)) 167 return st; 168 169 /* check if the component has been occupied */ 170 if (is_switching_user(user, st->binding_user)) { 171 DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name); 172 return ERR_PTR(-EBUSY); 173 } 174 175 st->binding_user = user; 176 /* mark the component as active if user is valid */ 177 if (st->binding_user) 178 pipe_st->active_comps |= BIT(c->id); 179 180 return st; 181 } 182 183 static void 184 komeda_component_add_input(struct komeda_component_state *state, 185 struct komeda_component_output *input, 186 int idx) 187 { 188 struct komeda_component *c = state->component; 189 190 WARN_ON((idx < 0 || idx >= c->max_active_inputs)); 191 192 /* since the inputs[i] is only valid when it is active. So if a input[i] 193 * is a newly enabled input which switches from disable to enable, then 194 * the old inputs[i] is undefined (NOT zeroed), we can not rely on 195 * memcmp, but directly mark it changed 196 */ 197 if (!has_bit(idx, state->affected_inputs) || 198 memcmp(&state->inputs[idx], input, sizeof(*input))) { 199 memcpy(&state->inputs[idx], input, sizeof(*input)); 200 state->changed_active_inputs |= BIT(idx); 201 } 202 state->active_inputs |= BIT(idx); 203 state->affected_inputs |= BIT(idx); 204 } 205 206 static int 207 komeda_component_check_input(struct komeda_component_state *state, 208 struct komeda_component_output *input, 209 int idx) 210 { 211 struct komeda_component *c = state->component; 212 213 if ((idx < 0) || (idx >= c->max_active_inputs)) { 214 DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n", 215 input->component->name, c->name, idx); 216 return -EINVAL; 217 } 218 219 if (has_bit(idx, state->active_inputs)) { 220 DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n", 221 input->component->name, c->name, idx); 222 return -EINVAL; 223 } 224 225 return 0; 226 } 227 228 static void 229 komeda_component_set_output(struct komeda_component_output *output, 230 struct komeda_component *comp, 231 u8 output_port) 232 { 233 output->component = comp; 234 output->output_port = output_port; 235 } 236 237 static int 238 komeda_component_validate_private(struct komeda_component *c, 239 struct komeda_component_state *st) 240 { 241 int err; 242 243 if (!c->funcs->validate) 244 return 0; 245 246 err = c->funcs->validate(c, st); 247 if (err) 248 DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name); 249 250 return err; 251 } 252 253 /* Get current available scaler from the component->supported_outputs */ 254 static struct komeda_scaler * 255 komeda_component_get_avail_scaler(struct komeda_component *c, 256 struct drm_atomic_state *state) 257 { 258 struct komeda_pipeline_state *pipe_st; 259 u32 avail_scalers; 260 261 pipe_st = komeda_pipeline_get_state(c->pipeline, state); 262 if (!pipe_st) 263 return NULL; 264 265 avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^ 266 KOMEDA_PIPELINE_SCALERS; 267 268 c = komeda_component_pickup_output(c, avail_scalers); 269 270 return to_scaler(c); 271 } 272 273 static void 274 komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot) 275 { 276 if (drm_rotation_90_or_270(rot)) { 277 swap(dflow->in_h, dflow->in_w); 278 swap(dflow->total_in_h, dflow->total_in_w); 279 } 280 } 281 282 static int 283 komeda_layer_check_cfg(struct komeda_layer *layer, 284 struct komeda_fb *kfb, 285 struct komeda_data_flow_cfg *dflow) 286 { 287 u32 src_x, src_y, src_w, src_h; 288 u32 line_sz, max_line_sz; 289 290 if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot)) 291 return -EINVAL; 292 293 if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) { 294 src_x = dflow->out_x; 295 src_y = dflow->out_y; 296 src_w = dflow->out_w; 297 src_h = dflow->out_h; 298 } else { 299 src_x = dflow->in_x; 300 src_y = dflow->in_y; 301 src_w = dflow->in_w; 302 src_h = dflow->in_h; 303 } 304 305 if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h)) 306 return -EINVAL; 307 308 if (!in_range(&layer->hsize_in, src_w)) { 309 DRM_DEBUG_ATOMIC("invalidate src_w %d.\n", src_w); 310 return -EINVAL; 311 } 312 313 if (!in_range(&layer->vsize_in, src_h)) { 314 DRM_DEBUG_ATOMIC("invalidate src_h %d.\n", src_h); 315 return -EINVAL; 316 } 317 318 if (drm_rotation_90_or_270(dflow->rot)) 319 line_sz = dflow->in_h; 320 else 321 line_sz = dflow->in_w; 322 323 if (kfb->base.format->hsub > 1) 324 max_line_sz = layer->yuv_line_sz; 325 else 326 max_line_sz = layer->line_sz; 327 328 if (line_sz > max_line_sz) { 329 DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n", 330 line_sz, max_line_sz); 331 return -EINVAL; 332 } 333 334 return 0; 335 } 336 337 static int 338 komeda_layer_validate(struct komeda_layer *layer, 339 struct komeda_plane_state *kplane_st, 340 struct komeda_data_flow_cfg *dflow) 341 { 342 struct drm_plane_state *plane_st = &kplane_st->base; 343 struct drm_framebuffer *fb = plane_st->fb; 344 struct komeda_fb *kfb = to_kfb(fb); 345 struct komeda_component_state *c_st; 346 struct komeda_layer_state *st; 347 int i, err; 348 349 err = komeda_layer_check_cfg(layer, kfb, dflow); 350 if (err) 351 return err; 352 353 c_st = komeda_component_get_state_and_set_user(&layer->base, 354 plane_st->state, plane_st->plane, plane_st->crtc); 355 if (IS_ERR(c_st)) 356 return PTR_ERR(c_st); 357 358 st = to_layer_st(c_st); 359 360 st->rot = dflow->rot; 361 362 if (fb->modifier) { 363 st->hsize = kfb->aligned_w; 364 st->vsize = kfb->aligned_h; 365 st->afbc_crop_l = dflow->in_x; 366 st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w; 367 st->afbc_crop_t = dflow->in_y; 368 st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h; 369 } else { 370 st->hsize = dflow->in_w; 371 st->vsize = dflow->in_h; 372 st->afbc_crop_l = 0; 373 st->afbc_crop_r = 0; 374 st->afbc_crop_t = 0; 375 st->afbc_crop_b = 0; 376 } 377 378 for (i = 0; i < fb->format->num_planes; i++) 379 st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x, 380 dflow->in_y, i); 381 382 err = komeda_component_validate_private(&layer->base, c_st); 383 if (err) 384 return err; 385 386 /* update the data flow for the next stage */ 387 komeda_component_set_output(&dflow->input, &layer->base, 0); 388 389 /* 390 * The rotation has been handled by layer, so adjusted the data flow for 391 * the next stage. 392 */ 393 komeda_rotate_data_flow(dflow, st->rot); 394 395 return 0; 396 } 397 398 static int 399 komeda_wb_layer_validate(struct komeda_layer *wb_layer, 400 struct drm_connector_state *conn_st, 401 struct komeda_data_flow_cfg *dflow) 402 { 403 struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb); 404 struct komeda_component_state *c_st; 405 struct komeda_layer_state *st; 406 int i, err; 407 408 err = komeda_layer_check_cfg(wb_layer, kfb, dflow); 409 if (err) 410 return err; 411 412 c_st = komeda_component_get_state_and_set_user(&wb_layer->base, 413 conn_st->state, conn_st->connector, conn_st->crtc); 414 if (IS_ERR(c_st)) 415 return PTR_ERR(c_st); 416 417 st = to_layer_st(c_st); 418 419 st->hsize = dflow->out_w; 420 st->vsize = dflow->out_h; 421 422 for (i = 0; i < kfb->base.format->num_planes; i++) 423 st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x, 424 dflow->out_y, i); 425 426 komeda_component_add_input(&st->base, &dflow->input, 0); 427 komeda_component_set_output(&dflow->input, &wb_layer->base, 0); 428 429 return 0; 430 } 431 432 static bool scaling_ratio_valid(u32 size_in, u32 size_out, 433 u32 max_upscaling, u32 max_downscaling) 434 { 435 if (size_out > size_in * max_upscaling) 436 return false; 437 else if (size_in > size_out * max_downscaling) 438 return false; 439 return true; 440 } 441 442 static int 443 komeda_scaler_check_cfg(struct komeda_scaler *scaler, 444 struct komeda_crtc_state *kcrtc_st, 445 struct komeda_data_flow_cfg *dflow) 446 { 447 u32 hsize_in, vsize_in, hsize_out, vsize_out; 448 u32 max_upscaling; 449 450 hsize_in = dflow->in_w; 451 vsize_in = dflow->in_h; 452 hsize_out = dflow->out_w; 453 vsize_out = dflow->out_h; 454 455 if (!in_range(&scaler->hsize, hsize_in) || 456 !in_range(&scaler->hsize, hsize_out)) { 457 DRM_DEBUG_ATOMIC("Invalid horizontal sizes"); 458 return -EINVAL; 459 } 460 461 if (!in_range(&scaler->vsize, vsize_in) || 462 !in_range(&scaler->vsize, vsize_out)) { 463 DRM_DEBUG_ATOMIC("Invalid vertical sizes"); 464 return -EINVAL; 465 } 466 467 /* If input comes from compiz that means the scaling is for writeback 468 * and scaler can not do upscaling for writeback 469 */ 470 if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS)) 471 max_upscaling = 1; 472 else 473 max_upscaling = scaler->max_upscaling; 474 475 if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling, 476 scaler->max_downscaling)) { 477 DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio"); 478 return -EINVAL; 479 } 480 481 if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling, 482 scaler->max_downscaling)) { 483 DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio"); 484 return -EINVAL; 485 } 486 487 if (hsize_in > hsize_out || vsize_in > vsize_out) { 488 struct komeda_pipeline *pipe = scaler->base.pipeline; 489 int err; 490 491 err = pipe->funcs->downscaling_clk_check(pipe, 492 &kcrtc_st->base.adjusted_mode, 493 komeda_crtc_get_aclk(kcrtc_st), dflow); 494 if (err) { 495 DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n"); 496 return err; 497 } 498 } 499 500 return 0; 501 } 502 503 static int 504 komeda_scaler_validate(void *user, 505 struct komeda_crtc_state *kcrtc_st, 506 struct komeda_data_flow_cfg *dflow) 507 { 508 struct drm_atomic_state *drm_st = kcrtc_st->base.state; 509 struct komeda_component_state *c_st; 510 struct komeda_scaler_state *st; 511 struct komeda_scaler *scaler; 512 int err = 0; 513 514 if (!(dflow->en_scaling || dflow->en_img_enhancement)) 515 return 0; 516 517 scaler = komeda_component_get_avail_scaler(dflow->input.component, 518 drm_st); 519 if (!scaler) { 520 DRM_DEBUG_ATOMIC("No scaler available"); 521 return -EINVAL; 522 } 523 524 err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow); 525 if (err) 526 return err; 527 528 c_st = komeda_component_get_state_and_set_user(&scaler->base, 529 drm_st, user, kcrtc_st->base.crtc); 530 if (IS_ERR(c_st)) 531 return PTR_ERR(c_st); 532 533 st = to_scaler_st(c_st); 534 535 st->hsize_in = dflow->in_w; 536 st->vsize_in = dflow->in_h; 537 st->hsize_out = dflow->out_w; 538 st->vsize_out = dflow->out_h; 539 st->right_crop = dflow->right_crop; 540 st->left_crop = dflow->left_crop; 541 st->total_vsize_in = dflow->total_in_h; 542 st->total_hsize_in = dflow->total_in_w; 543 st->total_hsize_out = dflow->total_out_w; 544 545 /* Enable alpha processing if the next stage needs the pixel alpha */ 546 st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE; 547 st->en_scaling = dflow->en_scaling; 548 st->en_img_enhancement = dflow->en_img_enhancement; 549 st->en_split = dflow->en_split; 550 st->right_part = dflow->right_part; 551 552 komeda_component_add_input(&st->base, &dflow->input, 0); 553 komeda_component_set_output(&dflow->input, &scaler->base, 0); 554 return err; 555 } 556 557 static void komeda_split_data_flow(struct komeda_scaler *scaler, 558 struct komeda_data_flow_cfg *dflow, 559 struct komeda_data_flow_cfg *l_dflow, 560 struct komeda_data_flow_cfg *r_dflow); 561 562 static int 563 komeda_splitter_validate(struct komeda_splitter *splitter, 564 struct drm_connector_state *conn_st, 565 struct komeda_data_flow_cfg *dflow, 566 struct komeda_data_flow_cfg *l_output, 567 struct komeda_data_flow_cfg *r_output) 568 { 569 struct komeda_component_state *c_st; 570 struct komeda_splitter_state *st; 571 572 if (!splitter) { 573 DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n"); 574 return -EINVAL; 575 } 576 577 if (!in_range(&splitter->hsize, dflow->in_w)) { 578 DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n", 579 dflow->in_w); 580 return -EINVAL; 581 } 582 583 if (!in_range(&splitter->vsize, dflow->in_h)) { 584 DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n", 585 dflow->in_h); 586 return -EINVAL; 587 } 588 589 c_st = komeda_component_get_state_and_set_user(&splitter->base, 590 conn_st->state, conn_st->connector, conn_st->crtc); 591 592 if (IS_ERR(c_st)) 593 return PTR_ERR(c_st); 594 595 komeda_split_data_flow(splitter->base.pipeline->scalers[0], 596 dflow, l_output, r_output); 597 598 st = to_splitter_st(c_st); 599 st->hsize = dflow->in_w; 600 st->vsize = dflow->in_h; 601 st->overlap = dflow->overlap; 602 603 komeda_component_add_input(&st->base, &dflow->input, 0); 604 komeda_component_set_output(&l_output->input, &splitter->base, 0); 605 komeda_component_set_output(&r_output->input, &splitter->base, 1); 606 607 return 0; 608 } 609 610 static int 611 komeda_merger_validate(struct komeda_merger *merger, 612 void *user, 613 struct komeda_crtc_state *kcrtc_st, 614 struct komeda_data_flow_cfg *left_input, 615 struct komeda_data_flow_cfg *right_input, 616 struct komeda_data_flow_cfg *output) 617 { 618 struct komeda_component_state *c_st; 619 struct komeda_merger_state *st; 620 int err = 0; 621 622 if (!merger) { 623 DRM_DEBUG_ATOMIC("No merger is available"); 624 return -EINVAL; 625 } 626 627 if (!in_range(&merger->hsize_merged, output->out_w)) { 628 DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n", 629 output->out_w); 630 return -EINVAL; 631 } 632 633 if (!in_range(&merger->vsize_merged, output->out_h)) { 634 DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n", 635 output->out_h); 636 return -EINVAL; 637 } 638 639 c_st = komeda_component_get_state_and_set_user(&merger->base, 640 kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc); 641 642 if (IS_ERR(c_st)) 643 return PTR_ERR(c_st); 644 645 st = to_merger_st(c_st); 646 st->hsize_merged = output->out_w; 647 st->vsize_merged = output->out_h; 648 649 komeda_component_add_input(c_st, &left_input->input, 0); 650 komeda_component_add_input(c_st, &right_input->input, 1); 651 komeda_component_set_output(&output->input, &merger->base, 0); 652 653 return err; 654 } 655 656 void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st, 657 u16 *hsize, u16 *vsize) 658 { 659 struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode; 660 661 if (hsize) 662 *hsize = m->hdisplay; 663 if (vsize) 664 *vsize = m->vdisplay; 665 } 666 667 static int 668 komeda_compiz_set_input(struct komeda_compiz *compiz, 669 struct komeda_crtc_state *kcrtc_st, 670 struct komeda_data_flow_cfg *dflow) 671 { 672 struct drm_atomic_state *drm_st = kcrtc_st->base.state; 673 struct komeda_component_state *c_st, *old_st; 674 struct komeda_compiz_input_cfg *cin; 675 u16 compiz_w, compiz_h; 676 int idx = dflow->blending_zorder; 677 678 pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h); 679 /* check display rect */ 680 if ((dflow->out_x + dflow->out_w > compiz_w) || 681 (dflow->out_y + dflow->out_h > compiz_h) || 682 dflow->out_w == 0 || dflow->out_h == 0) { 683 DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n", 684 dflow->out_x, dflow->out_y, 685 dflow->out_w, dflow->out_h); 686 return -EINVAL; 687 } 688 689 c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st, 690 kcrtc_st->base.crtc, kcrtc_st->base.crtc); 691 if (IS_ERR(c_st)) 692 return PTR_ERR(c_st); 693 694 if (komeda_component_check_input(c_st, &dflow->input, idx)) 695 return -EINVAL; 696 697 cin = &(to_compiz_st(c_st)->cins[idx]); 698 699 cin->hsize = dflow->out_w; 700 cin->vsize = dflow->out_h; 701 cin->hoffset = dflow->out_x; 702 cin->voffset = dflow->out_y; 703 cin->pixel_blend_mode = dflow->pixel_blend_mode; 704 cin->layer_alpha = dflow->layer_alpha; 705 706 old_st = komeda_component_get_old_state(&compiz->base, drm_st); 707 WARN_ON(!old_st); 708 709 /* compare with old to check if this input has been changed */ 710 if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin))) 711 c_st->changed_active_inputs |= BIT(idx); 712 713 komeda_component_add_input(c_st, &dflow->input, idx); 714 komeda_component_set_output(&dflow->input, &compiz->base, 0); 715 716 return 0; 717 } 718 719 static int 720 komeda_compiz_validate(struct komeda_compiz *compiz, 721 struct komeda_crtc_state *state, 722 struct komeda_data_flow_cfg *dflow) 723 { 724 struct komeda_component_state *c_st; 725 struct komeda_compiz_state *st; 726 727 c_st = komeda_component_get_state_and_set_user(&compiz->base, 728 state->base.state, state->base.crtc, state->base.crtc); 729 if (IS_ERR(c_st)) 730 return PTR_ERR(c_st); 731 732 st = to_compiz_st(c_st); 733 734 pipeline_composition_size(state, &st->hsize, &st->vsize); 735 736 komeda_component_set_output(&dflow->input, &compiz->base, 0); 737 738 /* compiz output dflow will be fed to the next pipeline stage, prepare 739 * the data flow configuration for the next stage 740 */ 741 if (dflow) { 742 dflow->in_w = st->hsize; 743 dflow->in_h = st->vsize; 744 dflow->out_w = dflow->in_w; 745 dflow->out_h = dflow->in_h; 746 /* the output data of compiz doesn't have alpha, it only can be 747 * used as bottom layer when blend it with master layers 748 */ 749 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE; 750 dflow->layer_alpha = 0xFF; 751 dflow->blending_zorder = 0; 752 } 753 754 return 0; 755 } 756 757 static int 758 komeda_improc_validate(struct komeda_improc *improc, 759 struct komeda_crtc_state *kcrtc_st, 760 struct komeda_data_flow_cfg *dflow) 761 { 762 struct drm_crtc *crtc = kcrtc_st->base.crtc; 763 struct drm_crtc_state *crtc_st = &kcrtc_st->base; 764 struct komeda_component_state *c_st; 765 struct komeda_improc_state *st; 766 767 c_st = komeda_component_get_state_and_set_user(&improc->base, 768 kcrtc_st->base.state, crtc, crtc); 769 if (IS_ERR(c_st)) 770 return PTR_ERR(c_st); 771 772 st = to_improc_st(c_st); 773 774 st->hsize = dflow->in_w; 775 st->vsize = dflow->in_h; 776 777 if (drm_atomic_crtc_needs_modeset(crtc_st)) { 778 u32 output_depths, output_formats; 779 u32 avail_depths, avail_formats; 780 781 komeda_crtc_get_color_config(crtc_st, &output_depths, 782 &output_formats); 783 784 avail_depths = output_depths & improc->supported_color_depths; 785 if (avail_depths == 0) { 786 DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n", 787 output_depths, 788 improc->supported_color_depths); 789 return -EINVAL; 790 } 791 792 avail_formats = output_formats & 793 improc->supported_color_formats; 794 if (!avail_formats) { 795 DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n", 796 output_formats, 797 improc->supported_color_formats); 798 return -EINVAL; 799 } 800 801 st->color_depth = __fls(avail_depths); 802 st->color_format = BIT(__ffs(avail_formats)); 803 } 804 805 komeda_component_add_input(&st->base, &dflow->input, 0); 806 komeda_component_set_output(&dflow->input, &improc->base, 0); 807 808 return 0; 809 } 810 811 static int 812 komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr, 813 struct komeda_crtc_state *kcrtc_st, 814 struct komeda_data_flow_cfg *dflow) 815 { 816 struct drm_crtc *crtc = kcrtc_st->base.crtc; 817 struct komeda_timing_ctrlr_state *st; 818 struct komeda_component_state *c_st; 819 820 c_st = komeda_component_get_state_and_set_user(&ctrlr->base, 821 kcrtc_st->base.state, crtc, crtc); 822 if (IS_ERR(c_st)) 823 return PTR_ERR(c_st); 824 825 st = to_ctrlr_st(c_st); 826 827 komeda_component_add_input(&st->base, &dflow->input, 0); 828 komeda_component_set_output(&dflow->input, &ctrlr->base, 0); 829 830 return 0; 831 } 832 833 void komeda_complete_data_flow_cfg(struct komeda_layer *layer, 834 struct komeda_data_flow_cfg *dflow, 835 struct drm_framebuffer *fb) 836 { 837 struct komeda_scaler *scaler = layer->base.pipeline->scalers[0]; 838 u32 w = dflow->in_w; 839 u32 h = dflow->in_h; 840 841 dflow->total_in_w = dflow->in_w; 842 dflow->total_in_h = dflow->in_h; 843 dflow->total_out_w = dflow->out_w; 844 845 /* if format doesn't have alpha, fix blend mode to PIXEL_NONE */ 846 if (!fb->format->has_alpha) 847 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE; 848 849 if (drm_rotation_90_or_270(dflow->rot)) 850 swap(w, h); 851 852 dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h); 853 dflow->is_yuv = fb->format->is_yuv; 854 855 /* try to enable image enhancer if data flow is a 2x+ upscaling */ 856 dflow->en_img_enhancement = dflow->out_w >= 2 * w || 857 dflow->out_h >= 2 * h; 858 859 /* try to enable split if scaling exceed the scaler's acceptable 860 * input/output range. 861 */ 862 if (dflow->en_scaling && scaler) 863 dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) || 864 !in_range(&scaler->hsize, dflow->out_w); 865 } 866 867 static bool merger_is_available(struct komeda_pipeline *pipe, 868 struct komeda_data_flow_cfg *dflow) 869 { 870 u32 avail_inputs = pipe->merger ? 871 pipe->merger->base.supported_inputs : 0; 872 873 return has_bit(dflow->input.component->id, avail_inputs); 874 } 875 876 int komeda_build_layer_data_flow(struct komeda_layer *layer, 877 struct komeda_plane_state *kplane_st, 878 struct komeda_crtc_state *kcrtc_st, 879 struct komeda_data_flow_cfg *dflow) 880 { 881 struct drm_plane *plane = kplane_st->base.plane; 882 struct komeda_pipeline *pipe = layer->base.pipeline; 883 int err; 884 885 DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]", 886 layer->base.name, plane->base.id, plane->name, 887 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h, 888 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h); 889 890 err = komeda_layer_validate(layer, kplane_st, dflow); 891 if (err) 892 return err; 893 894 err = komeda_scaler_validate(plane, kcrtc_st, dflow); 895 if (err) 896 return err; 897 898 /* if split, check if can put the data flow into merger */ 899 if (dflow->en_split && merger_is_available(pipe, dflow)) 900 return 0; 901 902 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow); 903 904 return err; 905 } 906 907 /* 908 * Split is introduced for workaround scaler's input/output size limitation. 909 * The idea is simple, if one scaler can not fit the requirement, use two. 910 * So split splits the big source image to two half parts (left/right) and do 911 * the scaling by two scaler separately and independently. 912 * But split also imports an edge problem in the middle of the image when 913 * scaling, to avoid it, split isn't a simple half-and-half, but add an extra 914 * pixels (overlap) to both side, after split the left/right will be: 915 * - left: [0, src_length/2 + overlap] 916 * - right: [src_length/2 - overlap, src_length] 917 * The extra overlap do eliminate the edge problem, but which may also generates 918 * unnecessary pixels when scaling, we need to crop them before scaler output 919 * the result to the next stage. and for the how to crop, it depends on the 920 * unneeded pixels, another words the position where overlay has been added. 921 * - left: crop the right 922 * - right: crop the left 923 * 924 * The diagram for how to do the split 925 * 926 * <---------------------left->out_w ----------------> 927 * |--------------------------------|---right_crop-----| <- left after split 928 * \ \ / 929 * \ \<--overlap--->/ 930 * |-----------------|-------------|(Middle)------|-----------------| <- src 931 * /<---overlap--->\ \ 932 * / \ \ 933 * right after split->|-----left_crop---|--------------------------------| 934 * ^<------------------- right->out_w --------------->^ 935 * 936 * NOTE: To consistent with HW the output_w always contains the crop size. 937 */ 938 939 static void komeda_split_data_flow(struct komeda_scaler *scaler, 940 struct komeda_data_flow_cfg *dflow, 941 struct komeda_data_flow_cfg *l_dflow, 942 struct komeda_data_flow_cfg *r_dflow) 943 { 944 bool r90 = drm_rotation_90_or_270(dflow->rot); 945 bool flip_h = has_flip_h(dflow->rot); 946 u32 l_out, r_out, overlap; 947 948 memcpy(l_dflow, dflow, sizeof(*dflow)); 949 memcpy(r_dflow, dflow, sizeof(*dflow)); 950 951 l_dflow->right_part = false; 952 r_dflow->right_part = true; 953 r_dflow->blending_zorder = dflow->blending_zorder + 1; 954 955 overlap = 0; 956 if (dflow->en_scaling && scaler) 957 overlap += scaler->scaling_split_overlap; 958 959 /* original dflow may fed into splitter, and which doesn't need 960 * enhancement overlap 961 */ 962 dflow->overlap = overlap; 963 964 if (dflow->en_img_enhancement && scaler) 965 overlap += scaler->enh_split_overlap; 966 967 l_dflow->overlap = overlap; 968 r_dflow->overlap = overlap; 969 970 /* split the origin content */ 971 /* left/right here always means the left/right part of display image, 972 * not the source Image 973 */ 974 /* DRM rotation is anti-clockwise */ 975 if (r90) { 976 if (dflow->en_scaling) { 977 l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap; 978 r_dflow->in_h = l_dflow->in_h; 979 } else if (dflow->en_img_enhancement) { 980 /* enhancer only */ 981 l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap; 982 r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap; 983 } else { 984 /* split without scaler, no overlap */ 985 l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2); 986 r_dflow->in_h = dflow->in_h - l_dflow->in_h; 987 } 988 989 /* Consider YUV format, after split, the split source w/h 990 * may not aligned to 2. we have two choices for such case. 991 * 1. scaler is enabled (overlap != 0), we can do a alignment 992 * both left/right and crop the extra data by scaler. 993 * 2. scaler is not enabled, only align the split left 994 * src/disp, and the rest part assign to right 995 */ 996 if ((overlap != 0) && dflow->is_yuv) { 997 l_dflow->in_h = ALIGN(l_dflow->in_h, 2); 998 r_dflow->in_h = ALIGN(r_dflow->in_h, 2); 999 } 1000 1001 if (flip_h) 1002 l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h; 1003 else 1004 r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h; 1005 } else { 1006 if (dflow->en_scaling) { 1007 l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap; 1008 r_dflow->in_w = l_dflow->in_w; 1009 } else if (dflow->en_img_enhancement) { 1010 l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap; 1011 r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap; 1012 } else { 1013 l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2); 1014 r_dflow->in_w = dflow->in_w - l_dflow->in_w; 1015 } 1016 1017 /* do YUV alignment when scaler enabled */ 1018 if ((overlap != 0) && dflow->is_yuv) { 1019 l_dflow->in_w = ALIGN(l_dflow->in_w, 2); 1020 r_dflow->in_w = ALIGN(r_dflow->in_w, 2); 1021 } 1022 1023 /* on flip_h, the left display content from the right-source */ 1024 if (flip_h) 1025 l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w; 1026 else 1027 r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w; 1028 } 1029 1030 /* split the disp_rect */ 1031 if (dflow->en_scaling || dflow->en_img_enhancement) 1032 l_dflow->out_w = ((dflow->out_w + 1) >> 1); 1033 else 1034 l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2); 1035 1036 r_dflow->out_w = dflow->out_w - l_dflow->out_w; 1037 1038 l_dflow->out_x = dflow->out_x; 1039 r_dflow->out_x = l_dflow->out_w + l_dflow->out_x; 1040 1041 /* calculate the scaling crop */ 1042 /* left scaler output more data and do crop */ 1043 if (r90) { 1044 l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h; 1045 r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h; 1046 } else { 1047 l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w; 1048 r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w; 1049 } 1050 1051 l_dflow->left_crop = 0; 1052 l_dflow->right_crop = l_out - l_dflow->out_w; 1053 r_dflow->left_crop = r_out - r_dflow->out_w; 1054 r_dflow->right_crop = 0; 1055 1056 /* out_w includes the crop length */ 1057 l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop; 1058 r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop; 1059 } 1060 1061 /* For layer split, a plane state will be split to two data flows and handled 1062 * by two separated komeda layer input pipelines. komeda supports two types of 1063 * layer split: 1064 * - none-scaling split: 1065 * / layer-left -> \ 1066 * plane_state compiz-> ... 1067 * \ layer-right-> / 1068 * 1069 * - scaling split: 1070 * / layer-left -> scaler->\ 1071 * plane_state merger -> compiz-> ... 1072 * \ layer-right-> scaler->/ 1073 * 1074 * Since merger only supports scaler as input, so for none-scaling split, two 1075 * layer data flows will be output to compiz directly. for scaling_split, two 1076 * data flow will be merged by merger firstly, then merger outputs one merged 1077 * data flow to compiz. 1078 */ 1079 int komeda_build_layer_split_data_flow(struct komeda_layer *left, 1080 struct komeda_plane_state *kplane_st, 1081 struct komeda_crtc_state *kcrtc_st, 1082 struct komeda_data_flow_cfg *dflow) 1083 { 1084 struct drm_plane *plane = kplane_st->base.plane; 1085 struct komeda_pipeline *pipe = left->base.pipeline; 1086 struct komeda_layer *right = left->right; 1087 struct komeda_data_flow_cfg l_dflow, r_dflow; 1088 int err; 1089 1090 komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow); 1091 1092 DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: " 1093 "src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]", 1094 left->base.name, right->base.name, 1095 plane->base.id, plane->name, 1096 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h, 1097 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h); 1098 1099 err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow); 1100 if (err) 1101 return err; 1102 1103 err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow); 1104 if (err) 1105 return err; 1106 1107 /* The rotation has been handled by layer, so adjusted the data flow */ 1108 komeda_rotate_data_flow(dflow, dflow->rot); 1109 1110 /* left and right dflow has been merged to compiz already, 1111 * no need merger to merge them anymore. 1112 */ 1113 if (r_dflow.input.component == l_dflow.input.component) 1114 return 0; 1115 1116 /* line merger path */ 1117 err = komeda_merger_validate(pipe->merger, plane, kcrtc_st, 1118 &l_dflow, &r_dflow, dflow); 1119 if (err) 1120 return err; 1121 1122 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow); 1123 1124 return err; 1125 } 1126 1127 /* writeback data path: compiz -> scaler -> wb_layer -> memory */ 1128 int komeda_build_wb_data_flow(struct komeda_layer *wb_layer, 1129 struct drm_connector_state *conn_st, 1130 struct komeda_crtc_state *kcrtc_st, 1131 struct komeda_data_flow_cfg *dflow) 1132 { 1133 struct drm_connector *conn = conn_st->connector; 1134 int err; 1135 1136 err = komeda_scaler_validate(conn, kcrtc_st, dflow); 1137 if (err) 1138 return err; 1139 1140 return komeda_wb_layer_validate(wb_layer, conn_st, dflow); 1141 } 1142 1143 /* writeback scaling split data path: 1144 * /-> scaler ->\ 1145 * compiz -> splitter merger -> wb_layer -> memory 1146 * \-> scaler ->/ 1147 */ 1148 int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer, 1149 struct drm_connector_state *conn_st, 1150 struct komeda_crtc_state *kcrtc_st, 1151 struct komeda_data_flow_cfg *dflow) 1152 { 1153 struct komeda_pipeline *pipe = wb_layer->base.pipeline; 1154 struct drm_connector *conn = conn_st->connector; 1155 struct komeda_data_flow_cfg l_dflow, r_dflow; 1156 int err; 1157 1158 err = komeda_splitter_validate(pipe->splitter, conn_st, 1159 dflow, &l_dflow, &r_dflow); 1160 if (err) 1161 return err; 1162 err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow); 1163 if (err) 1164 return err; 1165 1166 err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow); 1167 if (err) 1168 return err; 1169 1170 err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st, 1171 &l_dflow, &r_dflow, dflow); 1172 if (err) 1173 return err; 1174 1175 return komeda_wb_layer_validate(wb_layer, conn_st, dflow); 1176 } 1177 1178 /* build display output data flow, the data path is: 1179 * compiz -> improc -> timing_ctrlr 1180 */ 1181 int komeda_build_display_data_flow(struct komeda_crtc *kcrtc, 1182 struct komeda_crtc_state *kcrtc_st) 1183 { 1184 struct komeda_pipeline *master = kcrtc->master; 1185 struct komeda_pipeline *slave = kcrtc->slave; 1186 struct komeda_data_flow_cfg m_dflow; /* master data flow */ 1187 struct komeda_data_flow_cfg s_dflow; /* slave data flow */ 1188 int err; 1189 1190 memset(&m_dflow, 0, sizeof(m_dflow)); 1191 memset(&s_dflow, 0, sizeof(s_dflow)); 1192 1193 if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) { 1194 err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow); 1195 if (err) 1196 return err; 1197 1198 /* merge the slave dflow into master pipeline */ 1199 err = komeda_compiz_set_input(master->compiz, kcrtc_st, 1200 &s_dflow); 1201 if (err) 1202 return err; 1203 } 1204 1205 err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow); 1206 if (err) 1207 return err; 1208 1209 err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow); 1210 if (err) 1211 return err; 1212 1213 err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow); 1214 if (err) 1215 return err; 1216 1217 return 0; 1218 } 1219 1220 static void 1221 komeda_pipeline_unbound_components(struct komeda_pipeline *pipe, 1222 struct komeda_pipeline_state *new) 1223 { 1224 struct drm_atomic_state *drm_st = new->obj.state; 1225 struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state); 1226 struct komeda_component_state *c_st; 1227 struct komeda_component *c; 1228 u32 disabling_comps, id; 1229 1230 WARN_ON(!old); 1231 1232 disabling_comps = (~new->active_comps) & old->active_comps; 1233 1234 /* unbound all disabling component */ 1235 dp_for_each_set_bit(id, disabling_comps) { 1236 c = komeda_pipeline_get_component(pipe, id); 1237 c_st = komeda_component_get_state_and_set_user(c, 1238 drm_st, NULL, new->crtc); 1239 WARN_ON(IS_ERR(c_st)); 1240 } 1241 } 1242 1243 /* release unclaimed pipeline resource */ 1244 int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe, 1245 struct komeda_crtc_state *kcrtc_st) 1246 { 1247 struct drm_atomic_state *drm_st = kcrtc_st->base.state; 1248 struct komeda_pipeline_state *st; 1249 1250 /* ignore the pipeline which is not affected */ 1251 if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes)) 1252 return 0; 1253 1254 if (has_bit(pipe->id, kcrtc_st->active_pipes)) 1255 st = komeda_pipeline_get_new_state(pipe, drm_st); 1256 else 1257 st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL); 1258 1259 if (WARN_ON(IS_ERR_OR_NULL(st))) 1260 return -EINVAL; 1261 1262 komeda_pipeline_unbound_components(pipe, st); 1263 1264 return 0; 1265 } 1266 1267 /* Since standalong disabled components must be disabled separately and in the 1268 * last, So a complete disable operation may needs to call pipeline_disable 1269 * twice (two phase disabling). 1270 * Phase 1: disable the common components, flush it. 1271 * Phase 2: disable the standalone disabled components, flush it. 1272 * 1273 * RETURNS: 1274 * true: disable is not complete, needs a phase 2 disable. 1275 * false: disable is complete. 1276 */ 1277 bool komeda_pipeline_disable(struct komeda_pipeline *pipe, 1278 struct drm_atomic_state *old_state) 1279 { 1280 struct komeda_pipeline_state *old; 1281 struct komeda_component *c; 1282 struct komeda_component_state *c_st; 1283 u32 id, disabling_comps = 0; 1284 1285 old = komeda_pipeline_get_old_state(pipe, old_state); 1286 1287 disabling_comps = old->active_comps & 1288 (~pipe->standalone_disabled_comps); 1289 if (!disabling_comps) 1290 disabling_comps = old->active_comps & 1291 pipe->standalone_disabled_comps; 1292 1293 DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n", 1294 pipe->id, old->active_comps, disabling_comps); 1295 1296 dp_for_each_set_bit(id, disabling_comps) { 1297 c = komeda_pipeline_get_component(pipe, id); 1298 c_st = priv_to_comp_st(c->obj.state); 1299 1300 /* 1301 * If we disabled a component then all active_inputs should be 1302 * put in the list of changed_active_inputs, so they get 1303 * re-enabled. 1304 * This usually happens during a modeset when the pipeline is 1305 * first disabled and then the actual state gets committed 1306 * again. 1307 */ 1308 c_st->changed_active_inputs |= c_st->active_inputs; 1309 1310 c->funcs->disable(c); 1311 } 1312 1313 /* Update the pipeline state, if there are components that are still 1314 * active, return true for calling the phase 2 disable. 1315 */ 1316 old->active_comps &= ~disabling_comps; 1317 1318 return old->active_comps ? true : false; 1319 } 1320 1321 void komeda_pipeline_update(struct komeda_pipeline *pipe, 1322 struct drm_atomic_state *old_state) 1323 { 1324 struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state); 1325 struct komeda_pipeline_state *old; 1326 struct komeda_component *c; 1327 u32 id, changed_comps = 0; 1328 1329 old = komeda_pipeline_get_old_state(pipe, old_state); 1330 1331 changed_comps = new->active_comps | old->active_comps; 1332 1333 DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n", 1334 pipe->id, new->active_comps, changed_comps); 1335 1336 dp_for_each_set_bit(id, changed_comps) { 1337 c = komeda_pipeline_get_component(pipe, id); 1338 1339 if (new->active_comps & BIT(c->id)) 1340 c->funcs->update(c, priv_to_comp_st(c->obj.state)); 1341 else 1342 c->funcs->disable(c); 1343 } 1344 } 1345