// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */

#include <drm/drm_print.h>
#include <linux/clk.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
#include "komeda_pipeline.h"
#include "komeda_framebuffer.h"

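/*
 * A resource is only "switching user" when it already has a user and a
 * different one now requests it; a NULL old or new user never counts as
 * switching.
 */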
static inline bool is_switching_user(void *old, void *new)
{
	if (!old || !new)
		return false;

	return old != new;
}

static struct komeda_pipeline_state *
komeda_pipeline_get_state(struct komeda_pipeline *pipe,
			  struct drm_atomic_state *state)
{
	struct drm_private_state *priv_st;

	priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
	if (IS_ERR(priv_st))
		return ERR_CAST(priv_st);

	return priv_to_pipe_st(priv_st);
}

struct komeda_pipeline_state *
komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
			      struct drm_atomic_state *state)
{
	struct drm_private_state *priv_st;

	priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
	if (priv_st)
		return priv_to_pipe_st(priv_st);
	return NULL;
}

static struct komeda_pipeline_state *
komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
			      struct drm_atomic_state *state)
{
	struct drm_private_state *priv_st;

	priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
	if (priv_st)
		return priv_to_pipe_st(priv_st);
	return NULL;
}

/* Assign a pipeline to the CRTC */
static struct komeda_pipeline_state *
komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
				       struct drm_atomic_state *state,
				       struct drm_crtc *crtc)
{
	struct komeda_pipeline_state *st;

	st = komeda_pipeline_get_state(pipe, state);
	if (IS_ERR(st))
		return st;

	if (is_switching_user(crtc, st->crtc)) {
		DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
				 drm_crtc_index(crtc), pipe->id);
		return ERR_PTR(-EBUSY);
	}

	/* a pipeline can only be disabled when it is free or unused */
	if (!crtc && st->active_comps) {
		DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
		return ERR_PTR(-EBUSY);
	}

	st->crtc = crtc;

	if (crtc) {
		struct komeda_crtc_state *kcrtc_st;

		kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
								     crtc));

		kcrtc_st->active_pipes |= BIT(pipe->id);
		kcrtc_st->affected_pipes |= BIT(pipe->id);
	}
	return st;
}

static struct komeda_component_state *
komeda_component_get_state(struct komeda_component *c,
			   struct drm_atomic_state *state)
{
	struct drm_private_state *priv_st;

	WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));

	priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
	if (IS_ERR(priv_st))
		return ERR_CAST(priv_st);

	return priv_to_comp_st(priv_st);
}

static struct komeda_component_state *
komeda_component_get_old_state(struct komeda_component *c,
			       struct drm_atomic_state *state)
{
	struct drm_private_state *priv_st;

	priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
	if (priv_st)
		return priv_to_comp_st(priv_st);
	return NULL;
}

/**
 * komeda_component_get_state_and_set_user - get component state and set its user
 *
 * @c: component to get the state of and to set the user for
 * @state: global atomic state
 * @user: the direct (binding) user
 * @crtc: the CRTC user, the final owner
 *
 * This function accepts two users:
 * -   The direct user: a plane/crtc/wb_connector, depending on the component
 * -   The CRTC user
 * The CRTC is the final user, because all component resources are eventually
 * assigned to a CRTC: a layer is bound to a kms_plane, but the kms_plane is
 * in turn bound to a CRTC.
 *
 * The CRTC user drives the pipeline assignment: a &komeda_component is not an
 * independent resource that can be handed to any CRTC, it belongs to a
 * specific pipeline. Only a pipeline can be shared between CRTCs, and a
 * pipeline is assigned to a CRTC as a whole (including all of its internal
 * components).
 *
 * So before setting a user on a komeda_component, first check the status of
 * component->pipeline to see whether the pipeline is available for this
 * specific CRTC. If the pipeline is busy (assigned to another CRTC), the
 * component cannot be assigned to the direct user even if the component
 * itself is free.
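 *
 * For example, when validating a layer for a plane update, the direct user is
 * the &drm_plane and the CRTC user is the plane_state->crtc the plane is being
 * placed on (see komeda_layer_validate()).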
 */
static struct komeda_component_state *
komeda_component_get_state_and_set_user(struct komeda_component *c,
					struct drm_atomic_state *state,
					void *user,
					struct drm_crtc *crtc)
{
	struct komeda_pipeline_state *pipe_st;
	struct komeda_component_state *st;

	/* First check if the pipeline is available */
	pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
							 state, crtc);
	if (IS_ERR(pipe_st))
		return ERR_CAST(pipe_st);

	st = komeda_component_get_state(c, state);
	if (IS_ERR(st))
		return st;

	/* check if the component has already been occupied */
	if (is_switching_user(user, st->binding_user)) {
		DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
		return ERR_PTR(-EBUSY);
	}

	st->binding_user = user;
	/* mark the component as active if the user is valid */
	if (st->binding_user)
		pipe_st->active_comps |= BIT(c->id);

	return st;
}

static void
komeda_component_add_input(struct komeda_component_state *state,
			   struct komeda_component_output *input,
			   int idx)
{
	struct komeda_component *c = state->component;

	WARN_ON(idx < 0 || idx >= c->max_active_inputs);

	/* inputs[i] is only valid when it is active, so if input[i] is a newly
	 * enabled input which switches from disabled to enabled, the old
	 * inputs[i] is undefined (NOT zeroed) and we cannot rely on memcmp;
	 * directly mark it changed instead.
	 */
	if (!has_bit(idx, state->affected_inputs) ||
	    memcmp(&state->inputs[idx], input, sizeof(*input))) {
		memcpy(&state->inputs[idx], input, sizeof(*input));
		state->changed_active_inputs |= BIT(idx);
	}
	state->active_inputs |= BIT(idx);
	state->affected_inputs |= BIT(idx);
}

static int
komeda_component_check_input(struct komeda_component_state *state,
			     struct komeda_component_output *input,
			     int idx)
{
	struct komeda_component *c = state->component;

	if ((idx < 0) || (idx >= c->max_active_inputs)) {
		DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n",
				 input->component->name, c->name, idx);
		return -EINVAL;
	}

	if (has_bit(idx, state->active_inputs)) {
		DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n",
				 input->component->name, c->name, idx);
		return -EINVAL;
	}

	return 0;
}

static void
komeda_component_set_output(struct komeda_component_output *output,
			    struct komeda_component *comp,
			    u8 output_port)
{
	output->component = comp;
	output->output_port = output_port;
}

static int
komeda_component_validate_private(struct komeda_component *c,
				  struct komeda_component_state *st)
{
	int err;

	if (!c->funcs->validate)
		return 0;

	err = c->funcs->validate(c, st);
	if (err)
		DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);

	return err;
}

/* Get the currently available scaler from the component->supported_outputs */
static struct komeda_scaler *
komeda_component_get_avail_scaler(struct komeda_component *c,
				  struct drm_atomic_state *state)
{
	struct komeda_pipeline_state *pipe_st;
	u32 avail_scalers;

	pipe_st = komeda_pipeline_get_state(c->pipeline, state);
	if (IS_ERR(pipe_st))
		return NULL;

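	/*
	 * Keep only the scalers that have not been claimed yet: XOR-ing the
	 * active scaler bits against the full scaler mask clears the busy
	 * ones and leaves the free ones set.
	 */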
	avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
			KOMEDA_PIPELINE_SCALERS;

	c = komeda_component_pickup_output(c, avail_scalers);

	return to_scaler(c);
}

static void
komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot)
{
	if (drm_rotation_90_or_270(rot)) {
		swap(dflow->in_h, dflow->in_w);
		swap(dflow->total_in_h, dflow->total_in_w);
	}
}

static int
komeda_layer_check_cfg(struct komeda_layer *layer,
		       struct komeda_fb *kfb,
		       struct komeda_data_flow_cfg *dflow)
{
	u32 src_x, src_y, src_w, src_h;

	if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
		return -EINVAL;

	if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
		src_x = dflow->out_x;
		src_y = dflow->out_y;
		src_w = dflow->out_w;
		src_h = dflow->out_h;
	} else {
		src_x = dflow->in_x;
		src_y = dflow->in_y;
		src_w = dflow->in_w;
		src_h = dflow->in_h;
	}

	if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
		return -EINVAL;

	if (!in_range(&layer->hsize_in, src_w)) {
		DRM_DEBUG_ATOMIC("invalid src_w %d.\n", src_w);
		return -EINVAL;
	}

	if (!in_range(&layer->vsize_in, src_h)) {
		DRM_DEBUG_ATOMIC("invalid src_h %d.\n", src_h);
		return -EINVAL;
	}

	return 0;
}

static int
komeda_layer_validate(struct komeda_layer *layer,
		      struct komeda_plane_state *kplane_st,
		      struct komeda_data_flow_cfg *dflow)
{
	struct drm_plane_state *plane_st = &kplane_st->base;
	struct drm_framebuffer *fb = plane_st->fb;
	struct komeda_fb *kfb = to_kfb(fb);
	struct komeda_component_state *c_st;
	struct komeda_layer_state *st;
	int i, err;

	err = komeda_layer_check_cfg(layer, kfb, dflow);
	if (err)
		return err;

	c_st = komeda_component_get_state_and_set_user(&layer->base,
			plane_st->state, plane_st->plane, plane_st->crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_layer_st(c_st);

	st->rot = dflow->rot;

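	/*
	 * For an AFBC framebuffer (fb->modifier != 0) the layer reads the
	 * whole aligned buffer and crops it down to the requested source
	 * rect via the afbc_crop_* fields; for a linear buffer the layer
	 * input size is simply the source rect itself.
	 */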
	if (fb->modifier) {
		st->hsize = kfb->aligned_w;
		st->vsize = kfb->aligned_h;
		st->afbc_crop_l = dflow->in_x;
		st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w;
		st->afbc_crop_t = dflow->in_y;
		st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h;
	} else {
		st->hsize = dflow->in_w;
		st->vsize = dflow->in_h;
		st->afbc_crop_l = 0;
		st->afbc_crop_r = 0;
		st->afbc_crop_t = 0;
		st->afbc_crop_b = 0;
	}

	for (i = 0; i < fb->format->num_planes; i++)
		st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
						       dflow->in_y, i);

	err = komeda_component_validate_private(&layer->base, c_st);
	if (err)
		return err;

	/* update the data flow for the next stage */
	komeda_component_set_output(&dflow->input, &layer->base, 0);

	/*
	 * The rotation has been handled by the layer, so adjust the data flow
	 * for the next stage.
	 */
	komeda_rotate_data_flow(dflow, st->rot);

	return 0;
}

static int
komeda_wb_layer_validate(struct komeda_layer *wb_layer,
			 struct drm_connector_state *conn_st,
			 struct komeda_data_flow_cfg *dflow)
{
	struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
	struct komeda_component_state *c_st;
	struct komeda_layer_state *st;
	int i, err;

	err = komeda_layer_check_cfg(wb_layer, kfb, dflow);
	if (err)
		return err;

	c_st = komeda_component_get_state_and_set_user(&wb_layer->base,
			conn_st->state, conn_st->connector, conn_st->crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_layer_st(c_st);

	st->hsize = dflow->out_w;
	st->vsize = dflow->out_h;

	for (i = 0; i < kfb->base.format->num_planes; i++)
		st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x,
						       dflow->out_y, i);

	komeda_component_add_input(&st->base, &dflow->input, 0);
	komeda_component_set_output(&dflow->input, &wb_layer->base, 0);

	return 0;
}

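/*
 * The scaler can magnify at most max_upscaling times and shrink at most
 * max_downscaling times. For example, with a max_upscaling of 4 a 100 pixel
 * wide input can produce at most a 400 pixel wide output (the actual limits
 * are hardware specific and come from the scaler capabilities).
 */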
static bool scaling_ratio_valid(u32 size_in, u32 size_out,
				u32 max_upscaling, u32 max_downscaling)
{
	if (size_out > size_in * max_upscaling)
		return false;
	else if (size_in > size_out * max_downscaling)
		return false;
	return true;
}

static int
komeda_scaler_check_cfg(struct komeda_scaler *scaler,
			struct komeda_crtc_state *kcrtc_st,
			struct komeda_data_flow_cfg *dflow)
{
	u32 hsize_in, vsize_in, hsize_out, vsize_out;
	u32 max_upscaling;

	hsize_in = dflow->in_w;
	vsize_in = dflow->in_h;
	hsize_out = dflow->out_w;
	vsize_out = dflow->out_h;

	if (!in_range(&scaler->hsize, hsize_in) ||
	    !in_range(&scaler->hsize, hsize_out)) {
		DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
		return -EINVAL;
	}

	if (!in_range(&scaler->vsize, vsize_in) ||
	    !in_range(&scaler->vsize, vsize_out)) {
		DRM_DEBUG_ATOMIC("Invalid vertical sizes");
		return -EINVAL;
	}

	/* If the input comes from the compiz, the scaling is for writeback,
	 * and the scaler cannot do upscaling for writeback.
	 */
	if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS))
		max_upscaling = 1;
	else
		max_upscaling = scaler->max_upscaling;

	if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling,
				 scaler->max_downscaling)) {
		DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio");
		return -EINVAL;
	}

	if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling,
				 scaler->max_downscaling)) {
		DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio");
		return -EINVAL;
	}

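	/*
	 * Downscaling has to fetch more input pixels per output pixel, so
	 * whether the engine clock (aclk) can keep up depends on the display
	 * mode and on the scaling ratio; leave that judgement to the
	 * pipeline-specific downscaling_clk_check() hook below.
	 */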
	if (hsize_in > hsize_out || vsize_in > vsize_out) {
		struct komeda_pipeline *pipe = scaler->base.pipeline;
		int err;

		err = pipe->funcs->downscaling_clk_check(pipe,
					&kcrtc_st->base.adjusted_mode,
					komeda_calc_aclk(kcrtc_st), dflow);
		if (err) {
			DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
			return err;
		}
	}

	return 0;
}

static int
komeda_scaler_validate(void *user,
		       struct komeda_crtc_state *kcrtc_st,
		       struct komeda_data_flow_cfg *dflow)
{
	struct drm_atomic_state *drm_st = kcrtc_st->base.state;
	struct komeda_component_state *c_st;
	struct komeda_scaler_state *st;
	struct komeda_scaler *scaler;
	int err = 0;

	if (!(dflow->en_scaling || dflow->en_img_enhancement))
		return 0;

	scaler = komeda_component_get_avail_scaler(dflow->input.component,
						   drm_st);
	if (!scaler) {
		DRM_DEBUG_ATOMIC("No scaler available");
		return -EINVAL;
	}

	err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow);
	if (err)
		return err;

	c_st = komeda_component_get_state_and_set_user(&scaler->base,
			drm_st, user, kcrtc_st->base.crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_scaler_st(c_st);

	st->hsize_in = dflow->in_w;
	st->vsize_in = dflow->in_h;
	st->hsize_out = dflow->out_w;
	st->vsize_out = dflow->out_h;
	st->right_crop = dflow->right_crop;
	st->left_crop = dflow->left_crop;
	st->total_vsize_in = dflow->total_in_h;
	st->total_hsize_in = dflow->total_in_w;
	st->total_hsize_out = dflow->total_out_w;

	/* Enable alpha processing if the next stage needs the pixel alpha */
	st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE;
	st->en_scaling = dflow->en_scaling;
	st->en_img_enhancement = dflow->en_img_enhancement;
	st->en_split = dflow->en_split;
	st->right_part = dflow->right_part;

	komeda_component_add_input(&st->base, &dflow->input, 0);
	komeda_component_set_output(&dflow->input, &scaler->base, 0);
	return err;
}

static void komeda_split_data_flow(struct komeda_scaler *scaler,
				   struct komeda_data_flow_cfg *dflow,
				   struct komeda_data_flow_cfg *l_dflow,
				   struct komeda_data_flow_cfg *r_dflow);

static int
komeda_splitter_validate(struct komeda_splitter *splitter,
			 struct drm_connector_state *conn_st,
			 struct komeda_data_flow_cfg *dflow,
			 struct komeda_data_flow_cfg *l_output,
			 struct komeda_data_flow_cfg *r_output)
{
	struct komeda_component_state *c_st;
	struct komeda_splitter_state *st;

	if (!splitter) {
		DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n");
		return -EINVAL;
	}

	if (!in_range(&splitter->hsize, dflow->in_w)) {
		DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n",
				 dflow->in_w);
		return -EINVAL;
	}

	if (!in_range(&splitter->vsize, dflow->in_h)) {
		DRM_DEBUG_ATOMIC("split in_h:%d is out of the acceptable range.\n",
				 dflow->in_h);
		return -EINVAL;
	}

	c_st = komeda_component_get_state_and_set_user(&splitter->base,
			conn_st->state, conn_st->connector, conn_st->crtc);

	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	komeda_split_data_flow(splitter->base.pipeline->scalers[0],
			       dflow, l_output, r_output);

	st = to_splitter_st(c_st);
	st->hsize = dflow->in_w;
	st->vsize = dflow->in_h;
	st->overlap = dflow->overlap;

	komeda_component_add_input(&st->base, &dflow->input, 0);
	komeda_component_set_output(&l_output->input, &splitter->base, 0);
	komeda_component_set_output(&r_output->input, &splitter->base, 1);

	return 0;
}

static int
komeda_merger_validate(struct komeda_merger *merger,
		       void *user,
		       struct komeda_crtc_state *kcrtc_st,
		       struct komeda_data_flow_cfg *left_input,
		       struct komeda_data_flow_cfg *right_input,
		       struct komeda_data_flow_cfg *output)
{
	struct komeda_component_state *c_st;
	struct komeda_merger_state *st;
	int err = 0;

	if (!merger) {
		DRM_DEBUG_ATOMIC("No merger is available");
		return -EINVAL;
	}

	if (!in_range(&merger->hsize_merged, output->out_w)) {
		DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n",
				 output->out_w);
		return -EINVAL;
	}

	if (!in_range(&merger->vsize_merged, output->out_h)) {
		DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n",
				 output->out_h);
		return -EINVAL;
	}

	c_st = komeda_component_get_state_and_set_user(&merger->base,
			kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc);

	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_merger_st(c_st);
	st->hsize_merged = output->out_w;
	st->vsize_merged = output->out_h;

	komeda_component_add_input(c_st, &left_input->input, 0);
	komeda_component_add_input(c_st, &right_input->input, 1);
	komeda_component_set_output(&output->input, &merger->base, 0);

	return err;
}

void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
			       u16 *hsize, u16 *vsize)
{
	struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;

	if (hsize)
		*hsize = m->hdisplay;
	if (vsize)
		*vsize = m->vdisplay;
}

static int
komeda_compiz_set_input(struct komeda_compiz *compiz,
			struct komeda_crtc_state *kcrtc_st,
			struct komeda_data_flow_cfg *dflow)
{
	struct drm_atomic_state *drm_st = kcrtc_st->base.state;
	struct komeda_component_state *c_st, *old_st;
	struct komeda_compiz_input_cfg *cin;
	u16 compiz_w, compiz_h;
	int idx = dflow->blending_zorder;

	pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
	/* check display rect */
	if ((dflow->out_x + dflow->out_w > compiz_w) ||
	    (dflow->out_y + dflow->out_h > compiz_h) ||
	     dflow->out_w == 0 || dflow->out_h == 0) {
		DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
				 dflow->out_x, dflow->out_y,
				 dflow->out_w, dflow->out_h);
		return -EINVAL;
	}

	c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
			kcrtc_st->base.crtc, kcrtc_st->base.crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	if (komeda_component_check_input(c_st, &dflow->input, idx))
		return -EINVAL;

	cin = &(to_compiz_st(c_st)->cins[idx]);

	cin->hsize   = dflow->out_w;
	cin->vsize   = dflow->out_h;
	cin->hoffset = dflow->out_x;
	cin->voffset = dflow->out_y;
	cin->pixel_blend_mode = dflow->pixel_blend_mode;
	cin->layer_alpha = dflow->layer_alpha;

	old_st = komeda_component_get_old_state(&compiz->base, drm_st);
	WARN_ON(!old_st);

	/* compare with old to check if this input has been changed */
	if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
		c_st->changed_active_inputs |= BIT(idx);

	komeda_component_add_input(c_st, &dflow->input, idx);
	komeda_component_set_output(&dflow->input, &compiz->base, 0);

	return 0;
}

static int
komeda_compiz_validate(struct komeda_compiz *compiz,
		       struct komeda_crtc_state *state,
		       struct komeda_data_flow_cfg *dflow)
{
	struct komeda_component_state *c_st;
	struct komeda_compiz_state *st;

	c_st = komeda_component_get_state_and_set_user(&compiz->base,
			state->base.state, state->base.crtc, state->base.crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_compiz_st(c_st);

	pipeline_composition_size(state, &st->hsize, &st->vsize);

	/* The compiz output dflow will be fed to the next pipeline stage, so
	 * prepare the data flow configuration for the next stage.
	 */
	if (dflow) {
		komeda_component_set_output(&dflow->input, &compiz->base, 0);

		dflow->in_w = st->hsize;
		dflow->in_h = st->vsize;
		dflow->out_w = dflow->in_w;
		dflow->out_h = dflow->in_h;
		/* the compiz output has no alpha, so it can only be used as
		 * the bottom layer when blending it with the master layers
		 */
		dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
		dflow->layer_alpha = 0xFF;
		dflow->blending_zorder = 0;
	}

	return 0;
}

static int
komeda_improc_validate(struct komeda_improc *improc,
		       struct komeda_crtc_state *kcrtc_st,
		       struct komeda_data_flow_cfg *dflow)
{
	struct drm_crtc *crtc = kcrtc_st->base.crtc;
	struct komeda_component_state *c_st;
	struct komeda_improc_state *st;

	c_st = komeda_component_get_state_and_set_user(&improc->base,
			kcrtc_st->base.state, crtc, crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_improc_st(c_st);

	st->hsize = dflow->in_w;
	st->vsize = dflow->in_h;

	komeda_component_add_input(&st->base, &dflow->input, 0);
	komeda_component_set_output(&dflow->input, &improc->base, 0);

	return 0;
}

static int
komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
			     struct komeda_crtc_state *kcrtc_st,
			     struct komeda_data_flow_cfg *dflow)
{
	struct drm_crtc *crtc = kcrtc_st->base.crtc;
	struct komeda_timing_ctrlr_state *st;
	struct komeda_component_state *c_st;

	c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
			kcrtc_st->base.state, crtc, crtc);
	if (IS_ERR(c_st))
		return PTR_ERR(c_st);

	st = to_ctrlr_st(c_st);

	komeda_component_add_input(&st->base, &dflow->input, 0);
	komeda_component_set_output(&dflow->input, &ctrlr->base, 0);

	return 0;
}

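/*
 * Derive the remaining data flow fields from the user-provided src/disp
 * rects: for example, a 1920x1080 source with a 90/270 degree rotation is
 * presented to the scaler as 1080x1920, so scaling is needed whenever that
 * (rotated) source size differs from the display size out_w x out_h.
 */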
void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow,
				   struct drm_framebuffer *fb)
{
	u32 w = dflow->in_w;
	u32 h = dflow->in_h;

	dflow->total_in_w = dflow->in_w;
	dflow->total_in_h = dflow->in_h;
	dflow->total_out_w = dflow->out_w;

	/* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
	if (!fb->format->has_alpha)
		dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;

	if (drm_rotation_90_or_270(dflow->rot))
		swap(w, h);

	dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
	dflow->is_yuv = fb->format->is_yuv;
}

static bool merger_is_available(struct komeda_pipeline *pipe,
				struct komeda_data_flow_cfg *dflow)
{
	u32 avail_inputs = pipe->merger ?
			   pipe->merger->base.supported_inputs : 0;

	return has_bit(dflow->input.component->id, avail_inputs);
}

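/*
 * Build the per-layer data flow:
 *   layer -> (optional) scaler -> compiz
 * When this flow is one half of a split and the merger can accept it, stop
 * before the compiz; the merged result is fed into the compiz by the caller
 * (see komeda_build_layer_split_data_flow()).
 */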
int komeda_build_layer_data_flow(struct komeda_layer *layer,
				 struct komeda_plane_state *kplane_st,
				 struct komeda_crtc_state *kcrtc_st,
				 struct komeda_data_flow_cfg *dflow)
{
	struct drm_plane *plane = kplane_st->base.plane;
	struct komeda_pipeline *pipe = layer->base.pipeline;
	int err;

	DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
			 layer->base.name, plane->base.id, plane->name,
			 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
			 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);

	err = komeda_layer_validate(layer, kplane_st, dflow);
	if (err)
		return err;

	err = komeda_scaler_validate(plane, kcrtc_st, dflow);
	if (err)
		return err;

	/* if split, check whether the data flow can be fed into the merger */
	if (dflow->en_split && merger_is_available(pipe, dflow))
		return 0;

	err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);

	return err;
}

/*
 * Split is introduced to work around the scaler's input/output size
 * limitation. The idea is simple: if one scaler cannot fit the requirement,
 * use two. So split divides the big source image into two half parts
 * (left/right) and does the scaling with two scalers separately and
 * independently.
 * But split also introduces an edge problem in the middle of the image when
 * scaling. To avoid it, split isn't a simple half-and-half; extra pixels
 * (overlap) are added to both sides, so after the split the left/right parts
 * will be:
 * - left: [0, src_length/2 + overlap]
 * - right: [src_length/2 - overlap, src_length]
 * The extra overlap does eliminate the edge problem, but it may also generate
 * unnecessary pixels when scaling; we need to crop them before the scaler
 * outputs the result to the next stage. How to crop depends on where the
 * unneeded pixels are, in other words on which side the overlap has been
 * added:
 * - left: crop the right
 * - right: crop the left
 *
 * The diagram for how to do the split
 *
 *  <---------------------left->out_w ---------------->
 * |--------------------------------|---right_crop-----| <- left after split
 *  \                                \                /
 *   \                                \<--overlap--->/
 *   |-----------------|-------------|(Middle)------|-----------------| <- src
 *                     /<---overlap--->\                               \
 *                    /                 \                               \
 * right after split->|-----left_crop---|--------------------------------|
 *                    ^<------------------- right->out_w --------------->^
 *
 *
 * NOTE: To be consistent with the HW, out_w always includes the crop size.
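 *
 * A hypothetical worked example (assuming a scaling_split_overlap of 8, no
 * rotation, no flip, no image enhancement): a 1920 pixel wide source scaled
 * to a 1280 pixel wide display rect is split into two 968 pixel inputs
 * (960 + 8 overlap, the right one starting at in_x 952). Each half gets a
 * 640 pixel display rect; scaling 968 input pixels at the full 1920->1280
 * ratio produces 1280 * 968 / 1920 = 645 output pixels (integer division),
 * so the left half carries right_crop = 5, the right half carries
 * left_crop = 5, and both halves report out_w = 645 including the crop.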
 */
static void komeda_split_data_flow(struct komeda_scaler *scaler,
				   struct komeda_data_flow_cfg *dflow,
				   struct komeda_data_flow_cfg *l_dflow,
				   struct komeda_data_flow_cfg *r_dflow)
{
	bool r90 = drm_rotation_90_or_270(dflow->rot);
	bool flip_h = has_flip_h(dflow->rot);
	u32 l_out, r_out, overlap;

	memcpy(l_dflow, dflow, sizeof(*dflow));
	memcpy(r_dflow, dflow, sizeof(*dflow));

	l_dflow->right_part = false;
	r_dflow->right_part = true;
	r_dflow->blending_zorder = dflow->blending_zorder + 1;

	overlap = 0;
	if (dflow->en_scaling && scaler)
		overlap += scaler->scaling_split_overlap;

	/* the original dflow may be fed into a splitter, which doesn't need
	 * the enhancement overlap
	 */
	dflow->overlap = overlap;

	if (dflow->en_img_enhancement && scaler)
		overlap += scaler->enh_split_overlap;

	l_dflow->overlap = overlap;
	r_dflow->overlap = overlap;

	/* split the original content */
	/* left/right here always means the left/right part of the display
	 * image, not of the source image
	 */
	/* DRM rotation is anti-clockwise */
	if (r90) {
		if (dflow->en_scaling) {
			l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
			r_dflow->in_h = l_dflow->in_h;
		} else if (dflow->en_img_enhancement) {
			/* enhancer only */
			l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
			r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
		} else {
			/* split without scaler, no overlap */
			l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
			r_dflow->in_h = dflow->in_h - l_dflow->in_h;
		}

		/* For YUV formats the split source w/h may not be aligned to 2
		 * after the split. There are two choices for such a case:
		 * 1. scaler is enabled (overlap != 0): align both left/right
		 *    and crop the extra data in the scaler.
		 * 2. scaler is not enabled: only align the split left
		 *    src/disp, and assign the rest to the right.
		 */
		if ((overlap != 0) && dflow->is_yuv) {
			l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
			r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
		}

		if (flip_h)
			l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
		else
			r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
	} else {
		if (dflow->en_scaling) {
			l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
			r_dflow->in_w = l_dflow->in_w;
		} else if (dflow->en_img_enhancement) {
			l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
			r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
		} else {
			l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
			r_dflow->in_w = dflow->in_w - l_dflow->in_w;
		}

		/* do the YUV alignment when the scaler is enabled */
		if ((overlap != 0) && dflow->is_yuv) {
			l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
			r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
		}

		/* on flip_h, the left display content comes from the right
		 * part of the source
		 */
		if (flip_h)
			l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
		else
			r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
	}

	/* split the disp_rect */
	if (dflow->en_scaling || dflow->en_img_enhancement)
		l_dflow->out_w = ((dflow->out_w + 1) >> 1);
	else
		l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);

	r_dflow->out_w = dflow->out_w - l_dflow->out_w;

	l_dflow->out_x = dflow->out_x;
	r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;

	/* calculate the scaling crop */
	/* the scalers output more data than needed and crop the excess */
	if (r90) {
		l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
		r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
	} else {
		l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
		r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
	}

	l_dflow->left_crop  = 0;
	l_dflow->right_crop = l_out - l_dflow->out_w;
	r_dflow->left_crop  = r_out - r_dflow->out_w;
	r_dflow->right_crop = 0;

	/* out_w includes the crop length */
	l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
	r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
}

/* For layer split, a plane state will be split into two data flows and
 * handled by two separate komeda layer input pipelines. komeda supports two
 * types of layer split:
 * - non-scaling split:
 *             / layer-left -> \
 * plane_state                  compiz-> ...
 *             \ layer-right-> /
 *
 * - scaling split:
 *             / layer-left -> scaler->\
 * plane_state                          merger -> compiz-> ...
 *             \ layer-right-> scaler->/
 *
 * Since the merger only supports scalers as input, for a non-scaling split
 * the two layer data flows are output to the compiz directly, while for a
 * scaling split the two data flows are first merged by the merger, which
 * then outputs one merged data flow to the compiz.
 */
int komeda_build_layer_split_data_flow(struct komeda_layer *left,
				       struct komeda_plane_state *kplane_st,
				       struct komeda_crtc_state *kcrtc_st,
				       struct komeda_data_flow_cfg *dflow)
{
	struct drm_plane *plane = kplane_st->base.plane;
	struct komeda_pipeline *pipe = left->base.pipeline;
	struct komeda_layer *right = left->right;
	struct komeda_data_flow_cfg l_dflow, r_dflow;
	int err;

	komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow);

	DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: "
			 "src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
			 left->base.name, right->base.name,
			 plane->base.id, plane->name,
			 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
			 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);

	err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow);
	if (err)
		return err;

	err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow);
	if (err)
		return err;

	/* The rotation has been handled by the layers, so adjust the data flow */
	komeda_rotate_data_flow(dflow, dflow->rot);

	/* The left and right dflows have already been fed to the compiz,
	 * no merger is needed to merge them.
	 */
	if (r_dflow.input.component == l_dflow.input.component)
		return 0;

	/* line merger path */
	err = komeda_merger_validate(pipe->merger, plane, kcrtc_st,
				     &l_dflow, &r_dflow, dflow);
	if (err)
		return err;

	err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);

	return err;
}

/* writeback data path: compiz -> scaler -> wb_layer -> memory */
int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
			      struct drm_connector_state *conn_st,
			      struct komeda_crtc_state *kcrtc_st,
			      struct komeda_data_flow_cfg *dflow)
{
	struct drm_connector *conn = conn_st->connector;
	int err;

	err = komeda_scaler_validate(conn, kcrtc_st, dflow);
	if (err)
		return err;

	return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
}

/* writeback scaling split data path:
 *                   /-> scaler ->\
 * compiz -> splitter              merger -> wb_layer -> memory
 *                   \-> scaler ->/
 */
int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
				    struct drm_connector_state *conn_st,
				    struct komeda_crtc_state *kcrtc_st,
				    struct komeda_data_flow_cfg *dflow)
{
	struct komeda_pipeline *pipe = wb_layer->base.pipeline;
	struct drm_connector *conn = conn_st->connector;
	struct komeda_data_flow_cfg l_dflow, r_dflow;
	int err;

	err = komeda_splitter_validate(pipe->splitter, conn_st,
				       dflow, &l_dflow, &r_dflow);
	if (err)
		return err;

	err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow);
	if (err)
		return err;

	err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow);
	if (err)
		return err;

	err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st,
				     &l_dflow, &r_dflow, dflow);
	if (err)
		return err;

	return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
}

/* build display output data flow, the data path is:
 * compiz -> improc -> timing_ctrlr
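 * When a slave pipeline is active, its compiz output is first fed into the
 * master compiz as one of its inputs:
 * slave compiz -> master compiz -> improc -> timing_ctrlr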
 */
int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
				   struct komeda_crtc_state *kcrtc_st)
{
	struct komeda_pipeline *master = kcrtc->master;
	struct komeda_pipeline *slave  = kcrtc->slave;
	struct komeda_data_flow_cfg m_dflow; /* master data flow */
	struct komeda_data_flow_cfg s_dflow; /* slave data flow */
	int err;

	memset(&m_dflow, 0, sizeof(m_dflow));
	memset(&s_dflow, 0, sizeof(s_dflow));

	if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) {
		err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow);
		if (err)
			return err;

		/* merge the slave dflow into master pipeline */
		err = komeda_compiz_set_input(master->compiz, kcrtc_st,
					      &s_dflow);
		if (err)
			return err;
	}

	err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
	if (err)
		return err;

	err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
	if (err)
		return err;

	err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
	if (err)
		return err;

	return 0;
}

static void
komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
				   struct komeda_pipeline_state *new)
{
	struct drm_atomic_state *drm_st = new->obj.state;
	struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
	struct komeda_component_state *c_st;
	struct komeda_component *c;
	u32 disabling_comps, id;

	WARN_ON(!old);

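	/* components that are active in the old state but no longer active in
	 * the new one are the ones being disabled
	 */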
	disabling_comps = (~new->active_comps) & old->active_comps;

	/* unbind all components that are being disabled */
	dp_for_each_set_bit(id, disabling_comps) {
		c = komeda_pipeline_get_component(pipe, id);
		c_st = komeda_component_get_state_and_set_user(c,
				drm_st, NULL, new->crtc);
		WARN_ON(IS_ERR(c_st));
	}
}

/* release unclaimed pipeline resources */
int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
				       struct komeda_crtc_state *kcrtc_st)
{
	struct drm_atomic_state *drm_st = kcrtc_st->base.state;
	struct komeda_pipeline_state *st;

	/* ignore the pipeline which is not affected */
	if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
		return 0;

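	/* If the pipeline stays active on this CRTC, just fetch its new state;
	 * otherwise detach the pipeline from the CRTC so that any components
	 * it no longer needs can be unbound below.
	 */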
	if (has_bit(pipe->id, kcrtc_st->active_pipes))
		st = komeda_pipeline_get_new_state(pipe, drm_st);
	else
		st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);

	if (WARN_ON(IS_ERR_OR_NULL(st)))
		return -EINVAL;

	komeda_pipeline_unbound_components(pipe, st);

	return 0;
}

void komeda_pipeline_disable(struct komeda_pipeline *pipe,
			     struct drm_atomic_state *old_state)
{
	struct komeda_pipeline_state *old;
	struct komeda_component *c;
	struct komeda_component_state *c_st;
	u32 id, disabling_comps = 0;

	old = komeda_pipeline_get_old_state(pipe, old_state);

	disabling_comps = old->active_comps;
	DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
			 pipe->id, disabling_comps);

	dp_for_each_set_bit(id, disabling_comps) {
		c = komeda_pipeline_get_component(pipe, id);
		c_st = priv_to_comp_st(c->obj.state);

		/*
		 * If we disabled a component then all active_inputs should be
		 * put in the list of changed_active_inputs, so they get
		 * re-enabled.
		 * This usually happens during a modeset when the pipeline is
		 * first disabled and then the actual state gets committed
		 * again.
		 */
		c_st->changed_active_inputs |= c_st->active_inputs;

		c->funcs->disable(c);
	}
}

void komeda_pipeline_update(struct komeda_pipeline *pipe,
			    struct drm_atomic_state *old_state)
{
	struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
	struct komeda_pipeline_state *old;
	struct komeda_component *c;
	u32 id, changed_comps = 0;

	old = komeda_pipeline_get_old_state(pipe, old_state);

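	/* a component needs attention if it is active in either the old or the
	 * new state: still-active ones get a HW update, the rest get disabled
	 */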
	changed_comps = new->active_comps | old->active_comps;

	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
			 pipe->id, new->active_comps, changed_comps);

	dp_for_each_set_bit(id, changed_comps) {
		c = komeda_pipeline_get_component(pipe, id);

		if (new->active_comps & BIT(c->id))
			c->funcs->update(c, priv_to_comp_st(c->obj.state));
		else
			c->funcs->disable(c);
	}
}