1// SPDX-License-Identifier: GPL-2.0
2/*
3 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4 * Author: James.Qian.Wang <james.qian.wang@arm.com>
5 *
6 */
7
8#include <drm/drm_print.h>
9#include <linux/clk.h>
10#include "komeda_dev.h"
11#include "komeda_kms.h"
12#include "komeda_pipeline.h"
13#include "komeda_framebuffer.h"
14
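/* A user "switch" only happens when both the old and the new user are valid
 * and differ; binding a free component or releasing a bound one is not a
 * switch.
 */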
15static inline bool is_switching_user(void *old, void *new)
16{
17 if (!old || !new)
18 return false;
19
20 return old != new;
21}
22
23static struct komeda_pipeline_state *
24komeda_pipeline_get_state(struct komeda_pipeline *pipe,
25 struct drm_atomic_state *state)
26{
27 struct drm_private_state *priv_st;
28
29 priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
30 if (IS_ERR(priv_st))
31 return ERR_CAST(priv_st);
32
33 return priv_to_pipe_st(priv_st);
34}
35
36struct komeda_pipeline_state *
37komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
38 struct drm_atomic_state *state)
39{
40 struct drm_private_state *priv_st;
41
42 priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
43 if (priv_st)
44 return priv_to_pipe_st(priv_st);
45 return NULL;
46}
47
48static struct komeda_pipeline_state *
49komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
50 struct drm_atomic_state *state)
51{
52 struct drm_private_state *priv_st;
53
54 priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
55 if (priv_st)
56 return priv_to_pipe_st(priv_st);
57 return NULL;
58}
59
60/* Assign pipeline for crtc */
61static struct komeda_pipeline_state *
62komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
63 struct drm_atomic_state *state,
64 struct drm_crtc *crtc)
65{
66 struct komeda_pipeline_state *st;
67
68 st = komeda_pipeline_get_state(pipe, state);
69 if (IS_ERR(st))
70 return st;
71
72 if (is_switching_user(crtc, st->crtc)) {
73 DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
74 drm_crtc_index(crtc), pipe->id);
75 return ERR_PTR(-EBUSY);
76 }
77
78	/* pipeline can only be disabled when it is free or unused */
79 if (!crtc && st->active_comps) {
80 DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
81 return ERR_PTR(-EBUSY);
82 }
83
84 st->crtc = crtc;
85
86 if (crtc) {
87 struct komeda_crtc_state *kcrtc_st;
88
89 kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
90 crtc));
91
92 kcrtc_st->active_pipes |= BIT(pipe->id);
93 kcrtc_st->affected_pipes |= BIT(pipe->id);
94 }
95 return st;
96}
97
98static struct komeda_component_state *
99komeda_component_get_state(struct komeda_component *c,
100 struct drm_atomic_state *state)
101{
102 struct drm_private_state *priv_st;
103
104 WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
105
106 priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
107 if (IS_ERR(priv_st))
108 return ERR_CAST(priv_st);
109
110 return priv_to_comp_st(priv_st);
111}
112
113static struct komeda_component_state *
114komeda_component_get_old_state(struct komeda_component *c,
115 struct drm_atomic_state *state)
116{
117 struct drm_private_state *priv_st;
118
119 priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
120 if (priv_st)
121 return priv_to_comp_st(priv_st);
122 return NULL;
123}
124
125/**
126 * komeda_component_get_state_and_set_user() - get component state, set user
127 *
128 * @c: component to get state and set user for
129 * @state: global atomic state
130 * @user: the direct user, i.e. the binding user
131 * @crtc: the CRTC user, the big boss :)
132 *
133 * This function accepts two users:
134 * - The direct user: a plane/crtc/wb_connector, depending on the component
135 * - The big boss (CRTC)
136 * The CRTC is the big boss (the final user), because all component resources
137 * are eventually assigned to a CRTC: a layer is bound to a kms_plane, and
138 * that kms_plane is in turn bound to a CRTC.
139 *
140 * The big boss (CRTC) is used for the pipeline assignment: a &komeda_component
141 * is not independent, it belongs to a specific pipeline and cannot be assigned
142 * to a CRTC on its own. Only a pipeline can be shared between CRTCs, and a
143 * pipeline is assigned to a CRTC as a whole, including its internal components.
144 *
145 * So before setting a user on a komeda_component, first check the status of
146 * component->pipeline to see whether the pipeline is available on this
147 * specific CRTC. If the pipeline is busy (assigned to another CRTC), the
148 * component cannot be assigned to the direct user even if it is free itself.
149 */
150static struct komeda_component_state *
151komeda_component_get_state_and_set_user(struct komeda_component *c,
152 struct drm_atomic_state *state,
153 void *user,
154 struct drm_crtc *crtc)
155{
156 struct komeda_pipeline_state *pipe_st;
157 struct komeda_component_state *st;
158
159 /* First check if the pipeline is available */
160 pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
161 state, crtc);
162 if (IS_ERR(pipe_st))
163 return ERR_CAST(pipe_st);
164
165 st = komeda_component_get_state(c, state);
166 if (IS_ERR(st))
167 return st;
168
169 /* check if the component has been occupied */
170 if (is_switching_user(user, st->binding_user)) {
171 DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
172 return ERR_PTR(-EBUSY);
173 }
174
175 st->binding_user = user;
176 /* mark the component as active if user is valid */
177 if (st->binding_user)
178 pipe_st->active_comps |= BIT(c->id);
179
180 return st;
181}
182
183static void
184komeda_component_add_input(struct komeda_component_state *state,
185 struct komeda_component_output *input,
186 int idx)
187{
188 struct komeda_component *c = state->component;
189
190 WARN_ON((idx < 0 || idx >= c->max_active_inputs));
191
192	/* inputs[i] is only valid when it is active, so if input[i] is a newly
193	 * enabled input (it switched from disabled to enabled), the old
194	 * inputs[i] is undefined (NOT zeroed). We cannot rely on memcmp here
195	 * and must directly mark it as changed.
196	 */
197 if (!has_bit(idx, state->affected_inputs) ||
198 memcmp(&state->inputs[idx], input, sizeof(*input))) {
199 memcpy(&state->inputs[idx], input, sizeof(*input));
200 state->changed_active_inputs |= BIT(idx);
201 }
202 state->active_inputs |= BIT(idx);
203 state->affected_inputs |= BIT(idx);
204}
205
206static int
207komeda_component_check_input(struct komeda_component_state *state,
208 struct komeda_component_output *input,
209 int idx)
210{
211 struct komeda_component *c = state->component;
212
213 if ((idx < 0) || (idx >= c->max_active_inputs)) {
214 DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n",
215 input->component->name, c->name, idx);
216 return -EINVAL;
217 }
218
219 if (has_bit(idx, state->active_inputs)) {
220 DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n",
221 input->component->name, c->name, idx);
222 return -EINVAL;
223 }
224
225 return 0;
226}
227
228static void
229komeda_component_set_output(struct komeda_component_output *output,
230 struct komeda_component *comp,
231 u8 output_port)
232{
233 output->component = comp;
234 output->output_port = output_port;
235}
236
237static int
238komeda_component_validate_private(struct komeda_component *c,
239 struct komeda_component_state *st)
240{
241 int err;
242
243 if (!c->funcs->validate)
244 return 0;
245
246 err = c->funcs->validate(c, st);
247 if (err)
248 DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
249
250 return err;
251}
252
253/* Get a currently available scaler from the component->supported_outputs */
254static struct komeda_scaler *
255komeda_component_get_avail_scaler(struct komeda_component *c,
256 struct drm_atomic_state *state)
257{
258 struct komeda_pipeline_state *pipe_st;
259 u32 avail_scalers;
260
261 pipe_st = komeda_pipeline_get_state(c->pipeline, state);
262 if (!pipe_st)
263 return NULL;
264
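	/* XOR the active scalers with the full scaler mask to pick out the
	 * scalers that are still free in this pipeline.
	 */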
265 avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
266 KOMEDA_PIPELINE_SCALERS;
267
268 c = komeda_component_pickup_output(c, avail_scalers);
269
270 return to_scaler(c);
271}
272
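/* After a 90/270 degree rotation the layer output is rotated, so the
 * width/height seen by the following stages have to be swapped.
 */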
273static void
274komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot)
275{
276 if (drm_rotation_90_or_270(rot)) {
277 swap(dflow->in_h, dflow->in_w);
278 swap(dflow->total_in_h, dflow->total_in_w);
279 }
280}
281
282static int
283komeda_layer_check_cfg(struct komeda_layer *layer,
284 struct komeda_fb *kfb,
285 struct komeda_data_flow_cfg *dflow)
286{
287 u32 src_x, src_y, src_w, src_h;
288
289 if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
290 return -EINVAL;
291
292 if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
293 src_x = dflow->out_x;
294 src_y = dflow->out_y;
295 src_w = dflow->out_w;
296 src_h = dflow->out_h;
297 } else {
298 src_x = dflow->in_x;
299 src_y = dflow->in_y;
300 src_w = dflow->in_w;
301 src_h = dflow->in_h;
302 }
303
304 if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
305 return -EINVAL;
306
307 if (!in_range(&layer->hsize_in, src_w)) {
308		DRM_DEBUG_ATOMIC("invalid src_w %d.\n", src_w);
309 return -EINVAL;
310 }
311
312 if (!in_range(&layer->vsize_in, src_h)) {
313		DRM_DEBUG_ATOMIC("invalid src_h %d.\n", src_h);
314 return -EINVAL;
315 }
316
317 return 0;
318}
319
320static int
321komeda_layer_validate(struct komeda_layer *layer,
322 struct komeda_plane_state *kplane_st,
323 struct komeda_data_flow_cfg *dflow)
324{
325 struct drm_plane_state *plane_st = &kplane_st->base;
326 struct drm_framebuffer *fb = plane_st->fb;
327 struct komeda_fb *kfb = to_kfb(fb);
328 struct komeda_component_state *c_st;
329 struct komeda_layer_state *st;
330 int i, err;
331
332 err = komeda_layer_check_cfg(layer, kfb, dflow);
333 if (err)
334 return err;
335
336 c_st = komeda_component_get_state_and_set_user(&layer->base,
337 plane_st->state, plane_st->plane, plane_st->crtc);
338 if (IS_ERR(c_st))
339 return PTR_ERR(c_st);
340
341 st = to_layer_st(c_st);
342
343 st->rot = dflow->rot;
344
345 if (fb->modifier) {
346 st->hsize = kfb->aligned_w;
347 st->vsize = kfb->aligned_h;
348 st->afbc_crop_l = dflow->in_x;
349 st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w;
350 st->afbc_crop_t = dflow->in_y;
351 st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h;
352 } else {
353 st->hsize = dflow->in_w;
354 st->vsize = dflow->in_h;
355 st->afbc_crop_l = 0;
356 st->afbc_crop_r = 0;
357 st->afbc_crop_t = 0;
358 st->afbc_crop_b = 0;
359 }
360
361 for (i = 0; i < fb->format->num_planes; i++)
362 st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
363 dflow->in_y, i);
364
365 err = komeda_component_validate_private(&layer->base, c_st);
366 if (err)
367 return err;
368
369 /* update the data flow for the next stage */
370 komeda_component_set_output(&dflow->input, &layer->base, 0);
371
372 /*
373	 * The rotation has been handled by the layer, so adjust the data flow
374	 * for the next stage.
375 */
376 komeda_rotate_data_flow(dflow, st->rot);
377
378 return 0;
379}
380
381static int
382komeda_wb_layer_validate(struct komeda_layer *wb_layer,
383 struct drm_connector_state *conn_st,
384 struct komeda_data_flow_cfg *dflow)
385{
386 struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
387 struct komeda_component_state *c_st;
388 struct komeda_layer_state *st;
389 int i, err;
390
391 err = komeda_layer_check_cfg(wb_layer, kfb, dflow);
392 if (err)
393 return err;
394
395 c_st = komeda_component_get_state_and_set_user(&wb_layer->base,
396 conn_st->state, conn_st->connector, conn_st->crtc);
397 if (IS_ERR(c_st))
398 return PTR_ERR(c_st);
399
400 st = to_layer_st(c_st);
401
402 st->hsize = dflow->out_w;
403 st->vsize = dflow->out_h;
404
405 for (i = 0; i < kfb->base.format->num_planes; i++)
406 st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x,
407 dflow->out_y, i);
408
409 komeda_component_add_input(&st->base, &dflow->input, 0);
410 komeda_component_set_output(&dflow->input, &wb_layer->base, 0);
411
412 return 0;
413}
414
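/* A scaling ratio is acceptable when size_out <= size_in * max_upscaling
 * and size_in <= size_out * max_downscaling.
 */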
415static bool scaling_ratio_valid(u32 size_in, u32 size_out,
416 u32 max_upscaling, u32 max_downscaling)
417{
418 if (size_out > size_in * max_upscaling)
419 return false;
420 else if (size_in > size_out * max_downscaling)
421 return false;
422 return true;
423}
424
425static int
426komeda_scaler_check_cfg(struct komeda_scaler *scaler,
427 struct komeda_crtc_state *kcrtc_st,
428 struct komeda_data_flow_cfg *dflow)
429{
430 u32 hsize_in, vsize_in, hsize_out, vsize_out;
431 u32 max_upscaling;
432
433 hsize_in = dflow->in_w;
434 vsize_in = dflow->in_h;
435 hsize_out = dflow->out_w;
436 vsize_out = dflow->out_h;
437
438 if (!in_range(&scaler->hsize, hsize_in) ||
439 !in_range(&scaler->hsize, hsize_out)) {
440 DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
441 return -EINVAL;
442 }
443
444 if (!in_range(&scaler->vsize, vsize_in) ||
445 !in_range(&scaler->vsize, vsize_out)) {
446 DRM_DEBUG_ATOMIC("Invalid vertical sizes");
447 return -EINVAL;
448 }
449
450	/* If the input comes from compiz, the scaling is for writeback, and
451	 * the scaler cannot do upscaling for writeback
452 */
453 if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS))
454 max_upscaling = 1;
455 else
456 max_upscaling = scaler->max_upscaling;
457
458 if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling,
459 scaler->max_downscaling)) {
460 DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio");
461 return -EINVAL;
462 }
463
464 if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling,
465 scaler->max_downscaling)) {
466 DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio");
467 return -EINVAL;
468 }
469
470 if (hsize_in > hsize_out || vsize_in > vsize_out) {
471 struct komeda_pipeline *pipe = scaler->base.pipeline;
472 int err;
473
474 err = pipe->funcs->downscaling_clk_check(pipe,
475 &kcrtc_st->base.adjusted_mode,
476 komeda_crtc_get_aclk(kcrtc_st), dflow);
477 if (err) {
478 DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
479 return err;
480 }
481 }
482
483 return 0;
484}
485
486static int
487komeda_scaler_validate(void *user,
488 struct komeda_crtc_state *kcrtc_st,
489 struct komeda_data_flow_cfg *dflow)
490{
491 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
492 struct komeda_component_state *c_st;
493 struct komeda_scaler_state *st;
494 struct komeda_scaler *scaler;
495 int err = 0;
496
497 if (!(dflow->en_scaling || dflow->en_img_enhancement))
498 return 0;
499
500 scaler = komeda_component_get_avail_scaler(dflow->input.component,
501 drm_st);
502 if (!scaler) {
503 DRM_DEBUG_ATOMIC("No scaler available");
504 return -EINVAL;
505 }
506
507 err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow);
508 if (err)
509 return err;
510
511 c_st = komeda_component_get_state_and_set_user(&scaler->base,
512 drm_st, user, kcrtc_st->base.crtc);
513 if (IS_ERR(c_st))
514 return PTR_ERR(c_st);
515
516 st = to_scaler_st(c_st);
517
518 st->hsize_in = dflow->in_w;
519 st->vsize_in = dflow->in_h;
520 st->hsize_out = dflow->out_w;
521 st->vsize_out = dflow->out_h;
522 st->right_crop = dflow->right_crop;
523 st->left_crop = dflow->left_crop;
524 st->total_vsize_in = dflow->total_in_h;
525 st->total_hsize_in = dflow->total_in_w;
526 st->total_hsize_out = dflow->total_out_w;
527
528 /* Enable alpha processing if the next stage needs the pixel alpha */
529 st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE;
530 st->en_scaling = dflow->en_scaling;
531 st->en_img_enhancement = dflow->en_img_enhancement;
532 st->en_split = dflow->en_split;
533 st->right_part = dflow->right_part;
534
535 komeda_component_add_input(&st->base, &dflow->input, 0);
536 komeda_component_set_output(&dflow->input, &scaler->base, 0);
537 return err;
538}
539
540static void komeda_split_data_flow(struct komeda_scaler *scaler,
541 struct komeda_data_flow_cfg *dflow,
542 struct komeda_data_flow_cfg *l_dflow,
543 struct komeda_data_flow_cfg *r_dflow);
544
545static int
546komeda_splitter_validate(struct komeda_splitter *splitter,
547 struct drm_connector_state *conn_st,
548 struct komeda_data_flow_cfg *dflow,
549 struct komeda_data_flow_cfg *l_output,
550 struct komeda_data_flow_cfg *r_output)
551{
552 struct komeda_component_state *c_st;
553 struct komeda_splitter_state *st;
554
555 if (!splitter) {
556 DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n");
557 return -EINVAL;
558 }
559
560 if (!in_range(&splitter->hsize, dflow->in_w)) {
561 DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n",
562 dflow->in_w);
563 return -EINVAL;
564 }
565
566 if (!in_range(&splitter->vsize, dflow->in_h)) {
567 DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
568 dflow->in_h);
569 return -EINVAL;
570 }
571
572 c_st = komeda_component_get_state_and_set_user(&splitter->base,
573 conn_st->state, conn_st->connector, conn_st->crtc);
574
575 if (IS_ERR(c_st))
576 return PTR_ERR(c_st);
577
578 komeda_split_data_flow(splitter->base.pipeline->scalers[0],
579 dflow, l_output, r_output);
580
581 st = to_splitter_st(c_st);
582 st->hsize = dflow->in_w;
583 st->vsize = dflow->in_h;
584 st->overlap = dflow->overlap;
585
586 komeda_component_add_input(&st->base, &dflow->input, 0);
587 komeda_component_set_output(&l_output->input, &splitter->base, 0);
588 komeda_component_set_output(&r_output->input, &splitter->base, 1);
589
590 return 0;
591}
592
593static int
594komeda_merger_validate(struct komeda_merger *merger,
595 void *user,
596 struct komeda_crtc_state *kcrtc_st,
597 struct komeda_data_flow_cfg *left_input,
598 struct komeda_data_flow_cfg *right_input,
599 struct komeda_data_flow_cfg *output)
600{
601 struct komeda_component_state *c_st;
602 struct komeda_merger_state *st;
603 int err = 0;
604
605 if (!merger) {
606 DRM_DEBUG_ATOMIC("No merger is available");
607 return -EINVAL;
608 }
609
610 if (!in_range(&merger->hsize_merged, output->out_w)) {
611 DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n",
612 output->out_w);
613 return -EINVAL;
614 }
615
616 if (!in_range(&merger->vsize_merged, output->out_h)) {
617 DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n",
618 output->out_h);
619 return -EINVAL;
620 }
621
622 c_st = komeda_component_get_state_and_set_user(&merger->base,
623 kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc);
624
625 if (IS_ERR(c_st))
626 return PTR_ERR(c_st);
627
628 st = to_merger_st(c_st);
629 st->hsize_merged = output->out_w;
630 st->vsize_merged = output->out_h;
631
632 komeda_component_add_input(c_st, &left_input->input, 0);
633 komeda_component_add_input(c_st, &right_input->input, 1);
634 komeda_component_set_output(&output->input, &merger->base, 0);
635
636 return err;
637}
638
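/* The composition size always follows the hdisplay/vdisplay of the CRTC's
 * adjusted display mode.
 */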
639void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
640 u16 *hsize, u16 *vsize)
641{
642 struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
643
644 if (hsize)
645 *hsize = m->hdisplay;
646 if (vsize)
647 *vsize = m->vdisplay;
648}
649
650static int
651komeda_compiz_set_input(struct komeda_compiz *compiz,
652 struct komeda_crtc_state *kcrtc_st,
653 struct komeda_data_flow_cfg *dflow)
654{
655 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
656 struct komeda_component_state *c_st, *old_st;
657 struct komeda_compiz_input_cfg *cin;
658 u16 compiz_w, compiz_h;
659 int idx = dflow->blending_zorder;
660
661 pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
662 /* check display rect */
663 if ((dflow->out_x + dflow->out_w > compiz_w) ||
664 (dflow->out_y + dflow->out_h > compiz_h) ||
665 dflow->out_w == 0 || dflow->out_h == 0) {
666 DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
667 dflow->out_x, dflow->out_y,
668 dflow->out_w, dflow->out_h);
669 return -EINVAL;
670 }
671
672 c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
673 kcrtc_st->base.crtc, kcrtc_st->base.crtc);
674 if (IS_ERR(c_st))
675 return PTR_ERR(c_st);
676
677 if (komeda_component_check_input(c_st, &dflow->input, idx))
678 return -EINVAL;
679
680 cin = &(to_compiz_st(c_st)->cins[idx]);
681
682 cin->hsize = dflow->out_w;
683 cin->vsize = dflow->out_h;
684 cin->hoffset = dflow->out_x;
685 cin->voffset = dflow->out_y;
686 cin->pixel_blend_mode = dflow->pixel_blend_mode;
687 cin->layer_alpha = dflow->layer_alpha;
688
689 old_st = komeda_component_get_old_state(&compiz->base, drm_st);
690 WARN_ON(!old_st);
691
692 /* compare with old to check if this input has been changed */
693 if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
694 c_st->changed_active_inputs |= BIT(idx);
695
696 komeda_component_add_input(c_st, &dflow->input, idx);
697 komeda_component_set_output(&dflow->input, &compiz->base, 0);
698
699 return 0;
700}
701
702static int
703komeda_compiz_validate(struct komeda_compiz *compiz,
704 struct komeda_crtc_state *state,
705 struct komeda_data_flow_cfg *dflow)
706{
707 struct komeda_component_state *c_st;
708 struct komeda_compiz_state *st;
709
710 c_st = komeda_component_get_state_and_set_user(&compiz->base,
711 state->base.state, state->base.crtc, state->base.crtc);
712 if (IS_ERR(c_st))
713 return PTR_ERR(c_st);
714
715 st = to_compiz_st(c_st);
716
717 pipeline_composition_size(state, &st->hsize, &st->vsize);
718
719 komeda_component_set_output(&dflow->input, &compiz->base, 0);
720
721	/* the compiz output dflow will be fed into the next pipeline stage;
722	 * prepare the data flow configuration for that stage
723 */
724 if (dflow) {
725 dflow->in_w = st->hsize;
726 dflow->in_h = st->vsize;
727 dflow->out_w = dflow->in_w;
728 dflow->out_h = dflow->in_h;
729		/* the output data of compiz doesn't have alpha; it can only be
730		 * used as the bottom layer when blending it with the master layers
731 */
732 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
733 dflow->layer_alpha = 0xFF;
734 dflow->blending_zorder = 0;
735 }
736
737 return 0;
738}
739
740static int
741komeda_improc_validate(struct komeda_improc *improc,
742 struct komeda_crtc_state *kcrtc_st,
743 struct komeda_data_flow_cfg *dflow)
744{
745 struct drm_crtc *crtc = kcrtc_st->base.crtc;
746 struct komeda_component_state *c_st;
747 struct komeda_improc_state *st;
748
749 c_st = komeda_component_get_state_and_set_user(&improc->base,
750 kcrtc_st->base.state, crtc, crtc);
751 if (IS_ERR(c_st))
752 return PTR_ERR(c_st);
753
754 st = to_improc_st(c_st);
755
756 st->hsize = dflow->in_w;
757 st->vsize = dflow->in_h;
758
759 komeda_component_add_input(&st->base, &dflow->input, 0);
760 komeda_component_set_output(&dflow->input, &improc->base, 0);
761
762 return 0;
763}
764
765static int
766komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
767 struct komeda_crtc_state *kcrtc_st,
768 struct komeda_data_flow_cfg *dflow)
769{
770 struct drm_crtc *crtc = kcrtc_st->base.crtc;
771 struct komeda_timing_ctrlr_state *st;
772 struct komeda_component_state *c_st;
773
774 c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
775 kcrtc_st->base.state, crtc, crtc);
776 if (IS_ERR(c_st))
777 return PTR_ERR(c_st);
778
779 st = to_ctrlr_st(c_st);
780
781 komeda_component_add_input(&st->base, &dflow->input, 0);
782 komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
783
784 return 0;
785}
786
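/* Fill in the remaining data flow fields that are derived from the already
 * configured source/display rectangles and the framebuffer format: the total
 * sizes, the blend mode fixup for formats without alpha, and whether scaling,
 * image enhancement and split need to be enabled.
 */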
787void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
788 struct komeda_data_flow_cfg *dflow,
789 struct drm_framebuffer *fb)
790{
791 struct komeda_scaler *scaler = layer->base.pipeline->scalers[0];
792 u32 w = dflow->in_w;
793 u32 h = dflow->in_h;
794
795 dflow->total_in_w = dflow->in_w;
796 dflow->total_in_h = dflow->in_h;
797 dflow->total_out_w = dflow->out_w;
798
799 /* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
800 if (!fb->format->has_alpha)
801 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
802
803 if (drm_rotation_90_or_270(dflow->rot))
804 swap(w, h);
805
806 dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
807 dflow->is_yuv = fb->format->is_yuv;
808
809 /* try to enable image enhancer if data flow is a 2x+ upscaling */
810 dflow->en_img_enhancement = dflow->out_w >= 2 * w ||
811 dflow->out_h >= 2 * h;
812
813	/* try to enable split if the scaling exceeds the scaler's acceptable
814 * input/output range.
815 */
816 if (dflow->en_scaling && scaler)
817 dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
818 !in_range(&scaler->hsize, dflow->out_w);
819}
820
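/* Check whether the component feeding this data flow is one the merger
 * accepts as an input (the merger only supports scalers as inputs).
 */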
821static bool merger_is_available(struct komeda_pipeline *pipe,
822 struct komeda_data_flow_cfg *dflow)
823{
824 u32 avail_inputs = pipe->merger ?
825 pipe->merger->base.supported_inputs : 0;
826
827 return has_bit(dflow->input.component->id, avail_inputs);
828}
829
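/* Build the data flow for a single plane: layer -> (scaler) -> compiz. When
 * the flow is one half of a split that can be merged, stop after the scaler;
 * the caller routes both halves through the merger instead.
 */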
830int komeda_build_layer_data_flow(struct komeda_layer *layer,
831 struct komeda_plane_state *kplane_st,
832 struct komeda_crtc_state *kcrtc_st,
833 struct komeda_data_flow_cfg *dflow)
834{
835 struct drm_plane *plane = kplane_st->base.plane;
836 struct komeda_pipeline *pipe = layer->base.pipeline;
837 int err;
838
839 DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
840 layer->base.name, plane->base.id, plane->name,
841 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
842 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
843
844 err = komeda_layer_validate(layer, kplane_st, dflow);
845 if (err)
846 return err;
847
848 err = komeda_scaler_validate(plane, kcrtc_st, dflow);
849 if (err)
850 return err;
851
852	/* if split is enabled, check whether the data flow can be put into the merger */
853 if (dflow->en_split && merger_is_available(pipe, dflow))
854 return 0;
855
856 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
857
858 return err;
859}
860
861/*
862 * Split is introduced to work around the scaler's input/output size limits.
863 * The idea is simple: if one scaler cannot fit the requirement, use two.
864 * Split divides the big source image into two halves (left/right) and does
865 * the scaling with two scalers separately and independently.
866 * But splitting also introduces an edge problem in the middle of the image
867 * when scaling. To avoid it, the split isn't a simple half-and-half; extra
868 * pixels (overlap) are added to both sides, so after the split the left/right
869 * will be:
870 * - left: [0, src_length/2 + overlap]
871 * - right: [src_length/2 - overlap, src_length]
872 * The extra overlap eliminates the edge problem, but it may also generate
873 * unnecessary pixels when scaling, which need to be cropped before the scaler
874 * outputs the result to the next stage. How to crop depends on where the
875 * unneeded pixels are, in other words on which side the overlap was added:
876 * - left: crop the right
877 * - right: crop the left
878 * The diagram for how to do the split
879 * The diagram below shows how the split is done:
880 * <---------------------left->out_w ---------------->
881 * |--------------------------------|---right_crop-----| <- left after split
882 * \ \ /
883 * \ \<--overlap--->/
884 * |-----------------|-------------|(Middle)------|-----------------| <- src
885 * /<---overlap--->\ \
886 * / \ \
887 * right after split->|-----left_crop---|--------------------------------|
888 * ^<------------------- right->out_w --------------->^
889 *
890 * NOTE: To be consistent with the HW, output_w always contains the crop size.
891 */
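/* A worked example with purely illustrative numbers (assuming a hypothetical
 * scaling_split_overlap of 8 pixels): a 4096-pixel-wide source scaled down to
 * a 1920-pixel-wide display rect is split into two 2056-pixel-wide inputs
 * (2048 + 8 overlap). Each half's scaler output is 1920 * 2056 / 4096 = 963
 * pixels, of which 3 are cropped (right_crop on the left half, left_crop on
 * the right half), leaving 960 + 960 = 1920 visible pixels; per the NOTE
 * above, each half's out_w still carries the 3-pixel crop, i.e. 963.
 */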
892
893static void komeda_split_data_flow(struct komeda_scaler *scaler,
894 struct komeda_data_flow_cfg *dflow,
895 struct komeda_data_flow_cfg *l_dflow,
896 struct komeda_data_flow_cfg *r_dflow)
897{
898 bool r90 = drm_rotation_90_or_270(dflow->rot);
899 bool flip_h = has_flip_h(dflow->rot);
900 u32 l_out, r_out, overlap;
901
902 memcpy(l_dflow, dflow, sizeof(*dflow));
903 memcpy(r_dflow, dflow, sizeof(*dflow));
904
905 l_dflow->right_part = false;
906 r_dflow->right_part = true;
907 r_dflow->blending_zorder = dflow->blending_zorder + 1;
908
909 overlap = 0;
910 if (dflow->en_scaling && scaler)
911 overlap += scaler->scaling_split_overlap;
912
913	/* the original dflow may be fed into the splitter, which doesn't need
914	 * the enhancement overlap
915 */
916 dflow->overlap = overlap;
917
918 if (dflow->en_img_enhancement && scaler)
919 overlap += scaler->enh_split_overlap;
920
921 l_dflow->overlap = overlap;
922 r_dflow->overlap = overlap;
923
924	/* split the original content */
925	/* left/right here always means the left/right part of the display image,
926	 * not of the source image
927 */
928 /* DRM rotation is anti-clockwise */
929 if (r90) {
930 if (dflow->en_scaling) {
931 l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
932 r_dflow->in_h = l_dflow->in_h;
933 } else if (dflow->en_img_enhancement) {
934 /* enhancer only */
935 l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
936 r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
937 } else {
938 /* split without scaler, no overlap */
939 l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
940 r_dflow->in_h = dflow->in_h - l_dflow->in_h;
941 }
942
943		/* Consider YUV formats: after the split, the split source w/h
944		 * may not be aligned to 2. There are two choices for such a case:
945		 * 1. the scaler is enabled (overlap != 0): align both the left
946		 *    and the right parts and let the scaler crop the extra data.
947		 * 2. the scaler is not enabled: only align the split left
948		 *    src/disp, and assign the remaining part to the right.
949 */
950 if ((overlap != 0) && dflow->is_yuv) {
951 l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
952 r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
953 }
954
955 if (flip_h)
956 l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
957 else
958 r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
959 } else {
960 if (dflow->en_scaling) {
961 l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
962 r_dflow->in_w = l_dflow->in_w;
963 } else if (dflow->en_img_enhancement) {
964 l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
965 r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
966 } else {
967 l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
968 r_dflow->in_w = dflow->in_w - l_dflow->in_w;
969 }
970
971 /* do YUV alignment when scaler enabled */
972 if ((overlap != 0) && dflow->is_yuv) {
973 l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
974 r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
975 }
976
977		/* with flip_h, the left display content comes from the right part of the source */
978 if (flip_h)
979 l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
980 else
981 r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
982 }
983
984 /* split the disp_rect */
985 if (dflow->en_scaling || dflow->en_img_enhancement)
986 l_dflow->out_w = ((dflow->out_w + 1) >> 1);
987 else
988 l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);
989
990 r_dflow->out_w = dflow->out_w - l_dflow->out_w;
991
992 l_dflow->out_x = dflow->out_x;
993 r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;
994
995 /* calculate the scaling crop */
996	/* each scaler outputs more data than its half displays; compute the crop */
997 if (r90) {
998 l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
999 r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
1000 } else {
1001 l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
1002 r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
1003 }
1004
1005 l_dflow->left_crop = 0;
1006 l_dflow->right_crop = l_out - l_dflow->out_w;
1007 r_dflow->left_crop = r_out - r_dflow->out_w;
1008 r_dflow->right_crop = 0;
1009
1010 /* out_w includes the crop length */
1011 l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
1012 r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
1013}
1014
1015/* For layer split, a plane state is split into two data flows and handled
1016 * by two separate komeda layer input pipelines. komeda supports two types of
1017 * layer split:
1018 * - non-scaling split:
1019 * / layer-left -> \
1020 * plane_state compiz-> ...
1021 * \ layer-right-> /
1022 *
1023 * - scaling split:
1024 * / layer-left -> scaler->\
1025 * plane_state merger -> compiz-> ...
1026 * \ layer-right-> scaler->/
1027 *
1028 * Since the merger only supports scalers as inputs, for a non-scaling split
1029 * the two layer data flows are output to compiz directly. For a scaling
1030 * split, the two data flows are first merged by the merger, which then
1031 * outputs one merged data flow to compiz.
1032 */
1033int komeda_build_layer_split_data_flow(struct komeda_layer *left,
1034 struct komeda_plane_state *kplane_st,
1035 struct komeda_crtc_state *kcrtc_st,
1036 struct komeda_data_flow_cfg *dflow)
1037{
1038 struct drm_plane *plane = kplane_st->base.plane;
1039 struct komeda_pipeline *pipe = left->base.pipeline;
1040 struct komeda_layer *right = left->right;
1041 struct komeda_data_flow_cfg l_dflow, r_dflow;
1042 int err;
1043
1044 komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow);
1045
1046 DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: "
1047 "src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
1048 left->base.name, right->base.name,
1049 plane->base.id, plane->name,
1050 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
1051 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
1052
1053 err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow);
1054 if (err)
1055 return err;
1056
1057 err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow);
1058 if (err)
1059 return err;
1060
1061	/* The rotation has been handled by the layer, so adjust the data flow */
1062 komeda_rotate_data_flow(dflow, dflow->rot);
1063
1064	/* the left and right dflows have already been merged into compiz,
1065	 * so the merger is not needed to combine them anymore.
1066 */
1067 if (r_dflow.input.component == l_dflow.input.component)
1068 return 0;
1069
1070 /* line merger path */
1071 err = komeda_merger_validate(pipe->merger, plane, kcrtc_st,
1072 &l_dflow, &r_dflow, dflow);
1073 if (err)
1074 return err;
1075
1076 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
1077
1078 return err;
1079}
1080
1081/* writeback data path: compiz -> scaler -> wb_layer -> memory */
1082int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
1083 struct drm_connector_state *conn_st,
1084 struct komeda_crtc_state *kcrtc_st,
1085 struct komeda_data_flow_cfg *dflow)
1086{
1087 struct drm_connector *conn = conn_st->connector;
1088 int err;
1089
1090 err = komeda_scaler_validate(conn, kcrtc_st, dflow);
1091 if (err)
1092 return err;
1093
1094 return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
1095}
1096
1097/* writeback scaling split data path:
1098 * /-> scaler ->\
1099 * compiz -> splitter merger -> wb_layer -> memory
1100 * \-> scaler ->/
1101 */
1102int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
1103 struct drm_connector_state *conn_st,
1104 struct komeda_crtc_state *kcrtc_st,
1105 struct komeda_data_flow_cfg *dflow)
1106{
1107 struct komeda_pipeline *pipe = wb_layer->base.pipeline;
1108 struct drm_connector *conn = conn_st->connector;
1109 struct komeda_data_flow_cfg l_dflow, r_dflow;
1110 int err;
1111
1112 err = komeda_splitter_validate(pipe->splitter, conn_st,
1113 dflow, &l_dflow, &r_dflow);
1114 if (err)
1115 return err;
1116 err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow);
1117 if (err)
1118 return err;
1119
1120 err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow);
1121 if (err)
1122 return err;
1123
1124 err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st,
1125 &l_dflow, &r_dflow, dflow);
1126 if (err)
1127 return err;
1128
1129 return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
1130}
1131
1132/* build display output data flow, the data path is:
1133 * compiz -> improc -> timing_ctrlr
1134 */
1135int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
1136 struct komeda_crtc_state *kcrtc_st)
1137{
1138 struct komeda_pipeline *master = kcrtc->master;
1139 struct komeda_pipeline *slave = kcrtc->slave;
1140 struct komeda_data_flow_cfg m_dflow; /* master data flow */
1141 struct komeda_data_flow_cfg s_dflow; /* slave data flow */
1142 int err;
1143
1144 memset(&m_dflow, 0, sizeof(m_dflow));
1145 memset(&s_dflow, 0, sizeof(s_dflow));
1146
1147 if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) {
1148 err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow);
1149 if (err)
1150 return err;
1151
1152 /* merge the slave dflow into master pipeline */
1153 err = komeda_compiz_set_input(master->compiz, kcrtc_st,
1154 &s_dflow);
1155 if (err)
1156 return err;
1157 }
1158
1159 err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
1160 if (err)
1161 return err;
1162
1163 err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
1164 if (err)
1165 return err;
1166
1167 err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
1168 if (err)
1169 return err;
1170
1171 return 0;
1172}
1173
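/* Unbind every component that is active in the old pipeline state but no
 * longer active in the new one, by setting its user to NULL.
 */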
1174static void
1175komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
1176 struct komeda_pipeline_state *new)
1177{
1178 struct drm_atomic_state *drm_st = new->obj.state;
1179 struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
1180 struct komeda_component_state *c_st;
1181 struct komeda_component *c;
1182 u32 disabling_comps, id;
1183
1184 WARN_ON(!old);
1185
1186 disabling_comps = (~new->active_comps) & old->active_comps;
1187
1188	/* unbind all components that are being disabled */
1189 dp_for_each_set_bit(id, disabling_comps) {
1190 c = komeda_pipeline_get_component(pipe, id);
1191 c_st = komeda_component_get_state_and_set_user(c,
1192 drm_st, NULL, new->crtc);
1193 WARN_ON(IS_ERR(c_st));
1194 }
1195}
1196
1197/* release unclaimed pipeline resources */
1198int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
1199 struct komeda_crtc_state *kcrtc_st)
1200{
1201 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
1202 struct komeda_pipeline_state *st;
1203
1204 /* ignore the pipeline which is not affected */
1205 if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
1206 return 0;
1207
1208 if (has_bit(pipe->id, kcrtc_st->active_pipes))
1209 st = komeda_pipeline_get_new_state(pipe, drm_st);
1210 else
1211 st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
1212
1213 if (WARN_ON(IS_ERR_OR_NULL(st)))
1214 return -EINVAL;
1215
1216 komeda_pipeline_unbound_components(pipe, st);
1217
1218 return 0;
1219}
1220
1221void komeda_pipeline_disable(struct komeda_pipeline *pipe,
1222 struct drm_atomic_state *old_state)
1223{
1224 struct komeda_pipeline_state *old;
1225 struct komeda_component *c;
1226 struct komeda_component_state *c_st;
1227 u32 id, disabling_comps = 0;
1228
1229 old = komeda_pipeline_get_old_state(pipe, old_state);
1230
1231 disabling_comps = old->active_comps;
1232 DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
1233 pipe->id, disabling_comps);
1234
1235 dp_for_each_set_bit(id, disabling_comps) {
1236 c = komeda_pipeline_get_component(pipe, id);
1237 c_st = priv_to_comp_st(c->obj.state);
1238
1239 /*
1240 * If we disabled a component then all active_inputs should be
1241 * put in the list of changed_active_inputs, so they get
1242 * re-enabled.
1243 * This usually happens during a modeset when the pipeline is
1244 * first disabled and then the actual state gets committed
1245 * again.
1246 */
1247 c_st->changed_active_inputs |= c_st->active_inputs;
1248
1249 c->funcs->disable(c);
1250 }
1251}
1252
1253void komeda_pipeline_update(struct komeda_pipeline *pipe,
1254 struct drm_atomic_state *old_state)
1255{
1256 struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
1257 struct komeda_pipeline_state *old;
1258 struct komeda_component *c;
1259 u32 id, changed_comps = 0;
1260
1261 old = komeda_pipeline_get_old_state(pipe, old_state);
1262
1263 changed_comps = new->active_comps | old->active_comps;
1264
1265 DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
1266 pipe->id, new->active_comps, changed_comps);
1267
1268 dp_for_each_set_bit(id, changed_comps) {
1269 c = komeda_pipeline_get_component(pipe, id);
1270
1271 if (new->active_comps & BIT(c->id))
1272 c->funcs->update(c, priv_to_comp_st(c->obj.state));
1273 else
1274 c->funcs->disable(c);
1275 }
1276}
710 if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
711 c_st->changed_active_inputs |= BIT(idx);
712
713 komeda_component_add_input(c_st, &dflow->input, idx);
714 komeda_component_set_output(&dflow->input, &compiz->base, 0);
715
716 return 0;
717}
718
719static int
720komeda_compiz_validate(struct komeda_compiz *compiz,
721 struct komeda_crtc_state *state,
722 struct komeda_data_flow_cfg *dflow)
723{
724 struct komeda_component_state *c_st;
725 struct komeda_compiz_state *st;
726
727 c_st = komeda_component_get_state_and_set_user(&compiz->base,
728 state->base.state, state->base.crtc, state->base.crtc);
729 if (IS_ERR(c_st))
730 return PTR_ERR(c_st);
731
732 st = to_compiz_st(c_st);
733
734 pipeline_composition_size(state, &st->hsize, &st->vsize);
735
736 komeda_component_set_output(&dflow->input, &compiz->base, 0);
737
738 /* The compiz output dflow will be fed to the next pipeline stage;
739 * prepare the data flow configuration for that stage
740 */
741 if (dflow) {
742 dflow->in_w = st->hsize;
743 dflow->in_h = st->vsize;
744 dflow->out_w = dflow->in_w;
745 dflow->out_h = dflow->in_h;
746 /* The compiz output has no alpha; it can only be used as the
747 * bottom layer when blending it with the master layers
748 */
749 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
750 dflow->layer_alpha = 0xFF;
751 dflow->blending_zorder = 0;
752 }
753
754 return 0;
755}
756
757static int
758komeda_improc_validate(struct komeda_improc *improc,
759 struct komeda_crtc_state *kcrtc_st,
760 struct komeda_data_flow_cfg *dflow)
761{
762 struct drm_crtc *crtc = kcrtc_st->base.crtc;
763 struct drm_crtc_state *crtc_st = &kcrtc_st->base;
764 struct komeda_component_state *c_st;
765 struct komeda_improc_state *st;
766
767 c_st = komeda_component_get_state_and_set_user(&improc->base,
768 kcrtc_st->base.state, crtc, crtc);
769 if (IS_ERR(c_st))
770 return PTR_ERR(c_st);
771
772 st = to_improc_st(c_st);
773
774 st->hsize = dflow->in_w;
775 st->vsize = dflow->in_h;
776
777 if (drm_atomic_crtc_needs_modeset(crtc_st)) {
778 u32 output_depths, output_formats;
779 u32 avail_depths, avail_formats;
780
781 komeda_crtc_get_color_config(crtc_st, &output_depths,
782 &output_formats);
783
784 avail_depths = output_depths & improc->supported_color_depths;
785 if (avail_depths == 0) {
786 DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
787 output_depths,
788 improc->supported_color_depths);
789 return -EINVAL;
790 }
791
792 avail_formats = output_formats &
793 improc->supported_color_formats;
794 if (!avail_formats) {
795 DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
796 output_formats,
797 improc->supported_color_formats);
798 return -EINVAL;
799 }
800
801 st->color_depth = __fls(avail_depths);
802 st->color_format = BIT(__ffs(avail_formats));
803 }
804
805 if (kcrtc_st->base.color_mgmt_changed) {
806 drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut,
807 st->fgamma_coeffs);
808 drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs);
809 }
810
811 komeda_component_add_input(&st->base, &dflow->input, 0);
812 komeda_component_set_output(&dflow->input, &improc->base, 0);
813
814 return 0;
815}
816
817static int
818komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
819 struct komeda_crtc_state *kcrtc_st,
820 struct komeda_data_flow_cfg *dflow)
821{
822 struct drm_crtc *crtc = kcrtc_st->base.crtc;
823 struct komeda_timing_ctrlr_state *st;
824 struct komeda_component_state *c_st;
825
826 c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
827 kcrtc_st->base.state, crtc, crtc);
828 if (IS_ERR(c_st))
829 return PTR_ERR(c_st);
830
831 st = to_ctrlr_st(c_st);
832
833 komeda_component_add_input(&st->base, &dflow->input, 0);
834 komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
835
836 return 0;
837}
838
839void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
840 struct komeda_data_flow_cfg *dflow,
841 struct drm_framebuffer *fb)
842{
843 struct komeda_scaler *scaler = layer->base.pipeline->scalers[0];
844 u32 w = dflow->in_w;
845 u32 h = dflow->in_h;
846
847 dflow->total_in_w = dflow->in_w;
848 dflow->total_in_h = dflow->in_h;
849 dflow->total_out_w = dflow->out_w;
850
851 /* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
852 if (!fb->format->has_alpha)
853 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
854
855 if (drm_rotation_90_or_270(dflow->rot))
856 swap(w, h);
857
858 dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
859 dflow->is_yuv = fb->format->is_yuv;
860
861 /* try to enable image enhancer if data flow is a 2x+ upscaling */
862 dflow->en_img_enhancement = dflow->out_w >= 2 * w ||
863 dflow->out_h >= 2 * h;
864
865 /* try to enable split if the scaling exceeds the scaler's acceptable
866 * input/output range (see the example after this function).
867 */
868 if (dflow->en_scaling && scaler)
869 dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
870 !in_range(&scaler->hsize, dflow->out_w);
871}
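
/*
 * Illustrative example of the en_split decision above (the numeric limits
 * are hypothetical; the real ranges come from the product's scaler
 * capabilities): assume a scaler whose hsize range is [4, 2048]. A plane
 * with in_w = 3840 scaled down to out_w = 1920 sets en_scaling (the input
 * and output sizes differ) and also en_split, because in_w = 3840 falls
 * outside the scaler's acceptable horizontal range.
 */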
872
873static bool merger_is_available(struct komeda_pipeline *pipe,
874 struct komeda_data_flow_cfg *dflow)
875{
876 u32 avail_inputs = pipe->merger ?
877 pipe->merger->base.supported_inputs : 0;
878
879 return has_bit(dflow->input.component->id, avail_inputs);
880}
881
882int komeda_build_layer_data_flow(struct komeda_layer *layer,
883 struct komeda_plane_state *kplane_st,
884 struct komeda_crtc_state *kcrtc_st,
885 struct komeda_data_flow_cfg *dflow)
886{
887 struct drm_plane *plane = kplane_st->base.plane;
888 struct komeda_pipeline *pipe = layer->base.pipeline;
889 int err;
890
891 DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
892 layer->base.name, plane->base.id, plane->name,
893 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
894 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
895
896 err = komeda_layer_validate(layer, kplane_st, dflow);
897 if (err)
898 return err;
899
900 err = komeda_scaler_validate(plane, kcrtc_st, dflow);
901 if (err)
902 return err;
903
904 /* if split is enabled, check whether the data flow can be fed into the merger */
905 if (dflow->en_split && merger_is_available(pipe, dflow))
906 return 0;
907
908 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
909
910 return err;
911}
912
913/*
914 * Split is introduced to work around the scaler's input/output size limits.
915 * The idea is simple: if one scaler cannot meet the requirement, use two.
916 * Split divides the large source image into two halves (left/right) and does
917 * the scaling with two scalers separately and independently.
918 * However, split also introduces an edge problem in the middle of the image
919 * when scaling. To avoid it, the split isn't a simple half-and-half; extra
920 * pixels (overlap) are added to both sides, so after the split the halves are:
921 * - left: [0, src_length/2 + overlap]
922 * - right: [src_length/2 - overlap, src_length]
923 * The extra overlap eliminates the edge problem, but it may also generate
924 * unnecessary pixels when scaling; they need to be cropped before the scaler
925 * outputs the result to the next stage. Which side to crop depends on where
926 * the overlap was added:
927 * - left: crop the right
928 * - right: crop the left
929 *
930 * The diagram below shows how the split is done:
931 *
932 * <---------------------left->out_w ---------------->
933 * |--------------------------------|---right_crop-----| <- left after split
934 * \ \ /
935 * \ \<--overlap--->/
936 * |-----------------|-------------|(Middle)------|-----------------| <- src
937 * /<---overlap--->\ \
938 * / \ \
939 * right after split->|-----left_crop---|--------------------------------|
940 * ^<------------------- right->out_w --------------->^
941 *
942 * NOTE: to be consistent with the HW, out_w always includes the crop size.
943 */
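
/*
 * A worked example of the split arithmetic implemented below (the numbers,
 * including the overlap, are illustrative only; the real overlap comes from
 * the scaler's scaling_split_overlap): take in_w = 4096, out_w = 2048,
 * scaling only, and an overlap of 32.
 * - left/right in_w  = 4096 / 2 + 32 = 2080
 * - left/right out_w = (2048 + 1) >> 1 = 1024 (before adding the crop)
 * - scaled width of each half: 2048 * 2080 / 4096 = 1040
 * - left right_crop = right left_crop = 1040 - 1024 = 16
 * - final out_w of each half = 1024 + 16 = 1040 (out_w includes the crop)
 */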
944
945static void komeda_split_data_flow(struct komeda_scaler *scaler,
946 struct komeda_data_flow_cfg *dflow,
947 struct komeda_data_flow_cfg *l_dflow,
948 struct komeda_data_flow_cfg *r_dflow)
949{
950 bool r90 = drm_rotation_90_or_270(dflow->rot);
951 bool flip_h = has_flip_h(dflow->rot);
952 u32 l_out, r_out, overlap;
953
954 memcpy(l_dflow, dflow, sizeof(*dflow));
955 memcpy(r_dflow, dflow, sizeof(*dflow));
956
957 l_dflow->right_part = false;
958 r_dflow->right_part = true;
959 r_dflow->blending_zorder = dflow->blending_zorder + 1;
960
961 overlap = 0;
962 if (dflow->en_scaling && scaler)
963 overlap += scaler->scaling_split_overlap;
964
965 /* The original dflow may be fed into the splitter, which doesn't need
966 * the enhancement overlap
967 */
968 dflow->overlap = overlap;
969
970 if (dflow->en_img_enhancement && scaler)
971 overlap += scaler->enh_split_overlap;
972
973 l_dflow->overlap = overlap;
974 r_dflow->overlap = overlap;
975
976 /* split the original content */
977 /* left/right here always means the left/right part of the display image,
978 * not the source image
979 */
980 /* DRM rotation is anti-clockwise */
981 if (r90) {
982 if (dflow->en_scaling) {
983 l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
984 r_dflow->in_h = l_dflow->in_h;
985 } else if (dflow->en_img_enhancement) {
986 /* enhancer only */
987 l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
988 r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
989 } else {
990 /* split without scaler, no overlap */
991 l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
992 r_dflow->in_h = dflow->in_h - l_dflow->in_h;
993 }
994
995 /* For YUV formats, after the split the source w/h may not be
996 * aligned to 2. There are two choices for such a case:
997 * 1. the scaler is enabled (overlap != 0): align both left and
998 * right and let the scaler crop the extra data.
999 * 2. the scaler is not enabled: only align the left split
1000 * src/disp and assign the remaining part to the right.
1001 */
1002 if ((overlap != 0) && dflow->is_yuv) {
1003 l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
1004 r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
1005 }
1006
1007 if (flip_h)
1008 l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
1009 else
1010 r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
1011 } else {
1012 if (dflow->en_scaling) {
1013 l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
1014 r_dflow->in_w = l_dflow->in_w;
1015 } else if (dflow->en_img_enhancement) {
1016 l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
1017 r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
1018 } else {
1019 l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
1020 r_dflow->in_w = dflow->in_w - l_dflow->in_w;
1021 }
1022
1023 /* do YUV alignment when scaler enabled */
1024 if ((overlap != 0) && dflow->is_yuv) {
1025 l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
1026 r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
1027 }
1028
1029 /* with flip_h, the left display content comes from the right part of the source */
1030 if (flip_h)
1031 l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
1032 else
1033 r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
1034 }
1035
1036 /* split the disp_rect */
1037 if (dflow->en_scaling || dflow->en_img_enhancement)
1038 l_dflow->out_w = ((dflow->out_w + 1) >> 1);
1039 else
1040 l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);
1041
1042 r_dflow->out_w = dflow->out_w - l_dflow->out_w;
1043
1044 l_dflow->out_x = dflow->out_x;
1045 r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;
1046
1047 /* calculate the scaling crop */
1048 /* each scaler outputs more data than needed; crop the extra pixels */
1049 if (r90) {
1050 l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
1051 r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
1052 } else {
1053 l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
1054 r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
1055 }
1056
1057 l_dflow->left_crop = 0;
1058 l_dflow->right_crop = l_out - l_dflow->out_w;
1059 r_dflow->left_crop = r_out - r_dflow->out_w;
1060 r_dflow->right_crop = 0;
1061
1062 /* out_w includes the crop length */
1063 l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
1064 r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
1065}
1066
1067/* For layer split, a plane state is split into two data flows and handled
1068 * by two separate komeda layer input pipelines. komeda supports two types of
1069 * layer split:
1070 * - non-scaling split:
1071 * / layer-left -> \
1072 * plane_state compiz-> ...
1073 * \ layer-right-> /
1074 *
1075 * - scaling split:
1076 * / layer-left -> scaler->\
1077 * plane_state merger -> compiz-> ...
1078 * \ layer-right-> scaler->/
1079 *
1080 * Since the merger only accepts scaler output as input, for a non-scaling
1081 * split the two layer data flows are output to compiz directly. For a scaling
1082 * split, the two data flows are first merged by the merger, which then outputs
1083 * one merged data flow to compiz.
1084 */
1085int komeda_build_layer_split_data_flow(struct komeda_layer *left,
1086 struct komeda_plane_state *kplane_st,
1087 struct komeda_crtc_state *kcrtc_st,
1088 struct komeda_data_flow_cfg *dflow)
1089{
1090 struct drm_plane *plane = kplane_st->base.plane;
1091 struct komeda_pipeline *pipe = left->base.pipeline;
1092 struct komeda_layer *right = left->right;
1093 struct komeda_data_flow_cfg l_dflow, r_dflow;
1094 int err;
1095
1096 komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow);
1097
1098 DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: "
1099 "src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
1100 left->base.name, right->base.name,
1101 plane->base.id, plane->name,
1102 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
1103 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
1104
1105 err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow);
1106 if (err)
1107 return err;
1108
1109 err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow);
1110 if (err)
1111 return err;
1112
1113 /* The rotation has been handled by the layer, so adjust the data flow accordingly */
1114 komeda_rotate_data_flow(dflow, dflow->rot);
1115
1116 /* The left and right dflows have already been fed to compiz;
1117 * there is no need for the merger to combine them.
1118 */
1119 if (r_dflow.input.component == l_dflow.input.component)
1120 return 0;
1121
1122 /* line merger path */
1123 err = komeda_merger_validate(pipe->merger, plane, kcrtc_st,
1124 &l_dflow, &r_dflow, dflow);
1125 if (err)
1126 return err;
1127
1128 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
1129
1130 return err;
1131}
1132
1133/* writeback data path: compiz -> scaler -> wb_layer -> memory */
1134int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
1135 struct drm_connector_state *conn_st,
1136 struct komeda_crtc_state *kcrtc_st,
1137 struct komeda_data_flow_cfg *dflow)
1138{
1139 struct drm_connector *conn = conn_st->connector;
1140 int err;
1141
1142 err = komeda_scaler_validate(conn, kcrtc_st, dflow);
1143 if (err)
1144 return err;
1145
1146 return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
1147}
1148
1149/* writeback scaling split data path:
1150 * /-> scaler ->\
1151 * compiz -> splitter merger -> wb_layer -> memory
1152 * \-> scaler ->/
1153 */
1154int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
1155 struct drm_connector_state *conn_st,
1156 struct komeda_crtc_state *kcrtc_st,
1157 struct komeda_data_flow_cfg *dflow)
1158{
1159 struct komeda_pipeline *pipe = wb_layer->base.pipeline;
1160 struct drm_connector *conn = conn_st->connector;
1161 struct komeda_data_flow_cfg l_dflow, r_dflow;
1162 int err;
1163
1164 err = komeda_splitter_validate(pipe->splitter, conn_st,
1165 dflow, &l_dflow, &r_dflow);
1166 if (err)
1167 return err;
1168 err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow);
1169 if (err)
1170 return err;
1171
1172 err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow);
1173 if (err)
1174 return err;
1175
1176 err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st,
1177 &l_dflow, &r_dflow, dflow);
1178 if (err)
1179 return err;
1180
1181 return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
1182}
1183
1184/* Build the display output data flow; the data path is:
1185 * compiz -> improc -> timing_ctrlr
1186 */
1187int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
1188 struct komeda_crtc_state *kcrtc_st)
1189{
1190 struct komeda_pipeline *master = kcrtc->master;
1191 struct komeda_pipeline *slave = kcrtc->slave;
1192 struct komeda_data_flow_cfg m_dflow; /* master data flow */
1193 struct komeda_data_flow_cfg s_dflow; /* slave data flow */
1194 int err;
1195
1196 memset(&m_dflow, 0, sizeof(m_dflow));
1197 memset(&s_dflow, 0, sizeof(s_dflow));
1198
1199 if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) {
1200 err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow);
1201 if (err)
1202 return err;
1203
1204 /* merge the slave dflow into master pipeline */
1205 err = komeda_compiz_set_input(master->compiz, kcrtc_st,
1206 &s_dflow);
1207 if (err)
1208 return err;
1209 }
1210
1211 err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
1212 if (err)
1213 return err;
1214
1215 err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
1216 if (err)
1217 return err;
1218
1219 err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
1220 if (err)
1221 return err;
1222
1223 return 0;
1224}
1225
1226static void
1227komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
1228 struct komeda_pipeline_state *new)
1229{
1230 struct drm_atomic_state *drm_st = new->obj.state;
1231 struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
1232 struct komeda_component_state *c_st;
1233 struct komeda_component *c;
1234 u32 disabling_comps, id;
1235
1236 WARN_ON(!old);
1237
1238 disabling_comps = (~new->active_comps) & old->active_comps;
1239
1240 /* unbind all components that are being disabled */
1241 dp_for_each_set_bit(id, disabling_comps) {
1242 c = komeda_pipeline_get_component(pipe, id);
1243 c_st = komeda_component_get_state_and_set_user(c,
1244 drm_st, NULL, new->crtc);
1245 WARN_ON(IS_ERR(c_st));
1246 }
1247}
1248
1249/* release unclaimed pipeline resources */
1250int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
1251 struct komeda_crtc_state *kcrtc_st)
1252{
1253 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
1254 struct komeda_pipeline_state *st;
1255
1256 /* ignore the pipeline which is not affected */
1257 if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
1258 return 0;
1259
1260 if (has_bit(pipe->id, kcrtc_st->active_pipes))
1261 st = komeda_pipeline_get_new_state(pipe, drm_st);
1262 else
1263 st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
1264
1265 if (WARN_ON(IS_ERR_OR_NULL(st)))
1266 return -EINVAL;
1267
1268 komeda_pipeline_unbound_components(pipe, st);
1269
1270 return 0;
1271}
1272
1273/* Since standalone-disabled components must be disabled separately and last,
1274 * a complete disable operation may need to call pipeline_disable twice
1275 * (two-phase disabling); see the usage sketch after this function.
1276 * Phase 1: disable the common components and flush.
1277 * Phase 2: disable the standalone-disabled components and flush.
1278 *
1279 * RETURNS:
1280 * true: disable is not complete, needs a phase 2 disable.
1281 * false: disable is complete.
1282 */
1283bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
1284 struct drm_atomic_state *old_state)
1285{
1286 struct komeda_pipeline_state *old;
1287 struct komeda_component *c;
1288 struct komeda_component_state *c_st;
1289 u32 id, disabling_comps = 0;
1290
1291 old = komeda_pipeline_get_old_state(pipe, old_state);
1292
1293 disabling_comps = old->active_comps &
1294 (~pipe->standalone_disabled_comps);
1295 if (!disabling_comps)
1296 disabling_comps = old->active_comps &
1297 pipe->standalone_disabled_comps;
1298
1299 DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
1300 pipe->id, old->active_comps, disabling_comps);
1301
1302 dp_for_each_set_bit(id, disabling_comps) {
1303 c = komeda_pipeline_get_component(pipe, id);
1304 c_st = priv_to_comp_st(c->obj.state);
1305
1306 /*
1307 * If we disabled a component then all active_inputs should be
1308 * put in the list of changed_active_inputs, so they get
1309 * re-enabled.
1310 * This usually happens during a modeset when the pipeline is
1311 * first disabled and then the actual state gets committed
1312 * again.
1313 */
1314 c_st->changed_active_inputs |= c_st->active_inputs;
1315
1316 c->funcs->disable(c);
1317 }
1318
1319 /* Update the pipeline state; if there are components that are still
1320 * active, return true so the caller performs the phase 2 disable.
1321 */
1322 old->active_comps &= ~disabling_comps;
1323
1324 return old->active_comps ? true : false;
1325}
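
/*
 * A minimal caller sketch for the two-phase disable (illustrative only; the
 * flush steps stand in for whatever mechanism the CRTC disable path uses to
 * commit the configuration to HW):
 *
 *	needs_phase2 = komeda_pipeline_disable(pipe, old_state);
 *	... flush the disable of the common components to HW ...
 *	if (needs_phase2) {
 *		komeda_pipeline_disable(pipe, old_state);
 *		... flush the standalone-disabled components ...
 *	}
 */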
1326
1327void komeda_pipeline_update(struct komeda_pipeline *pipe,
1328 struct drm_atomic_state *old_state)
1329{
1330 struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
1331 struct komeda_pipeline_state *old;
1332 struct komeda_component *c;
1333 u32 id, changed_comps = 0;
1334
1335 old = komeda_pipeline_get_old_state(pipe, old_state);
1336
1337 changed_comps = new->active_comps | old->active_comps;
1338
1339 DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
1340 pipe->id, new->active_comps, changed_comps);
1341
1342 dp_for_each_set_bit(id, changed_comps) {
1343 c = komeda_pipeline_get_component(pipe, id);
1344
1345 if (new->active_comps & BIT(c->id))
1346 c->funcs->update(c, priv_to_comp_st(c->obj.state));
1347 else
1348 c->funcs->disable(c);
1349 }
1350}