// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

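/*
 * A tracking-map entry of 0 means the block is free; any other value is
 * the DRM id of the encoder that currently owns it. A block is therefore
 * "reserved by other" when its slot is non-zero and held by a different
 * encoder than the one asking.
 */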
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

/**
 * dpu_rm_init - Read hardware catalog and create reservation tracking objects
 *	for all HW blocks.
 * @dev: Corresponding device for devres management
 * @rm: DPU Resource Manager handle
 * @cat: Pointer to hardware catalog
 * @mdss_data: Pointer to MDSS / UBWC configuration
 * @mmio: mapped register io address of MDP
 * Return: 0 on Success otherwise -ERROR
 */
int dpu_rm_init(struct drm_device *dev,
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		const struct msm_mdss_data *mdss_data,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid input param(s)\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(dev, lm, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dev, dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
			hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dev, dsc, mmio);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	if (cat->cdm) {
		struct dpu_hw_cdm *hw;

		hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cdm object creation: err %d\n", rc);
			goto fail;
		}
		rm->cdm_blk = &hw->base;
	}

	return 0;

fail:
	return rc ? rc : -EFAULT;
}
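
/*
 * Illustrative call site (a sketch, not code from this file): the KMS init
 * path would create the RM once, after the catalog and MDSS data have been
 * parsed. The dpu_kms field names below are assumptions.
 *
 *	ret = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog,
 *			  dpu_kms->mdss, dpu_kms->mmio);
 *	if (ret)
 *		return ret;
 */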

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 *
 * Return: lm peer mixer id on success or %-EINVAL on error
 */
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;

	if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
		return prim_lm_cfg->lm_pair - LM_0;
	return -EINVAL;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if lm, and all other hardwired blocks connected to the lm (pp) are
 *	available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *	datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		if (lm_count < reqs->topology.num_lm) {
			int j = _dpu_rm_get_lm_peer(rm, i);

			/* ignore the peer if there is an error or if the peer was already processed */
			if (j < 0 || j < i)
				continue;

			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}
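
/*
 * Worked example for _dpu_rm_reserve_lms() above (illustrative): for a
 * topology with num_lm == 2, the scan picks the first free mixer, say LM_0,
 * then consults that mixer's catalog lm_pair to find its hardwired peer,
 * say LM_1. Both mixers, and the pingpongs attached to each, must be free
 * for the pair to be reserved; otherwise the scan restarts at the next
 * candidate primary.
 */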

static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
	}

	return 0;
}
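
/*
 * Worked example for _dpu_rm_reserve_ctls() above (illustrative): a
 * dual-interface display has num_intf == 2, so two CTLs are needed and only
 * CTL blocks advertising DPU_CTL_SPLIT_DISPLAY in their features match; a
 * single-interface display needs one CTL, and only CTLs without that bit
 * set will match.
 */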

static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
				       int start,
				       uint32_t enc_id)
{
	int i;

	for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
		if (global_state->pingpong_to_enc_id[i] == enc_id)
			return i;
	}

	return -ENAVAIL;
}

static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
{
	/*
	 * DSC with even index must be used with the PINGPONG with even index
	 * DSC with odd index must be used with the PINGPONG with odd index
	 */
	if ((dsc_idx & 0x01) != (pp_idx & 0x01))
		return -ENAVAIL;

	return 0;
}
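
/*
 * For example, DSC_0 (even index) may pair only with an even pingpong such
 * as PINGPONG_0 or PINGPONG_2, while DSC_1 (odd index) may pair only with
 * PINGPONG_1 or PINGPONG_3.
 */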

static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
			     struct dpu_global_state *global_state,
			     uint32_t enc_id,
			     const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int pp_idx = 0;
	int dsc_idx;
	int ret;

	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx++) {
		if (!rm->dsc_blks[dsc_idx])
			continue;

		if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret)
			return -ENAVAIL;

		global_state->dsc_to_enc_id[dsc_idx] = enc_id;
		num_dsc++;
		pp_idx++;
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
				  struct dpu_global_state *global_state,
				  uint32_t enc_id,
				  const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int dsc_idx, pp_idx = 0;
	int ret;

	/* only start from even dsc index */
	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx += 2) {
		if (!rm->dsc_blks[dsc_idx] ||
		    !rm->dsc_blks[dsc_idx + 1])
			continue;

		/* consecutive dsc indexes to be paired */
		if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
		    reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		global_state->dsc_to_enc_id[dsc_idx] = enc_id;
		global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
		num_dsc += 2;
		pp_idx++;	/* start for next pair */
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	uint32_t enc_id = enc->base.id;

	if (!top->num_dsc || !top->num_intf)
		return 0;

	/*
	 * Facts:
	 * 1) no pingpong split (two layer mixers share one pingpong)
	 * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
	 * 3) even PINGPONG connects to even DSC
	 * 4) odd PINGPONG connects to odd DSC
	 * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
	 *                  +--> pp_idx_1 --> dsc_idx_1
	 */

	/* num_dsc should be either 1, 2 or 4 */
	if (top->num_dsc > top->num_intf) /* merge mode */
		return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
	else
		return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
}
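
/*
 * Worked example for _dpu_rm_reserve_dsc() above (illustrative): a DSC merge
 * topology with num_intf == 1 and num_dsc == 2 takes the pair path, ending
 * up with encoder -> (even pp, odd pp) feeding (even DSC, odd DSC); when
 * num_dsc == num_intf, each interface gets a single DSC through the
 * non-pair path.
 */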

static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc)
{
	/* try allocating only one CDM block */
	if (!rm->cdm_blk) {
		DPU_ERROR("CDM block does not exist\n");
		return -EIO;
	}

	if (global_state->cdm_to_enc_id) {
		DPU_ERROR("CDM_0 is already allocated\n");
		return -EIO;
	}

	global_state->cdm_to_enc_id = enc->base.id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				   &reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
	if (ret)
		return ret;

	if (reqs->topology.needs_cdm) {
		ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
		if (ret) {
			DPU_ERROR("unable to find CDM blk\n");
			return ret;
		}
	}

	return ret;
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_dsc,
		      reqs->topology.num_intf, reqs->topology.needs_cdm);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

/**
 * dpu_rm_release - Given the encoder for the display chain, release any
 *	HW blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM Encoder handle
 */
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
			ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
			ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
			ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
			ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
			ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}

/**
 * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
 *	the connections and user requirements, specified through related
 *	topology control properties, and reserve hardware blocks for that
 *	display chain.
 *	HW blocks can then be accessed through dpu_rm_get_* functions.
 *	HW Reservations should be released via dpu_rm_release.
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM Encoder handle
 * @crtc_state: Proposed Atomic DRM CRTC State handle
 * @topology: topology info for the display
 * Return: 0 on Success otherwise -ERROR
 */
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}
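
/*
 * Sketch of the expected call flow (names outside this file are
 * assumptions): during atomic check, the encoder derives a topology from
 * the mode, drops any stale reservation, and then reserves anew against the
 * duplicated global state:
 *
 *	dpu_rm_release(global_state, drm_enc);
 *	ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
 *			     crtc_state, topology);
 */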

/**
 * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
 *	assigned to this encoder
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @type: resource type to return data for
 * @blks: pointer to the array to be filled by HW resources
 * @blks_size: size of the @blks array
 * Return: number of blocks of @type assigned to @enc_id and written to @blks
 */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	case DPU_HW_BLK_CDM:
		hw_blks = &rm->cdm_blk;
		hw_to_enc_id = &global_state->cdm_to_enc_id;
		max_blks = 1;
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}
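
/*
 * Illustrative use (a sketch; MAX_CHANNELS_PER_ENC and the surrounding
 * names are assumed from the calling context): an encoder collects its
 * assigned pingpongs like so, with the return value giving the count.
 *
 *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 *	int num_pp;
 *
 *	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
 *			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 */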

static void dpu_rm_print_state_helper(struct drm_printer *p,
				      struct dpu_hw_blk *blk,
				      uint32_t mapping)
{
	if (!blk)
		drm_puts(p, "- ");
	else if (!mapping)
		drm_puts(p, "# ");
	else
		drm_printf(p, "%d ", mapping);
}
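
/*
 * Note on the dump format produced by the helper above: "-" marks a block
 * that does not exist on this hardware, "#" marks a block that exists but
 * is unassigned, and a number is the DRM id of the owning encoder.
 */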

/**
 * dpu_rm_print_state - output the RM private state
 * @p: DRM printer
 * @global_state: global state
 */
void dpu_rm_print_state(struct drm_printer *p,
			const struct dpu_global_state *global_state)
{
	const struct dpu_rm *rm = global_state->rm;
	int i;

	drm_puts(p, "resource mapping:\n");
	drm_puts(p, "\tpingpong=");
	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
					  global_state->pingpong_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tmixer=");
	for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->mixer_blks[i],
					  global_state->mixer_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tctl=");
	for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->ctl_blks[i],
					  global_state->ctl_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdspp=");
	for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->dspp_blks[i],
					  global_state->dspp_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdsc=");
	for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->dsc_blks[i],
					  global_state->dsc_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcdm=");
	dpu_rm_print_state_helper(p, rm->cdm_blk,
				  global_state->cdm_to_enc_id);
	drm_puts(p, "\n");
}