v6.9.4
  1/*
  2 * Copyright 2022 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: AMD
 23 *
 24 */
 25
 26// header file of functions being implemented
 27#include "dcn32/dcn32_resource.h"
 28#include "dcn20/dcn20_resource.h"
 29#include "dml/dcn32/display_mode_vba_util_32.h"
 30#include "dml/dcn32/dcn32_fpu.h"
 31#include "dc_state_priv.h"
 32
 33static bool is_dual_plane(enum surface_pixel_format format)
 34{
 35	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
 36}
 37
 38
 39uint32_t dcn32_helper_mall_bytes_to_ways(
 40		struct dc *dc,
 41		uint32_t total_size_in_mall_bytes)
 42{
 43	uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
 44
 45	/* add 2 lines for worst case alignment */
 46	cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
 47
 48	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
 49	lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
 50	num_ways = cache_lines_used / lines_per_way;
 51	if (cache_lines_used % lines_per_way > 0)
 52		num_ways++;
 53
 54	return num_ways;
 55}
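/*
 * Worked example for dcn32_helper_mall_bytes_to_ways(), using assumed caps
 * values (illustrative only, not taken from a specific ASIC):
 * cache_line_size = 64, max_cab_allocation_bytes = 64 MiB, cache_num_ways = 16,
 * total_size_in_mall_bytes = 2 MiB.
 *
 *   cache_lines_used  = 2097152 / 64 + 2 = 32770
 *   total_cache_lines = 67108864 / 64    = 1048576
 *   lines_per_way     = 1048576 / 16     = 65536
 *   num_ways          = 32770 / 65536    = 0, remainder != 0  ->  1 way
 */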
 56
 57uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
 58		struct dc *dc,
 59		struct pipe_ctx *pipe_ctx,
 60		bool ignore_cursor_buf)
 61{
 62	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 63	uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
 64	uint32_t cursor_mall_size_bytes = 0;
 65
 66	switch (pipe_ctx->stream->cursor_attributes.color_format) {
 67	case CURSOR_MODE_MONO:
 68		cursor_size /= 2;
 69		break;
 70	case CURSOR_MODE_COLOR_1BIT_AND:
 71	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
 72	case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
 73		cursor_size *= 4;
 74		break;
 75
 76	case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
 77	case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
 78		cursor_size *= 8;
 79		break;
 80	}
 81
 82	/* only count if cursor is enabled, and if additional allocation needed outside of the
 83	 * DCN cursor buffer
 84	 */
 85	if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
 86			cursor_size > 16384)) {
 87		/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
 88		 * Note: add 1 mblk in case of cursor misalignment
 89		 */
 90		cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
 91				DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
 92	}
 93
 94	return cursor_mall_size_bytes;
 95}
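/*
 * Worked example for dcn32_helper_calculate_mall_bytes_for_cursor(), assuming
 * a 256x256 pre-multiplied-alpha ARGB cursor (pitch = 256) and a 64 KiB
 * DCN3_2_MALL_MBLK_SIZE_BYTES:
 *
 *   cursor_size            = 256 * 256 * 4 = 262144 bytes  (> 16384, so counted)
 *   cursor_mall_size_bytes = (CEIL(262144 / 65536) + 1) * 65536
 *                          = (4 + 1) * 65536 = 327680 bytes
 *
 * i.e. four MBLKs for the cursor pixels plus one extra MBLK for possible
 * misalignment.
 */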
 96
 97/**
 98 * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP
 99 *
100 * Gets total allocation required for the phantom viewport calculated by DML in bytes and
101 * converts to number of cache ways.
102 *
103 * @dc: current dc state
104 * @context: new dc state
105 *
106 * Return: number of ways required for SubVP
107 */
108uint32_t dcn32_helper_calculate_num_ways_for_subvp(
109		struct dc *dc,
110		struct dc_state *context)
111{
112	if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
113		if (dc->debug.force_subvp_num_ways) {
114			return dc->debug.force_subvp_num_ways;
115		} else {
116			return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
117		}
118	} else {
119		return 0;
120	}
121}
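/*
 * Minimal usage sketch (hypothetical caller; assumes DML validation has
 * already populated mall_subvp_size_bytes in the new context):
 *
 *   uint32_t ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
 *
 * A return of 0 means no SubVP allocation; otherwise the result is either the
 * force_subvp_num_ways debug override or the converted byte count from
 * dcn32_helper_mall_bytes_to_ways().
 */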
122
123void dcn32_merge_pipes_for_subvp(struct dc *dc,
124		struct dc_state *context)
125{
126	uint32_t i;
127
128	/* merge pipes if necessary */
129	for (i = 0; i < dc->res_pool->pipe_count; i++) {
130		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
131
132		// For now merge all pipes for SubVP since pipe split case isn't supported yet
133
134		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
135		if (pipe->prev_odm_pipe) {
136			/*split off odm pipe*/
137			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
138			if (pipe->next_odm_pipe)
139				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
140
141			pipe->bottom_pipe = NULL;
142			pipe->next_odm_pipe = NULL;
143			pipe->plane_state = NULL;
144			pipe->stream = NULL;
145			pipe->top_pipe = NULL;
146			pipe->prev_odm_pipe = NULL;
147			if (pipe->stream_res.dsc)
148				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
149			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
150			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
151		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
152			struct pipe_ctx *top_pipe = pipe->top_pipe;
153			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
154
155			top_pipe->bottom_pipe = bottom_pipe;
156			if (bottom_pipe)
157				bottom_pipe->top_pipe = top_pipe;
158
159			pipe->top_pipe = NULL;
160			pipe->bottom_pipe = NULL;
161			pipe->plane_state = NULL;
162			pipe->stream = NULL;
163			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
164			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
165		}
166	}
167}
168
169bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
170		struct dc_state *context)
171{
172	uint32_t i;
173
174	for (i = 0; i < dc->res_pool->pipe_count; i++) {
175		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
176
177		if (!pipe->stream)
178			continue;
179
180		if (!pipe->plane_state)
181			return false;
182	}
183	return true;
184}
185
186bool dcn32_subvp_in_use(struct dc *dc,
187		struct dc_state *context)
188{
189	uint32_t i;
190
191	for (i = 0; i < dc->res_pool->pipe_count; i++) {
192		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
193
194		if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
195			return true;
196	}
197	return false;
198}
199
200bool dcn32_mpo_in_use(struct dc_state *context)
201{
202	uint32_t i;
203
204	for (i = 0; i < context->stream_count; i++) {
205		if (context->stream_status[i].plane_count > 1)
206			return true;
207	}
208	return false;
209}
210
211
212bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
213{
214	uint32_t i;
215
216	for (i = 0; i < dc->res_pool->pipe_count; i++) {
217		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
218
219		if (!pipe->stream)
220			continue;
221
222		if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
223			return true;
224	}
225	return false;
226}
227
228bool dcn32_is_center_timing(struct pipe_ctx *pipe)
229{
230	bool is_center_timing = false;
231
232	if (pipe->stream) {
233		if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
234				pipe->stream->timing.v_addressable != pipe->stream->src.height) {
235			is_center_timing = true;
236		}
237	}
238
239	if (pipe->plane_state) {
240		if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
241				pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
242			is_center_timing = true;
243		}
244	}
245
246	return is_center_timing;
247}
248
249bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
250{
251	bool psr_capable = false;
252
253	if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
254		psr_capable = true;
255	}
256	return psr_capable;
257}
258
259static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint8_t pipe_segments[])
260{
261	uint32_t i;
262	uint8_t fhd_count = 0;
263	uint8_t subvp_high_refresh_count = 0;
264	uint8_t stream_count = 0;
265
266	// Do not override if a stream has multiple planes
267	for (i = 0; i < context->stream_count; i++) {
268		if (context->stream_status[i].plane_count > 1)
269			return;
270
271		if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
272			stream_count++;
273	}
274
275	for (i = 0; i < dc->res_pool->pipe_count; i++) {
276		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
277
278		if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
279			if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {
280
281				if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
282					fhd_count++;
283				}
284				subvp_high_refresh_count++;
285			}
286		}
287	}
288
289	if (stream_count == 2 && subvp_high_refresh_count == 2 && fhd_count == 1) {
290		for (i = 0; i < dc->res_pool->pipe_count; i++) {
291			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
292
293			if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
294				if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
295					if (pipe_segments[i] > 4)
296						pipe_segments[i] = 4;
297				}
298			}
299		}
300	}
301}
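/*
 * Example of when the override above applies (illustrative, assuming one
 * plane per stream and no pipe split): a two-stream config where both
 * displays qualify for high-refresh SubVP and one of them is 1920x1080.
 *
 *   stream_count = 2, subvp_high_refresh_count = 2, fhd_count = 1
 *   pipe_segments for the 1080p pipe: 9 -> 4; all other pipes are untouched
 */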
302
303/**
304 * dcn32_determine_det_override(): Determine DET allocation for each pipe
305 *
306 * This function determines how much DET to allocate for each pipe. The total number of
307 * DET segments will be split equally among each of the streams, and after that the DET
308 * segments per stream will be split equally among the planes for the given stream.
309 *
310 * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
311 * number of DET for that given plane will be split among the pipes driving that plane.
312 *
313 *
314 * High level algorithm:
315 * 1. Split total DET among number of streams
316 * 2. For each stream, split DET among the planes
317 * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
318 *    among those pipes.
319 * 4. Assign the DET override to the DML pipes.
320 *
321 * @dc: Current DC state
322 * @context: New DC state to be programmed
323 * @pipes: Array of DML pipes
324 *
325 * Return: void
326 */
327void dcn32_determine_det_override(struct dc *dc,
328		struct dc_state *context,
329		display_e2e_pipe_params_st *pipes)
330{
331	uint32_t i, j, k;
332	uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
333	uint8_t pipe_counted[MAX_PIPES] = {0};
334	uint8_t pipe_cnt = 0;
335	struct dc_plane_state *current_plane = NULL;
336	uint8_t stream_count = 0;
337
338	for (i = 0; i < context->stream_count; i++) {
339		/* Don't count SubVP streams for DET allocation */
340		if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
341			stream_count++;
342	}
343
344	if (stream_count > 0) {
345		stream_segments = 18 / stream_count;
346		for (i = 0; i < context->stream_count; i++) {
347			if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
348				continue;
349
350			if (context->stream_status[i].plane_count > 0)
351				plane_segments = stream_segments / context->stream_status[i].plane_count;
352			else
353				plane_segments = stream_segments;
354			for (j = 0; j < dc->res_pool->pipe_count; j++) {
355				pipe_plane_count = 0;
356				if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
357						pipe_counted[j] != 1) {
358					/* Note: pipe_plane_count indicates the number of pipes to be used for a
359					 * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
360					 * pipe_plane_count = 2 means 2:1 split, etc.
361					 */
362					pipe_plane_count++;
363					pipe_counted[j] = 1;
364					current_plane = context->res_ctx.pipe_ctx[j].plane_state;
365					for (k = 0; k < dc->res_pool->pipe_count; k++) {
366						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
367								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
368							pipe_plane_count++;
369							pipe_counted[k] = 1;
370						}
371					}
372
373					pipe_segments[j] = plane_segments / pipe_plane_count;
374					for (k = 0; k < dc->res_pool->pipe_count; k++) {
375						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
376								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
377							pipe_segments[k] = plane_segments / pipe_plane_count;
378						}
379					}
380				}
381			}
382		}
383
384		override_det_for_subvp(dc, context, pipe_segments);
385		for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
386			if (!context->res_ctx.pipe_ctx[i].stream)
387				continue;
388			pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
389			pipe_cnt++;
390		}
391	} else {
392		for (i = 0; i < dc->res_pool->pipe_count; i++)
393			pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
394	}
395}
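/*
 * Worked example of the segment split above (illustrative): 18 total DET
 * segments and two non-phantom streams.
 *
 *   stream_segments = 18 / 2 = 9 per stream
 *   stream A, 1 plane, no split      -> 9 segments for its pipe
 *   stream B, 2 planes               -> plane_segments = 9 / 2 = 4 per plane
 *     plane driven by a single pipe  -> 4 segments
 *     plane driven by 2 pipes (2:1)  -> 4 / 2 = 2 segments per pipe
 *
 * Each pipe's det_size_override is then its segment count multiplied by
 * DCN3_2_DET_SEG_SIZE.
 */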
396
397void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
398	display_e2e_pipe_params_st *pipes)
399{
400	int i, pipe_cnt;
401	struct resource_context *res_ctx = &context->res_ctx;
402	struct pipe_ctx *pipe;
403	bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
404
405	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
406
407		if (!res_ctx->pipe_ctx[i].stream)
408			continue;
409
410		pipe = &res_ctx->pipe_ctx[i];
411		pipe_cnt++;
412	}
413
414	/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
415	 * the DET available for each pipe). Use the DET override input to maintain our driver
416	 * policy.
417	 */
418	if (pipe_cnt == 1) {
419		pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
420		if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
421			if (!is_dual_plane(pipe->plane_state->format)) {
422				pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
423				pipes[0].pipe.src.unbounded_req_mode = true;
424				if (pipe->plane_state->src_rect.width >= 5120 &&
425					pipe->plane_state->src_rect.height >= 2880)
426					pipes[0].pipe.src.det_size_override = 320; // 5K or higher
427			}
428		}
429	} else
430		dcn32_determine_det_override(dc, context, pipes);
431}
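/*
 * Summary of the single-pipe policy above (values follow the code, not a
 * normative list):
 *   - default:                                  det = DCN3_2_MAX_DET_SIZE
 *   - single plane, non-linear swizzle, and unbounded requesting allowed:
 *       det = DCN3_2_DEFAULT_DET_SIZE with unbounded_req_mode = true
 *   - same, but source rect at least 5120x2880: det = 320
 * Multi-pipe configs fall through to dcn32_determine_det_override().
 */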
432
433#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW)
434/*
435 * Scaling factor for v_blank stretch calculations considering timing in
436 * micro-seconds and pixel clock in 100hz.
 437 * Note: the parentheses are necessary to ensure the correct order of
438 * operation where V_SCALE is used.
439 */
440#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
441
442static int get_frame_rate_at_max_stretch_100hz(
443		struct dc_stream_state *fpo_candidate_stream,
444		uint32_t fpo_vactive_margin_us)
445{
446	struct dc_crtc_timing *timing = NULL;
447	uint32_t sec_per_100_lines;
448	uint32_t max_v_blank;
449	uint32_t curr_v_blank;
450	uint32_t v_stretch_max;
451	uint32_t stretched_frame_pix_cnt;
452	uint32_t scaled_stretched_frame_pix_cnt;
453	uint32_t scaled_refresh_rate;
454	uint32_t v_scale;
455
456	if (fpo_candidate_stream == NULL)
457		return 0;
458
459	/* check if refresh rate at least 120hz */
460	timing = &fpo_candidate_stream->timing;
461	if (timing == NULL)
462		return 0;
463
464	v_scale = 10000 / (MAX_STRETCHED_V_BLANK + fpo_vactive_margin_us);
465
466	sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
467	max_v_blank = sec_per_100_lines / v_scale + 1;
468	curr_v_blank = timing->v_total - timing->v_addressable;
469	v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
470	stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
471	scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
472	scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;
473
474	return scaled_refresh_rate;
475
476}
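/*
 * Worked example for get_frame_rate_at_max_stretch_100hz(), assuming a
 * 1920x1080@120Hz timing (h_total = 2200, v_total = 1125,
 * pix_clk_100hz = 2970000) and fpo_vactive_margin_us = 0:
 *
 *   v_scale                 = 10000 / 1000        = 10
 *   sec_per_100_lines       = 2970000 / 2200 + 1  = 1351
 *   max_v_blank             = 1351 / 10 + 1       = 136
 *   curr_v_blank            = 1125 - 1080         = 45
 *   v_stretch_max           = 136 - 45            = 91
 *   stretched_frame_pix_cnt = (91 + 1125) * 2200  = 2675200
 *   scaled refresh rate     = 2970000 / 267 + 1   = 11124  (~111 Hz)
 *
 * The caller compares this value against the stream's minimum refresh rate.
 */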
477
478static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(
479		struct dc_stream_state *fpo_candidate_stream, uint32_t fpo_vactive_margin_us)
480{
481	int refresh_rate_max_stretch_100hz;
482	int min_refresh_100hz;
483
484	if (fpo_candidate_stream == NULL)
485		return false;
486
487	refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(fpo_candidate_stream, fpo_vactive_margin_us);
488	min_refresh_100hz = fpo_candidate_stream->timing.min_refresh_in_uhz / 10000;
489
490	if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
491		return false;
492
493	return true;
494}
495
496static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
497{
498	int refresh_rate = 0;
499	int h_v_total = 0;
500	struct dc_crtc_timing *timing = NULL;
501
502	if (fpo_candidate_stream == NULL)
503		return 0;
504
505	/* check if refresh rate at least 120hz */
506	timing = &fpo_candidate_stream->timing;
507	if (timing == NULL)
508		return 0;
509
510	h_v_total = timing->h_total * timing->v_total;
511	if (h_v_total == 0)
512		return 0;
513
514	refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
515	return refresh_rate;
516}
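/*
 * Worked example for get_refresh_rate() with the same assumed 1080p timing
 * (h_total = 2200, v_total = 1125, pix_clk_100hz = 2970000):
 *
 *   h_v_total    = 2200 * 1125                    = 2475000
 *   refresh_rate = (2970000 * 100) / 2475000 + 1  = 121
 *
 * The +1 rounds up, so an exact 120Hz mode still passes the
 * minimum_refreshrate_supported check in the caller below.
 */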
517
518/**
519 * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can
520 *								    support FPO
521 *
522 * @dc: current dc state
523 * @context: new dc state
524 *
525 * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
526 */
527struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
528{
529	int refresh_rate = 0;
530	const int minimum_refreshrate_supported = 120;
531	struct dc_stream_state *fpo_candidate_stream = NULL;
532	bool is_fpo_vactive = false;
533	uint32_t fpo_vactive_margin_us = 0;
534	struct dc_stream_status *fpo_stream_status = NULL;
535
536	if (context == NULL)
537		return NULL;
538
539	if (dc->debug.disable_fams)
540		return NULL;
541
542	if (!dc->caps.dmub_caps.mclk_sw)
543		return NULL;
544
545	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
546		return NULL;
547
548	/* For FPO we can support up to 2 display configs if:
549	 * - first display uses FPO
550	 * - Second display switches in VACTIVE */
551	if (context->stream_count > 2)
552		return NULL;
553	else if (context->stream_count == 2) {
554		DC_FP_START();
555		dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
556		DC_FP_END();
557		if (fpo_candidate_stream)
558			fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
559		DC_FP_START();
560		is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
561		DC_FP_END();
562		if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
563			return NULL;
564	} else {
565		fpo_candidate_stream = context->streams[0];
566		if (fpo_candidate_stream)
567			fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
568	}
569
570	/* In DCN32/321, FPO uses per-pipe P-State force.
571	 * If there's no planes, HUBP is power gated and
572	 * therefore programming UCLK_PSTATE_FORCE does
573	 * nothing (P-State will always be asserted naturally
 574 * on a pipe that has HUBP power gated). Therefore we
575	 * only want to enable FPO if the FPO pipe has both
576	 * a stream and a plane.
577	 */
578	if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
579		return NULL;
580
581	if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
582		return NULL;
583
584	refresh_rate = get_refresh_rate(fpo_candidate_stream);
585	if (refresh_rate < minimum_refreshrate_supported)
586		return NULL;
587
588	fpo_vactive_margin_us = is_fpo_vactive ? dc->debug.fpo_vactive_margin_us : 0; // For now hardcode the FPO + Vactive stretch margin to be 2000us
589	if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us))
590		return NULL;
591
592	if (!fpo_candidate_stream->allow_freesync)
593		return NULL;
594
595	if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming)
596		return NULL;
597
598	return fpo_candidate_stream;
599}
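/*
 * Minimal usage sketch (hypothetical caller): the returned stream, if any, is
 * the one that may be driven with the FW-based vblank stretch (FPO).
 *
 *   struct dc_stream_state *fpo_stream =
 *       dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
 *   if (fpo_stream)
 *       context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
 */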
600
601bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height)
602{
603	bool is_native_scaling = false;
604
605	if (pipe->stream->timing.h_addressable == width &&
606			pipe->stream->timing.v_addressable == height &&
607			pipe->plane_state->src_rect.width == width &&
608			pipe->plane_state->src_rect.height == height &&
609			pipe->plane_state->dst_rect.width == width &&
610			pipe->plane_state->dst_rect.height == height)
611		is_native_scaling = true;
612
613	return is_native_scaling;
614}
615
616/**
617 * disallow_subvp_in_active_plus_blank() - Function to determine disallowed subvp + drr/vblank configs
618 *
619 * @pipe: subvp pipe to be used for the subvp + drr/vblank config
620 *
621 * Since subvp is being enabled on more configs (such as 1080p60), we want
622 * to explicitly block any configs that we don't want to enable. We do not
623 * want to enable any 1080p60 (SubVP) + drr / vblank configs since these
 624 * are already covered by FPO.
625 *
626 * Return: True if disallowed, false otherwise
627 */
628static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe)
629{
630	bool disallow = false;
631
632	if (resource_is_pipe_type(pipe, OPP_HEAD) &&
633			resource_is_pipe_type(pipe, DPP_PIPE)) {
634		if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920)
635			disallow = true;
636	}
637	return disallow;
638}
639
640/**
641 * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
642 *
643 * @dc: Current DC state
644 * @context: New DC state to be programmed
645 *
646 * SubVP + DRR is admissible under the following conditions:
647 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
648 * - One display is SubVP
649 * - Other display must have Freesync enabled
650 * - The potential DRR display must not be PSR capable
651 *
652 * Return: True if admissible, false otherwise
653 */
654bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
655{
656	bool result = false;
657	uint32_t i;
658	uint8_t subvp_count = 0;
659	uint8_t non_subvp_pipes = 0;
660	bool drr_pipe_found = false;
661	bool drr_psr_capable = false;
662	uint64_t refresh_rate = 0;
663	bool subvp_disallow = false;
664
665	for (i = 0; i < dc->res_pool->pipe_count; i++) {
666		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
667		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
668
669		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
670				resource_is_pipe_type(pipe, DPP_PIPE)) {
671			if (pipe_mall_type == SUBVP_MAIN) {
672				subvp_count++;
673
674				subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
675				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
676					pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
677				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
678				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
679			}
680			if (pipe_mall_type == SUBVP_NONE) {
681				non_subvp_pipes++;
682				drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
683				if (pipe->stream->ignore_msa_timing_param &&
684						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
685					drr_pipe_found = true;
686				}
687			}
688		}
689	}
690
691	if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
692		((uint32_t)refresh_rate < 120))
693		result = true;
694
695	return result;
696}
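/*
 * Worked example of the SubVP refresh-rate check above, assuming a 4K60
 * SubVP main display (h_total = 4400, v_total = 2250, pix_clk_100hz = 5940000):
 *
 *   refresh_rate = (5940000 * 100 + 4400 * 2250 - 1) / 2250 / 4400 = 60
 *
 * With one other non-SubVP pipe carrying a DRR-capable, non-PSR stream, the
 * config is admissible since 60 < 120.
 */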
697
698/**
699 * dcn32_subvp_vblank_admissable() - Determine if SubVP + Vblank config is admissible
700 *
701 * @dc: Current DC state
702 * @context: New DC state to be programmed
703 * @vlevel: Voltage level calculated by DML
704 *
705 * SubVP + Vblank is admissible under the following conditions:
706 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
707 * - One display is SubVP
708 * - Other display must not have Freesync capability
709 * - DML must have output DRAM clock change support as SubVP + Vblank
710 * - The potential vblank display must not be PSR capable
711 *
712 * Return: True if admissible, false otherwise
713 */
714bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel)
715{
716	bool result = false;
717	uint32_t i;
718	uint8_t subvp_count = 0;
719	uint8_t non_subvp_pipes = 0;
720	bool drr_pipe_found = false;
721	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
722	bool vblank_psr_capable = false;
723	uint64_t refresh_rate = 0;
724	bool subvp_disallow = false;
725
726	for (i = 0; i < dc->res_pool->pipe_count; i++) {
727		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
728		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
729
730		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
731				resource_is_pipe_type(pipe, DPP_PIPE)) {
732			if (pipe_mall_type == SUBVP_MAIN) {
733				subvp_count++;
734
735				subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
736				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
737					pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
738				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
739				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
740			}
741			if (pipe_mall_type == SUBVP_NONE) {
742				non_subvp_pipes++;
743				vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
744				if (pipe->stream->ignore_msa_timing_param &&
745						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
746					drr_pipe_found = true;
747				}
748			}
749		}
750	}
751
752	if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
753		((uint32_t)refresh_rate < 120) && !subvp_disallow &&
754		vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
755		result = true;
756
757	return result;
758}
759
760void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context,
761		display_e2e_pipe_params_st *pipes)
762{
763	int i, pipe_cnt;
764	struct resource_context *res_ctx = &context->res_ctx;
765	struct pipe_ctx *pipe = NULL;
766
767	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
768		int odm_slice_count = 0;
769
770		if (!res_ctx->pipe_ctx[i].stream)
771			continue;
772		pipe = &res_ctx->pipe_ctx[i];
773		odm_slice_count = resource_get_odm_slice_count(pipe);
774
775		if (odm_slice_count == 1)
776			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
777		else if (odm_slice_count == 2)
778			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
779		else if (odm_slice_count == 4)
780			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
781
782		pipe_cnt++;
783	}
784}
785
786void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
787{
788	if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
789		context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
790}
v6.2
  1/*
  2 * Copyright 2022 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: AMD
 23 *
 24 */
 25
 26// header file of functions being implemented
 27#include "dcn32_resource.h"
 28#include "dcn20/dcn20_resource.h"
 29#include "dml/dcn32/display_mode_vba_util_32.h"
 30
 31static bool is_dual_plane(enum surface_pixel_format format)
 32{
 33	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
 34}
 35
 36/**
 37 * ********************************************************************************************
 38 * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
 39 *
 40 * This function first checks the bytes required per pixel on the SubVP pipe, then calculates
 41 * the total number of pixels required in the SubVP MALL region. These are used to calculate
 42 * the number of cache lines used (then number of ways required) for SubVP MCLK switching.
 43 *
 44 * @param [in] dc: current dc state
 45 * @param [in] context: new dc state
 46 *
 47 * @return: number of ways required for SubVP
 48 *
 49 * ********************************************************************************************
 50 */
 51uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
 52{
 53	uint32_t num_ways = 0;
 54	uint32_t bytes_per_pixel = 0;
 55	uint32_t cache_lines_used = 0;
 56	uint32_t lines_per_way = 0;
 57	uint32_t total_cache_lines = 0;
 58	uint32_t bytes_in_mall = 0;
 59	uint32_t num_mblks = 0;
 60	uint32_t cache_lines_per_plane = 0;
 61	uint32_t i = 0, j = 0;
 62	uint16_t mblk_width = 0;
 63	uint16_t mblk_height = 0;
 64	uint32_t full_vp_width_blk_aligned = 0;
 65	uint32_t full_vp_height_blk_aligned = 0;
 66	uint32_t mall_alloc_width_blk_aligned = 0;
 67	uint32_t mall_alloc_height_blk_aligned = 0;
 68	uint16_t full_vp_height = 0;
 69	bool subvp_in_use = false;
 70
 71	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 72		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 73
 74		/* Find the phantom pipes.
 75		 * - For pipe split case we need to loop through the bottom and next ODM
 76		 *   pipes or only half the viewport size is counted
 77		 */
 78		if (pipe->stream && pipe->plane_state &&
 79				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
 80			struct pipe_ctx *main_pipe = NULL;
 81
 82			subvp_in_use = true;
 83			/* Get full viewport height from main pipe (required for MBLK calculation) */
 84			for (j = 0; j < dc->res_pool->pipe_count; j++) {
 85				main_pipe = &context->res_ctx.pipe_ctx[j];
 86				if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
 87					full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
 88					break;
 89				}
 90			}
 91
 92			bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
 93			mblk_width = DCN3_2_MBLK_WIDTH;
 94			mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
 95
 96			/* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
 97			 * FLOOR(vp_x_start, blk_width)
 98			 */
 99			full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
100					pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
101					(pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
102
103			/* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
104			 * FLOOR(vp_y_start, blk_height)
105			 */
106			full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
107					full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
108					(pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
109
110			/* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
111			mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
112
113			/* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
114			mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
115					mblk_height * mblk_height + mblk_height;
116
117			/* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
118			 * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
119			 * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
120			 * (Should be divisible, but round up if not)
121			 */
122			num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
123					((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
124
125			/*For DCC:
126			 * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
127			 */
128			if (pipe->plane_state->dcc.enable)
129				num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
130								(256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
131
132			bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
133			// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
134			// (MALL is 64-byte aligned)
135			cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
136
137			cache_lines_used += cache_lines_per_plane;
138		}
139	}
140
141	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
142	lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
143	num_ways = cache_lines_used / lines_per_way;
144	if (cache_lines_used % lines_per_way > 0)
145		num_ways++;
146
147	if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
148		num_ways = dc->debug.force_subvp_num_ways;
149
150	return num_ways;
151}
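/*
 * Worked example of the phantom MALL sizing above, assuming a 4 Bpp phantom
 * plane with a 1920x135 viewport at (0,0), a main-pipe full viewport height
 * of 2160, 128x128-pixel MBLKs (64 KiB each) and a 64-byte cache line, with
 * DCC disabled:
 *
 *   full_vp_width_blk_aligned     = CEIL(1920, 128)            = 1920
 *   mall_alloc_height_blk_aligned = CEIL(135 - 1, 128) + 128   = 384
 *   num_mblks                     = (1920 / 128) * (384 / 128) = 45
 *   bytes_in_mall                 = 45 * 65536                 = 2949120
 *   cache_lines_per_plane         = 2949120 / 64 + 2           = 46082
 */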
152
153void dcn32_merge_pipes_for_subvp(struct dc *dc,
154		struct dc_state *context)
155{
156	uint32_t i;
157
158	/* merge pipes if necessary */
159	for (i = 0; i < dc->res_pool->pipe_count; i++) {
160		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
161
162		// For now merge all pipes for SubVP since pipe split case isn't supported yet
163
164		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
165		if (pipe->prev_odm_pipe) {
166			/*split off odm pipe*/
167			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
168			if (pipe->next_odm_pipe)
169				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
170
171			pipe->bottom_pipe = NULL;
172			pipe->next_odm_pipe = NULL;
173			pipe->plane_state = NULL;
174			pipe->stream = NULL;
175			pipe->top_pipe = NULL;
176			pipe->prev_odm_pipe = NULL;
177			if (pipe->stream_res.dsc)
178				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
179			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
180			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
181		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
182			struct pipe_ctx *top_pipe = pipe->top_pipe;
183			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
184
185			top_pipe->bottom_pipe = bottom_pipe;
186			if (bottom_pipe)
187				bottom_pipe->top_pipe = top_pipe;
188
189			pipe->top_pipe = NULL;
190			pipe->bottom_pipe = NULL;
191			pipe->plane_state = NULL;
192			pipe->stream = NULL;
193			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
194			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
195		}
196	}
197}
198
199bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
200		struct dc_state *context)
201{
202	uint32_t i;
203
204	for (i = 0; i < dc->res_pool->pipe_count; i++) {
205		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
206
207		if (!pipe->stream)
208			continue;
209
210		if (!pipe->plane_state)
211			return false;
212	}
213	return true;
214}
215
216bool dcn32_subvp_in_use(struct dc *dc,
217		struct dc_state *context)
218{
219	uint32_t i;
220
221	for (i = 0; i < dc->res_pool->pipe_count; i++) {
222		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
223
224		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
225			return true;
226	}
227	return false;
228}
229
230bool dcn32_mpo_in_use(struct dc_state *context)
231{
232	uint32_t i;
233
234	for (i = 0; i < context->stream_count; i++) {
235		if (context->stream_status[i].plane_count > 1)
236			return true;
237	}
238	return false;
239}
240
241
242bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
243{
244	uint32_t i;
245
246	for (i = 0; i < dc->res_pool->pipe_count; i++) {
247		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
248
249		if (!pipe->stream)
250			continue;
251
252		if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
253			return true;
254	}
255	return false;
256}
257
258/**
259 * *******************************************************************************************
260 * dcn32_determine_det_override: Determine DET allocation for each pipe
261 *
262 * This function determines how much DET to allocate for each pipe. The total number of
263 * DET segments will be split equally among each of the streams, and after that the DET
264 * segments per stream will be split equally among the planes for the given stream.
265 *
266 * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
267 * number of DET for that given plane will be split among the pipes driving that plane.
268 *
269 *
270 * High level algorithm:
271 * 1. Split total DET among number of streams
272 * 2. For each stream, split DET among the planes
273 * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
274 *    among those pipes.
275 * 4. Assign the DET override to the DML pipes.
276 *
277 * @param [in]: dc: Current DC state
278 * @param [in]: context: New DC state to be programmed
279 * @param [in]: pipes: Array of DML pipes
280 *
281 * @return: void
282 *
283 * *******************************************************************************************
284 */
285void dcn32_determine_det_override(struct dc *dc,
286		struct dc_state *context,
287		display_e2e_pipe_params_st *pipes)
288{
289	uint32_t i, j, k;
290	uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
291	uint8_t pipe_counted[MAX_PIPES] = {0};
292	uint8_t pipe_cnt = 0;
293	struct dc_plane_state *current_plane = NULL;
294	uint8_t stream_count = 0;
295
296	for (i = 0; i < context->stream_count; i++) {
297		/* Don't count SubVP streams for DET allocation */
298		if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
299			stream_count++;
300		}
301	}
302
303	if (stream_count > 0) {
304		stream_segments = 18 / stream_count;
305		for (i = 0; i < context->stream_count; i++) {
306			if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
307				continue;
308			if (context->stream_status[i].plane_count > 0)
309				plane_segments = stream_segments / context->stream_status[i].plane_count;
310			else
311				plane_segments = stream_segments;
312			for (j = 0; j < dc->res_pool->pipe_count; j++) {
313				pipe_plane_count = 0;
314				if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
315						pipe_counted[j] != 1) {
316					/* Note: pipe_plane_count indicates the number of pipes to be used for a
317					 * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
318					 * pipe_plane_count = 2 means 2:1 split, etc.
319					 */
320					pipe_plane_count++;
321					pipe_counted[j] = 1;
322					current_plane = context->res_ctx.pipe_ctx[j].plane_state;
323					for (k = 0; k < dc->res_pool->pipe_count; k++) {
324						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
325								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
326							pipe_plane_count++;
327							pipe_counted[k] = 1;
328						}
329					}
330
331					pipe_segments[j] = plane_segments / pipe_plane_count;
332					for (k = 0; k < dc->res_pool->pipe_count; k++) {
333						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
334								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
335							pipe_segments[k] = plane_segments / pipe_plane_count;
336						}
337					}
338				}
339			}
340		}
341
342		for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
343			if (!context->res_ctx.pipe_ctx[i].stream)
344				continue;
345			pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
346			pipe_cnt++;
347		}
348	} else {
349		for (i = 0; i < dc->res_pool->pipe_count; i++)
350			pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
351	}
352}
353
354void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
355	display_e2e_pipe_params_st *pipes)
356{
357	int i, pipe_cnt;
358	struct resource_context *res_ctx = &context->res_ctx;
359	struct pipe_ctx *pipe;
360
361	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
362
363		if (!res_ctx->pipe_ctx[i].stream)
364			continue;
365
366		pipe = &res_ctx->pipe_ctx[i];
367		pipe_cnt++;
368	}
369
370	/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
371	 * the DET available for each pipe). Use the DET override input to maintain our driver
372	 * policy.
373	 */
374	if (pipe_cnt == 1) {
375		pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
376		if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
377			if (!is_dual_plane(pipe->plane_state->format)) {
378				pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
379				pipes[0].pipe.src.unbounded_req_mode = true;
380				if (pipe->plane_state->src_rect.width >= 5120 &&
381					pipe->plane_state->src_rect.height >= 2880)
382					pipes[0].pipe.src.det_size_override = 320; // 5K or higher
383			}
384		}
385	} else
386		dcn32_determine_det_override(dc, context, pipes);
387}
388
389/**
390 * *******************************************************************************************
391 * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
392 *
393 * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
394 * there are situations where a shallow copy of the dc->current_state is created for the
395 * validation. In this case we want to save and restore the mall config because we always
396 * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
397 * fast validation). If we don't restore the subvp config in cases of fast validation +
398 * shallow copy of the dc->current_state, the dc->current_state will have a partially
399 * removed subvp state when we did not intend to remove it.
400 *
401 * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
402 *       validation. We don't expect this to happen in fast_validation=1 cases.
403 *
404 * @param [in]: dc: Current DC state
405 * @param [in]: context: New DC state to be programmed
406 * @param [out]: temp_config: struct used to cache the existing MALL state
407 *
408 * @return: void
409 *
410 * *******************************************************************************************
411 */
412void dcn32_save_mall_state(struct dc *dc,
413		struct dc_state *context,
414		struct mall_temp_config *temp_config)
415{
416	uint32_t i;
417
418	for (i = 0; i < dc->res_pool->pipe_count; i++) {
419		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
420
421		if (pipe->stream)
422			temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
423
424		if (pipe->plane_state)
425			temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
426	}
427}
428
429/**
430 * *******************************************************************************************
431 * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
432 *
433 * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
434 *
435 * @param [in]: dc: Current DC state
436 * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
437 * @param [in]: temp_config: struct that has the cached MALL state
438 *
439 * @return: void
440 *
441 * *******************************************************************************************
442 */
443void dcn32_restore_mall_state(struct dc *dc,
444		struct dc_state *context,
445		struct mall_temp_config *temp_config)
446{
447	uint32_t i;
448
449	for (i = 0; i < dc->res_pool->pipe_count; i++) {
450		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
451
452		if (pipe->stream)
453			pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
454
455		if (pipe->plane_state)
456			pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
457	}
458}
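/*
 * Minimal usage sketch for the save/restore pair (hypothetical fast-validate
 * path; mall_temp_config lives on the caller's stack):
 *
 *   struct mall_temp_config mall_temp_config;
 *
 *   if (fast_validate)
 *       dcn32_save_mall_state(dc, context, &mall_temp_config);
 *
 *   validate the context here, e.g. via dcn32_internal_validate_bw()
 *
 *   if (fast_validate)
 *       dcn32_restore_mall_state(dc, context, &mall_temp_config);
 */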