Linux Audio

Check our new training course

Loading...
v6.9.4
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: AMD
  23 *
  24 */
  25
  26
  27#include "dcn30/dcn30_hubbub.h"
  28#include "dcn31_hubbub.h"
  29#include "dm_services.h"
  30#include "reg_helper.h"
  31
  32
/*
 * Glue for the REG_* helpers in reg_helper.h: CTX, DC_LOGGER, REG() and
 * FN() are expected to resolve against a local "hubbub2"
 * (struct dcn20_hubbub *) declared in each function below.
 */
#define CTX \
	hubbub2->base.ctx
#define DC_LOGGER \
	hubbub2->base.ctx->logger
#define REG(reg)\
	hubbub2->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub2->shifts->field_name, hubbub2->masks->field_name

/* DCN3.1 exposes 16 virtual memory IDs; override any inherited value. */
#ifdef NUM_VMID
#undef NUM_VMID
#endif
#define NUM_VMID 16

/* CRB (DET + compressed buffer) space is allocated in 64KB segments. */
#define DCN31_CRB_SEGMENT_SIZE_KB 64
  50
/*
 * Cache the current CRB carve-out at init: read back the four per-pipe DET
 * allocations and the compressed-buffer size (all in segments) so that later
 * reprogramming can be validated against crb_size_segs, then program the
 * compbuf reserved space and DET depth.
 */
static void dcn31_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	/* Reserved space derived from the pixel chunk size: 64-byte units and
	 * ZS units (divisors per hardware spec — confirm against register doc).
	 */
	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F);
}
  75
/*
 * Program the DET (detail buffer) size for one HUBP instance.
 *
 * The requested size in KB is rounded up to whole 64KB CRB segments and
 * written to the per-instance DETn_CTRL register; the shadow copy in
 * hubbub2 is updated so the running total can be checked. An out-of-range
 * hubp_inst is silently ignored (default case).
 */
static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Round up to a whole number of CRB segments. */
	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
					DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
					DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
					DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
					DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}
	DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
	/* Should never be hit, if it is we have an erroneous hw config*/
	ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
}
 111
/*
 * Poll until the hardware reports that the previously programmed DET size
 * for the given HUBP instance has taken effect, i.e. DETn_SIZE_CURRENT
 * matches the shadow value cached in hubbub2 (REG_WAIT with the driver's
 * standard delay/retry arguments). Invalid instances are ignored.
 */
static void dcn31_wait_for_det_apply(struct hubbub *hubbub, int hubp_inst)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	switch (hubp_inst) {
	case 0:
		REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1000, 30);
		break;
	case 1:
		REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1000, 30);
		break;
	case 2:
		REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1000, 30);
		break;
	case 3:
		REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1000, 30);
		break;
	default:
		break;
	}
}
 133
/*
 * Program the compressed-buffer size (rounded up to 64KB CRB segments).
 *
 * Shrinking is always applied; growing only when safe_to_increase. Before
 * growing, wait for all four DET allocations to reach their programmed
 * sizes so the compbuf does not overlap DET space still being released.
 * The total carve-out is sanity-checked against crb_size_segs.
 */
static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config*/
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);

		hubbub2->compbuf_size_segments = compbuf_size_segments;
		/* Readback deliberately reuses the local as scratch: assert the
		 * GET succeeded (nonzero) and CONFIG_ERROR reads back as 0.
		 */
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
	}
}
 154
 155static uint32_t convert_and_clamp(
 156	uint32_t wm_ns,
 157	uint32_t refclk_mhz,
 158	uint32_t clamp_value)
 159{
 160	uint32_t ret_val = 0;
 161	ret_val = wm_ns * refclk_mhz;
 162	ret_val /= 1000;
 163
 164	if (ret_val > clamp_value) {
 165		/* clamping WMs is abnormal, unexpected and may lead to underflow*/
 166		ASSERT(0);
 167		ret_val = clamp_value;
 168	}
 169
 170	return ret_val;
 171}
 172
/*
 * Program the urgent watermark group for clock states A-D: the urgent data
 * watermark, the fractional urgent bandwidth targets for flip and nominal
 * traffic, and the refcyc-per-trip-to-memory urgent latency.
 *
 * For each field: if safe_to_lower, the requested value is programmed
 * unconditionally; otherwise only increases are applied immediately and a
 * requested decrease is deferred, reported by returning true (watermark
 * change still pending). ns-denominated values are converted to refclk
 * cycles and clamped via convert_and_clamp(); the shadow copy in
 * hubbub2->watermarks tracks what was actually programmed.
 */
static bool hubbub31_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}
 362
/*
 * Program the stutter (self-refresh) watermark group for clock states A-D:
 * SR enter+exit and SR exit watermarks, plus their Z8 power-state variants.
 *
 * Same policy as the urgent group: with safe_to_lower every value is
 * written; otherwise only increases are applied now and a requested
 * decrease is deferred, signalled by returning true. All values are
 * ns-denominated, converted to refclk cycles and clamped to the 20-bit
 * field maximum (0xfffff); hubbub2->watermarks shadows what was programmed.
 */
static bool hubbub31_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns =
				watermarks->a.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns =
				watermarks->b.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns =
				watermarks->c.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns =
				watermarks->d.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xfffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	return wm_pending;
}
 635
 636static bool hubbub31_program_pstate_watermarks(
 637		struct hubbub *hubbub,
 638		struct dcn_watermark_set *watermarks,
 639		unsigned int refclk_mhz,
 640		bool safe_to_lower)
 641{
 642	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
 643	uint32_t prog_wm_value;
 644
 645	bool wm_pending = false;
 646
 647	/* clock state A */
 648	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
 649			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
 650		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
 651				watermarks->a.cstate_pstate.pstate_change_ns;
 652		prog_wm_value = convert_and_clamp(
 653				watermarks->a.cstate_pstate.pstate_change_ns,
 654				refclk_mhz, 0xffff);
 655		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
 656				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
 657		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
 658			"HW register value = 0x%x\n\n",
 659			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
 660	} else if (watermarks->a.cstate_pstate.pstate_change_ns
 661			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
 662		wm_pending = true;
 663
 664	/* clock state B */
 665	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
 666			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
 667		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
 668				watermarks->b.cstate_pstate.pstate_change_ns;
 669		prog_wm_value = convert_and_clamp(
 670				watermarks->b.cstate_pstate.pstate_change_ns,
 671				refclk_mhz, 0xffff);
 672		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
 673				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
 674		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
 675			"HW register value = 0x%x\n\n",
 676			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
 677	} else if (watermarks->b.cstate_pstate.pstate_change_ns
 678			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
 679		wm_pending = false;
 680
 681	/* clock state C */
 682	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
 683			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
 684		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
 685				watermarks->c.cstate_pstate.pstate_change_ns;
 686		prog_wm_value = convert_and_clamp(
 687				watermarks->c.cstate_pstate.pstate_change_ns,
 688				refclk_mhz, 0xffff);
 689		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
 690				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
 691		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
 692			"HW register value = 0x%x\n\n",
 693			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
 694	} else if (watermarks->c.cstate_pstate.pstate_change_ns
 695			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
 696		wm_pending = true;
 697
 698	/* clock state D */
 699	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
 700			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
 701		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
 702				watermarks->d.cstate_pstate.pstate_change_ns;
 703		prog_wm_value = convert_and_clamp(
 704				watermarks->d.cstate_pstate.pstate_change_ns,
 705				refclk_mhz, 0xffff);
 706		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
 707				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
 708		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
 709			"HW register value = 0x%x\n\n",
 710			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
 711	} else if (watermarks->d.cstate_pstate.pstate_change_ns
 712			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
 713		wm_pending = true;
 714
 715	return wm_pending;
 716}
 717
 718static bool hubbub31_program_watermarks(
 719		struct hubbub *hubbub,
 720		struct dcn_watermark_set *watermarks,
 721		unsigned int refclk_mhz,
 722		bool safe_to_lower)
 723{
 724	bool wm_pending = false;
 725
 726	if (hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
 727		wm_pending = true;
 728
 729	if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
 730		wm_pending = true;
 731
 732	if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
 733		wm_pending = true;
 734
 735	/*
 736	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
 737	 * If the memory controller is fully utilized and the DCHub requestors are
 738	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
 739	 * from being committed and sent to the fabric.
 740	 * The utilization of the memory controller is approximated by ensuring that
 741	 * the number of outstanding requests is greater than a threshold specified
 742	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
 743	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
 744	 *
 745	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF)
 746	 * to turn off it for now.
 747	 */
 748	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
 749			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
 750	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
 751			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
 752
 753	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
 754	return wm_pending;
 755}
 756
 757static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
 758		unsigned int bytes_per_element)
 759{
 760	/* copied from DML.  might want to refactor DML to leverage from DML */
 761	/* DML : get_blk256_size */
 762	if (bytes_per_element == 1) {
 763		*blk256_width = 16;
 764		*blk256_height = 16;
 765	} else if (bytes_per_element == 2) {
 766		*blk256_width = 16;
 767		*blk256_height = 8;
 768	} else if (bytes_per_element == 4) {
 769		*blk256_width = 8;
 770		*blk256_height = 8;
 771	} else if (bytes_per_element == 8) {
 772		*blk256_width = 8;
 773		*blk256_height = 4;
 774	}
 775}
 776
 777static void hubbub31_det_request_size(
 778		unsigned int detile_buf_size,
 779		unsigned int height,
 780		unsigned int width,
 781		unsigned int bpe,
 782		bool *req128_horz_wc,
 783		bool *req128_vert_wc)
 784{
 785	unsigned int blk256_height = 0;
 786	unsigned int blk256_width = 0;
 787	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
 788
 789	hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe);
 790
 791	swath_bytes_horz_wc = width * blk256_height * bpe;
 792	swath_bytes_vert_wc = height * blk256_width * bpe;
 793
 794	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
 795			false : /* full 256B request */
 796			true; /* half 128b request */
 797
 798	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
 799			false : /* full 256B request */
 800			true; /* half 128b request */
 801}
 802
/*
 * Report the DCC (Delta Color Compression) capability for a surface.
 *
 * Fills @output with the supported DCC block-size controls for the surface
 * described by @input (format, swizzle, size, scan direction).  Returns
 * false (with @output zeroed / capable == 0) when DCC is disabled by debug
 * option, the pixel format or swizzle is unsupported, or the requested
 * mode conflicts with the disable_dcc debug policy.
 */
static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub->funcs->dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	/* Decide whether DET capacity forces half-size (128B) requests
	 * in either scan direction.
	 */
	hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
			input->surface_size.height,  input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* reg128 is true for either horz and vert
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	/* Exception for 64KB_R_X */
	if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
		dcc_control = dcc_control__128_128_xxx;

	/* NOTE: DCC_HALF_REQ_DISALBE is the (misspelled) upstream enum name. */
	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	/* Translate the chosen control into max block sizes / control flags. */
	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
		break;
	case dcc_control__256_128_128:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	}
	output->capable = true;
	output->const_color_support = true;

	return true;
}
 901
/*
 * Program the DCHUB system aperture from @pa_config and, when a GART range
 * is provided, set up the page-table config in VMID 0 (mirrored into
 * VMID 15), then initialize DCHVM.  Returns the number of VMIDs (NUM_VMID).
 */
int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	/* Aperture registers take the address >> 24, i.e. 16 MB granularity. */
	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	/* A zero-length range (start == end) means no GART page table. */
	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		/* Page-table start/end are programmed in 4 KB units (>> 12). */
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config);

		/* Same config into VMID 15 — presumably a reserved system VMID;
		 * TODO(review): confirm why 15 is mirrored.
		 */
		dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return NUM_VMID;
}
 937
 938static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
 939		unsigned int dccg_ref_freq_inKhz,
 940		unsigned int *dchub_ref_freq_inKhz)
 941{
 942	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
 943	uint32_t ref_div = 0;
 944	uint32_t ref_en = 0;
 945	unsigned int dc_refclk_khz = 24000;
 946
 947	REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div,
 948			DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en);
 949
 950	if (ref_en) {
 951		if (ref_div == 2)
 952			*dchub_ref_freq_inKhz = dc_refclk_khz / 2;
 953		else
 954			*dchub_ref_freq_inKhz = dc_refclk_khz;
 955
 956		/*
 957		 * The external Reference Clock may change based on the board or
 958		 * platform requirements and the programmable integer divide must
 959		 * be programmed to provide a suitable DLG RefClk frequency between
 960		 * a minimum of 20MHz and maximum of 50MHz
 961		 */
 962		if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000)
 963			ASSERT_CRITICAL(false);
 964
 965		return;
 966	} else {
 967		*dchub_ref_freq_inKhz = dc_refclk_khz;
 968
 969		// HUBBUB global timer must be enabled.
 970		ASSERT_CRITICAL(false);
 971		return;
 972	}
 973}
 974
/*
 * Poll the DCHUBBUB debug bus for the pstate-allow status.  Returns true
 * once the status bit asserts within the timeout; on timeout, forces
 * pstate allow (to avoid a system hang), logs the last debug word, and
 * returns false.  The force is reverted on the next call so status can be
 * re-checked.  Not reentrant: uses function-local static state.
 */
static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * Pstate latency is ~20us so if we wait over 40us and pstate allow
	 * still not asserted, we are probably stuck and going to hang
	 */
	const unsigned int pstate_wait_timeout_us = 100;
	const unsigned int pstate_wait_expected_timeout_us = 40;

	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data = 0;
	unsigned int i;

	if (forced_pstate_allow) {
		/* we hacked to force pstate allow to prevent hang last time
		 * we verify_allow_pstate_change_high.  so disable force
		 * here so we can check status
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* Select the pstate status word on the debug bus. */
	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		/* Debug bit is specific to ASIC. */
		if (debug_data & (1 << 26)) {
			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i);
			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}
1033
/*
 * One-time DCN3.1 HUBBUB init: apply clock-gating debug override and
 * hand SDP port control to DCN.
 */
void hubbub31_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Disable HUBBUB clock gating when the debug option requests it
	 * (GATE_DIS = 1 turns gating off).
	 */
	if (hubbub->ctx->dc->debug.disable_clock_gate) {
		/*done in hwseq*/
		/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
				DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
				DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
	}

	/*
	only the DCN will determine when to connect the SDP port
	*/
	REG_UPDATE(DCHUBBUB_SDPIF_CFG0,	SDPIF_PORT_CONTROL, 1);
}
/* DCN3.1 hubbub vtable: DCN3.1-specific entries plus inherited
 * DCN1/DCN2/DCN3 implementations where behavior is unchanged.
 */
static const struct hubbub_funcs hubbub31_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub31_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub31_get_dchub_ref_freq,
	.program_watermarks = hubbub31_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
	.program_det_size = dcn31_program_det_size,
	.wait_for_det_apply = dcn31_wait_for_det_apply,
	.program_compbuf_size = dcn31_program_compbuf_size,
	.init_crb = dcn31_init_crb,
	.hubbub_read_state = hubbub2_read_state,
};
1071
1072void hubbub31_construct(struct dcn20_hubbub *hubbub31,
1073	struct dc_context *ctx,
1074	const struct dcn_hubbub_registers *hubbub_regs,
1075	const struct dcn_hubbub_shift *hubbub_shift,
1076	const struct dcn_hubbub_mask *hubbub_mask,
1077	int det_size_kb,
1078	int pixel_chunk_size_kb,
1079	int config_return_buffer_size_kb)
1080{
1081
1082	hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask);
1083	hubbub31->base.funcs = &hubbub31_funcs;
1084	hubbub31->detile_buf_size = det_size_kb * 1024;
1085	hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
1086	hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
1087
1088	hubbub31->debug_test_index_pstate = 0x6;
1089}
1090
v5.14.15
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: AMD
 23 *
 24 */
 25
 26
 27#include "dcn30/dcn30_hubbub.h"
 28#include "dcn31_hubbub.h"
 29#include "dm_services.h"
 30#include "reg_helper.h"
 31
 32
 33#define CTX \
 34	hubbub2->base.ctx
 35#define DC_LOGGER \
 36	hubbub2->base.ctx->logger
 37#define REG(reg)\
 38	hubbub2->regs->reg
 39
 40#undef FN
 41#define FN(reg_name, field_name) \
 42	hubbub2->shifts->field_name, hubbub2->masks->field_name
 43
 44#ifdef NUM_VMID
 45#undef NUM_VMID
 46#endif
 47#define NUM_VMID 16
 48
 49#define DCN31_CRB_SEGMENT_SIZE_KB 64
 50
/*
 * Initialize CRB (Configurable Return Buffer) bookkeeping: snapshot the
 * current DET0-3 and COMPBUF sizes from hardware into the hubbub struct,
 * reserve COMPBUF space sized from the pixel chunk, and program DET depth.
 */
static void dcn31_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	/* Reserved space in 64B units and ZS units derived from chunk size. */
	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F);
}
 75
/*
 * Program the DET (Data Enable Token buffer) size for one HUBP instance.
 * @det_buffer_size_in_kbyte is rounded up to whole 64 KB CRB segments;
 * the cached per-DET size is updated so the total-allocation sanity check
 * below can run against software state.
 */
static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Round up to whole segments. */
	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
					DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
					DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
					DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
					DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}

	/* Should never be hit, if it is we have an erroneous hw config*/
	ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
}
110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Program the compressed buffer (COMPBUF) size in 64 KB CRB segments.
 * Shrinks are applied immediately; growth only when @safe_to_increase,
 * and only after waiting for all DET resizes to land so the CRB is not
 * transiently over-allocated.
 */
static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			/* Growing: wait until every DET has reached its
			 * programmed size before taking more CRB space.
			 */
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config*/
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
		REG_WAIT(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, compbuf_size_segments, 1, 100);
		hubbub2->compbuf_size_segments = compbuf_size_segments;
	}
}
131
132static uint32_t convert_and_clamp(
133	uint32_t wm_ns,
134	uint32_t refclk_mhz,
135	uint32_t clamp_value)
136{
137	uint32_t ret_val = 0;
138	ret_val = wm_ns * refclk_mhz;
139	ret_val /= 1000;
140
141	if (ret_val > clamp_value)
 
 
142		ret_val = clamp_value;
 
143
144	return ret_val;
145}
146
/*
 * Program the urgent watermark group (data urgency, fractional urgent
 * bandwidth for flip/nominal, urgent latency) for clock states A-D.
 *
 * For each field: program the new value when raising it, or when
 * @safe_to_lower permits lowering; otherwise remember that a lower value
 * is still pending.  Nanosecond values are converted to refclk cycles via
 * convert_and_clamp(); frac_urg_bw_* are written as-is.
 *
 * Returns true if any watermark could not be lowered yet (caller must
 * re-program later with safe_to_lower = true).
 */
static bool hubbub31_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}
336
337static bool hubbub31_program_stutter_watermarks(
338		struct hubbub *hubbub,
339		struct dcn_watermark_set *watermarks,
340		unsigned int refclk_mhz,
341		bool safe_to_lower)
342{
343	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
344	uint32_t prog_wm_value;
345	bool wm_pending = false;
346
347	/* clock state A */
348	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
349			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
350		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
351				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
352		prog_wm_value = convert_and_clamp(
353				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
354				refclk_mhz, 0x1fffff);
355		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
356				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
357		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
358			"HW register value = 0x%x\n",
359			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
360	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
361			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
362		wm_pending = true;
363
364	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
365			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
366		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
367				watermarks->a.cstate_pstate.cstate_exit_ns;
368		prog_wm_value = convert_and_clamp(
369				watermarks->a.cstate_pstate.cstate_exit_ns,
370				refclk_mhz, 0x1fffff);
371		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
372				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
373		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
374			"HW register value = 0x%x\n",
375			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
376	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
377			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
378		wm_pending = true;
379
380	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
381			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
382		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns =
383				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
384		prog_wm_value = convert_and_clamp(
385				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
386				refclk_mhz, 0x1fffff);
387		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
388				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
389		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
390			"HW register value = 0x%x\n",
391			watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
392	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
393			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns)
394		wm_pending = true;
395
396	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns
397			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) {
398		hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns =
399				watermarks->a.cstate_pstate.cstate_exit_z8_ns;
400		prog_wm_value = convert_and_clamp(
401				watermarks->a.cstate_pstate.cstate_exit_z8_ns,
402				refclk_mhz, 0x1fffff);
403		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
404				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
405		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
406			"HW register value = 0x%x\n",
407			watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
408	} else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns
409			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns)
410		wm_pending = true;
411
412	/* clock state B */
413	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
414			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
415		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
416				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
417		prog_wm_value = convert_and_clamp(
418				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
419				refclk_mhz, 0x1fffff);
420		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
421				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
422		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
423			"HW register value = 0x%x\n",
424			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
425	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
426			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
427		wm_pending = true;
428
429	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
430			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
431		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
432				watermarks->b.cstate_pstate.cstate_exit_ns;
433		prog_wm_value = convert_and_clamp(
434				watermarks->b.cstate_pstate.cstate_exit_ns,
435				refclk_mhz, 0x1fffff);
436		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
437				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
438		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
439			"HW register value = 0x%x\n",
440			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
441	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
442			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
443		wm_pending = true;
444
445	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
446			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
447		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns =
448				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
449		prog_wm_value = convert_and_clamp(
450				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
451				refclk_mhz, 0x1fffff);
452		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
453				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
454		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
455			"HW register value = 0x%x\n",
456			watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
457	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
458			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns)
459		wm_pending = true;
460
461	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns
462			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) {
463		hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns =
464				watermarks->b.cstate_pstate.cstate_exit_z8_ns;
465		prog_wm_value = convert_and_clamp(
466				watermarks->b.cstate_pstate.cstate_exit_z8_ns,
467				refclk_mhz, 0x1fffff);
468		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
469				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
470		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
471			"HW register value = 0x%x\n",
472			watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
473	} else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns
474			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns)
475		wm_pending = true;
476
477	/* clock state C */
478	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
479			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
480		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
481				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
482		prog_wm_value = convert_and_clamp(
483				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
484				refclk_mhz, 0x1fffff);
485		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
486				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
487		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
488			"HW register value = 0x%x\n",
489			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
490	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
491			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
492		wm_pending = true;
493
494	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
495			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
496		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
497				watermarks->c.cstate_pstate.cstate_exit_ns;
498		prog_wm_value = convert_and_clamp(
499				watermarks->c.cstate_pstate.cstate_exit_ns,
500				refclk_mhz, 0x1fffff);
501		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
502				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
503		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
504			"HW register value = 0x%x\n",
505			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
506	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
507			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
508		wm_pending = true;
509
510	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
511			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
512		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns =
513				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
514		prog_wm_value = convert_and_clamp(
515				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
516				refclk_mhz, 0x1fffff);
517		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
518				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
519		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
520			"HW register value = 0x%x\n",
521			watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
522	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
523			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns)
524		wm_pending = true;
525
526	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns
527			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) {
528		hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns =
529				watermarks->c.cstate_pstate.cstate_exit_z8_ns;
530		prog_wm_value = convert_and_clamp(
531				watermarks->c.cstate_pstate.cstate_exit_z8_ns,
532				refclk_mhz, 0x1fffff);
533		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
534				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
535		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
536			"HW register value = 0x%x\n",
537			watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
538	} else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns
539			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns)
540		wm_pending = true;
541
542	/* clock state D */
543	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
544			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
545		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
546				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
547		prog_wm_value = convert_and_clamp(
548				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
549				refclk_mhz, 0x1fffff);
550		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
551				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
552		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
553			"HW register value = 0x%x\n",
554			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
555	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
556			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
557		wm_pending = true;
558
559	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
560			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
561		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
562				watermarks->d.cstate_pstate.cstate_exit_ns;
563		prog_wm_value = convert_and_clamp(
564				watermarks->d.cstate_pstate.cstate_exit_ns,
565				refclk_mhz, 0x1fffff);
566		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
567				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
568		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
569			"HW register value = 0x%x\n",
570			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
571	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
572			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
573		wm_pending = true;
574
575	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
576			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
577		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns =
578				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
579		prog_wm_value = convert_and_clamp(
580				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
581				refclk_mhz, 0x1fffff);
582		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
583				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
584		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
585			"HW register value = 0x%x\n",
586			watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
587	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
588			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns)
589		wm_pending = true;
590
591	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns
592			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) {
593		hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns =
594				watermarks->d.cstate_pstate.cstate_exit_z8_ns;
595		prog_wm_value = convert_and_clamp(
596				watermarks->d.cstate_pstate.cstate_exit_z8_ns,
597				refclk_mhz, 0x1fffff);
598		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
599				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
600		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
601			"HW register value = 0x%x\n",
602			watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
603	} else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns
604			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns)
605		wm_pending = true;
606
607	return wm_pending;
608}
609
610static bool hubbub31_program_pstate_watermarks(
611		struct hubbub *hubbub,
612		struct dcn_watermark_set *watermarks,
613		unsigned int refclk_mhz,
614		bool safe_to_lower)
615{
616	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
617	uint32_t prog_wm_value;
618
619	bool wm_pending = false;
620
621	/* clock state A */
622	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
623			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
624		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
625				watermarks->a.cstate_pstate.pstate_change_ns;
626		prog_wm_value = convert_and_clamp(
627				watermarks->a.cstate_pstate.pstate_change_ns,
628				refclk_mhz, 0x1fffff);
629		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
630				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
631		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
632			"HW register value = 0x%x\n\n",
633			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
634	} else if (watermarks->a.cstate_pstate.pstate_change_ns
635			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
636		wm_pending = true;
637
638	/* clock state B */
639	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
640			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
641		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
642				watermarks->b.cstate_pstate.pstate_change_ns;
643		prog_wm_value = convert_and_clamp(
644				watermarks->b.cstate_pstate.pstate_change_ns,
645				refclk_mhz, 0x1fffff);
646		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
647				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
648		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
649			"HW register value = 0x%x\n\n",
650			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
651	} else if (watermarks->b.cstate_pstate.pstate_change_ns
652			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
653		wm_pending = false;
654
655	/* clock state C */
656	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
657			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
658		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
659				watermarks->c.cstate_pstate.pstate_change_ns;
660		prog_wm_value = convert_and_clamp(
661				watermarks->c.cstate_pstate.pstate_change_ns,
662				refclk_mhz, 0x1fffff);
663		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
664				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
665		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
666			"HW register value = 0x%x\n\n",
667			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
668	} else if (watermarks->c.cstate_pstate.pstate_change_ns
669			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
670		wm_pending = true;
671
672	/* clock state D */
673	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
674			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
675		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
676				watermarks->d.cstate_pstate.pstate_change_ns;
677		prog_wm_value = convert_and_clamp(
678				watermarks->d.cstate_pstate.pstate_change_ns,
679				refclk_mhz, 0x1fffff);
680		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
681				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
682		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
683			"HW register value = 0x%x\n\n",
684			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
685	} else if (watermarks->d.cstate_pstate.pstate_change_ns
686			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
687		wm_pending = true;
688
689	return wm_pending;
690}
691
692static bool hubbub31_program_watermarks(
693		struct hubbub *hubbub,
694		struct dcn_watermark_set *watermarks,
695		unsigned int refclk_mhz,
696		bool safe_to_lower)
697{
698	bool wm_pending = false;
699
700	if (hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
701		wm_pending = true;
702
703	if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
704		wm_pending = true;
705
706	if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
707		wm_pending = true;
708
709	/*
710	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
711	 * If the memory controller is fully utilized and the DCHub requestors are
712	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
713	 * from being committed and sent to the fabric.
714	 * The utilization of the memory controller is approximated by ensuring that
715	 * the number of outstanding requests is greater than a threshold specified
716	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
717	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
718	 *
719	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF)
720	 * to turn off it for now.
721	 */
722	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
723			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
724	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
725			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
726
727	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
728	return wm_pending;
729}
730
/*
 * Return the width/height (in elements) of a 256-byte block for the given
 * element size.  Mirrors DML's get_blk256_size(); might want to refactor
 * DML so this can be shared.  Outputs are left untouched for element sizes
 * other than 1/2/4/8 bytes.
 */
static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	switch (bytes_per_element) {
	case 1:
		*blk256_width = 16;
		*blk256_height = 16;
		break;
	case 2:
		*blk256_width = 16;
		*blk256_height = 8;
		break;
	case 4:
		*blk256_width = 8;
		*blk256_height = 8;
		break;
	case 8:
		*blk256_width = 8;
		*blk256_height = 4;
		break;
	}
}
750
751static void hubbub31_det_request_size(
752		unsigned int detile_buf_size,
753		unsigned int height,
754		unsigned int width,
755		unsigned int bpe,
756		bool *req128_horz_wc,
757		bool *req128_vert_wc)
758{
759	unsigned int blk256_height = 0;
760	unsigned int blk256_width = 0;
761	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
762
763	hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe);
764
765	swath_bytes_horz_wc = width * blk256_height * bpe;
766	swath_bytes_vert_wc = height * blk256_width * bpe;
767
768	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
769			false : /* full 256B request */
770			true; /* half 128b request */
771
772	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
773			false : /* full 256B request */
774			true; /* half 128b request */
775}
776
/*
 * Determine whether DCC (delta color compression) can be enabled for the
 * given surface parameters and, if so, which uncompressed/compressed
 * request block sizes to report.
 *
 * Returns true and fills *output on success; returns false (with *output
 * zeroed) when DCC is disabled by debug option or unsupported for the
 * pixel format / swizzle mode / request-size combination.
 */
static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	/* DCC fully disabled via debug option. */
	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	/* Pixel format must be compressible; also yields bytes per element. */
	if (!hubbub->funcs->dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	/* Swizzle mode determines segment contiguity per scan direction. */
	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	/* Check whether 128B (half) requests are needed for either scan. */
	hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
			input->surface_size.height,  input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	/* Pick the DCC control mode from request size and segment order. */
	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* reg128 is true for either horz and vert
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	/* Exception for 64KB_R_X */
	if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
		dcc_control = dcc_control__128_128_xxx;

	/* Half-request modes may be disabled via debug option
	 * (enum name DISALBE is a pre-existing typo in the enum definition).
	 */
	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	/* Translate the chosen mode into the reported capability struct. */
	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
		break;
	case dcc_control__256_128_128:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	}
	output->capable = true;
	output->const_color_support = true;

	return true;
}
875
876static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
877		struct dcn_hubbub_phys_addr_config *pa_config)
878{
879	hubbub3_init_dchub_sys_ctx(hubbub, pa_config);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
880
881	dcn21_dchvm_init(hubbub);
882
883	return NUM_VMID;
884}
885
886static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
887		unsigned int dccg_ref_freq_inKhz,
888		unsigned int *dchub_ref_freq_inKhz)
889{
890	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
891	uint32_t ref_div = 0;
892	uint32_t ref_en = 0;
893	unsigned int dc_refclk_khz = 24000;
894
895	REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div,
896			DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en);
897
898	if (ref_en) {
899		if (ref_div == 2)
900			*dchub_ref_freq_inKhz = dc_refclk_khz / 2;
901		else
902			*dchub_ref_freq_inKhz = dc_refclk_khz;
903
904		/*
905		 * The external Reference Clock may change based on the board or
906		 * platform requirements and the programmable integer divide must
907		 * be programmed to provide a suitable DLG RefClk frequency between
908		 * a minimum of 20MHz and maximum of 50MHz
909		 */
910		if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000)
911			ASSERT_CRITICAL(false);
912
913		return;
914	} else {
915		*dchub_ref_freq_inKhz = dc_refclk_khz;
916
917		// HUBBUB global timer must be enabled.
918		ASSERT_CRITICAL(false);
919		return;
920	}
921}
922
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* DCN3.1 hubbub function table.  Mostly reuses DCN1/2/3 implementations;
 * DCN31-specific entries cover DCHUB sys-ctx init, DCC capability,
 * DCHUB refclk derivation and DET/CompBuf/CRB programming.
 */
static const struct hubbub_funcs hubbub31_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub31_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub31_get_dchub_ref_freq,
	.program_watermarks = hubbub31_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,

	.program_det_size = dcn31_program_det_size,

	.program_compbuf_size = dcn31_program_compbuf_size,
	.init_crb = dcn31_init_crb

};
939
940void hubbub31_construct(struct dcn20_hubbub *hubbub31,
941	struct dc_context *ctx,
942	const struct dcn_hubbub_registers *hubbub_regs,
943	const struct dcn_hubbub_shift *hubbub_shift,
944	const struct dcn_hubbub_mask *hubbub_mask,
945	int det_size_kb,
946	int pixel_chunk_size_kb,
947	int config_return_buffer_size_kb)
948{
949
950	hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask);
951	hubbub31->base.funcs = &hubbub31_funcs;
952	hubbub31->detile_buf_size = det_size_kb * 1024;
953	hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
954	hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
 
 
955}
956