/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
#include "reg_helper.h"

#define REG(reg)\
	hubbub1->regs->reg
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

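/*
 * Convert a watermark expressed in nanoseconds into DLG refclk cycles and
 * clamp it to the register field's maximum. For example, 3000 ns at a
 * 48 MHz refclk programs 3000 * 48 / 1000 = 144 cycles; anything above
 * clamp_value (0x1fffff for all callers below) is capped.
 */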
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = wm_ns * refclk_mhz;

	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

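/*
 * Bring up the DCHVM block: request HOSTVM init, poll for the rIOMMU to
 * report active (up to 100 iterations with a 5 us delay, roughly a 500 us
 * budget), then reflect the DCHUBBUB power status, start rIOMMU prefetch,
 * re-enable dynamic clock gating and wait for prefetch completion. If the
 * rIOMMU never reports active, the rest of the sequence is skipped and
 * hubbub->riommu_active is not set.
 */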
void dcn21_dchvm_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t riommu_active;
	int i;

	//Init DCHVM block
	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

	//Poll until RIOMMU_ACTIVE = 1
	for (i = 0; i < 100; i++) {
		REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

		if (riommu_active)
			break;
		else
			udelay(5);
	}

	if (riommu_active) {
		//Reflect the power status of DCHUBBUB
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

		//Start rIOMMU prefetching
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

		// Enable dynamic clock gating
		REG_UPDATE_4(DCHVM_CLK_CTRL,
				HVM_DISPCLK_R_GATE_DIS, 0,
				HVM_DISPCLK_G_GATE_DIS, 0,
				HVM_DCFCLK_R_GATE_DIS, 0,
				HVM_DCFCLK_G_GATE_DIS, 0);

		//Poll until HOSTVM_PREFETCH_DONE = 1
		REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);

		hubbub->riommu_active = true;
	}
}

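/*
 * Program the system aperture (frame buffer and AGP ranges; the registers
 * take the address bits above bit 24, hence the >> 24) and, when a GART
 * range is provided, set up VMID 0 from the physical address config (page
 * table addresses are programmed 4 KB aligned, hence the >> 12). Returns
 * the number of VMIDs available to the caller.
 */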
int hubbub21_init_dchub(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return hubbub1->num_vmid;
}

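/*
 * Urgent watermark programming. A register is only rewritten when the
 * requested value is higher than the cached one, or when the caller says it
 * is safe to lower; a requested decrease that is not applied yet is reported
 * through the true return value (wm_pending). Note that the
 * FRAC_URG_BW_FLIP/NOM values for sets B, C and D are taken from watermark
 * set A.
 */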
bool hubbub21_program_urgent_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub1->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub1->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
		hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub1->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub1->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
		hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub1->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub1->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
		hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub1->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub1->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
		hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}

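/*
 * Stutter (self-refresh) watermarks: SR enter and SR exit times for clock
 * states A through D, converted to refclk cycles with the same
 * raise-immediately / lower-only-when-safe policy as the urgent watermarks.
 */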
bool hubbub21_program_stutter_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}

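/*
 * P-state (DRAM clock change) watermarks for clock states A through D,
 * following the same raise-immediately / lower-only-when-safe policy.
 */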
bool hubbub21_program_pstate_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}

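/*
 * Top-level watermark programming: run the urgent, stutter and p-state
 * helpers, configure the DCHub arbiter rate limiting described below, and
 * re-evaluate whether self-refresh (stutter) may be allowed. Returns true
 * if any requested lowering is still pending.
 */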
bool hubbub21_program_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the
	 * amortized schedule, the slack of the next winner is compared with ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit the request limit once the right number is known. The request limit for
	 * Renoir has not been decided yet, so program the maximum value (0x1FF) to disable it for now.
	 */
	REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	return wm_pending;
}

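/*
 * Read back the currently programmed watermarks (data urgent, SR enter,
 * SR exit, DRAM clock change) for sets A through D. Values are reported in
 * refclk cycles, exactly as held in the registers.
 */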
void hubbub21_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);
}

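/*
 * Workaround for DEDCN21-147: re-issue a write of the urgency watermark A
 * register with the value it already holds, forcing an extra register write
 * cycle without changing the programmed watermark.
 */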
static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

static const struct hubbub_funcs hubbub21_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub21_init_dchub,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub21_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
	.hubbub_read_state = hubbub2_read_state,
};

void hubbub21_construct(struct dcn20_hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->base.ctx = ctx;

	hubbub->base.funcs = &hubbub21_funcs;

	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	hubbub->debug_test_index_pstate = 0xB;
	hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.1, same size as DCN2.0 */
}
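
/*
 * Usage sketch (illustrative only, not part of the driver): a resource
 * constructor would typically allocate a dcn20_hubbub and hand it the
 * register/shift/mask tables before use. The table names below are
 * placeholders for whatever the resource file actually defines.
 *
 *	struct dcn20_hubbub *hubbub21 = kzalloc(sizeof(*hubbub21), GFP_KERNEL);
 *
 *	if (!hubbub21)
 *		return NULL;
 *
 *	hubbub21_construct(hubbub21, ctx,
 *			&hubbub_regs, &hubbub_shift, &hubbub_mask);
 *	return &hubbub21->base;
 */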