Loading...
1/*
2* Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include <linux/delay.h>
26#include "dm_services.h"
27#include "dcn20/dcn20_hubbub.h"
28#include "dcn21_hubbub.h"
29#include "reg_helper.h"
30
/*
 * Register-access convenience macros; each function using them is
 * expected to declare a local `hubbub1` (struct dcn20_hubbub *).
 *
 * Note: REG, CTX and FN were previously defined twice with identical
 * bodies (harmless but redundant); the duplicates are removed.
 */
#define REG(reg)\
	hubbub1->regs->reg

#define DC_LOGGER \
	hubbub1->base.ctx->logger

#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name
51
/*
 * Convert a watermark expressed in nanoseconds into reference-clock
 * cycles (refclk is given in MHz, hence the /1000), capping the result
 * at @clamp_value so it fits the destination register field.
 */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t cycles = (wm_ns * refclk_mhz) / 1000;

	return cycles > clamp_value ? clamp_value : cycles;
}
66
/*
 * Bring up the DC host-VM (DCHVM) block.
 *
 * Requests DCHVM init, polls (up to 100 * 5us) for the rIOMMU to report
 * active, and — only if it does — reflects power status, kicks off rIOMMU
 * prefetch, enables dynamic clock gating and waits for prefetch completion.
 * If the rIOMMU never comes up, the function returns without marking
 * hubbub->riommu_active.
 */
void dcn21_dchvm_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t riommu_active;
	int i;

	//Init DCHVM block
	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

	//Poll until RIOMMU_ACTIVE = 1
	for (i = 0; i < 100; i++) {
		REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

		if (riommu_active)
			break;
		else
			udelay(5);
	}

	if (riommu_active) {
		//Reflect the power status of DCHUBBUB
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

		//Start rIOMMU prefetching
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

		// Enable dynamic clock gating
		REG_UPDATE_4(DCHVM_CLK_CTRL,
				HVM_DISPCLK_R_GATE_DIS, 0,
				HVM_DISPCLK_G_GATE_DIS, 0,
				HVM_DCFCLK_R_GATE_DIS, 0,
				HVM_DCFCLK_G_GATE_DIS, 0);

		//Poll until HOSTVM_PREFETCH_DONE = 1 (5us interval, 100 tries)
		REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);

		/* Record successful bring-up for later queries */
		hubbub->riommu_active = true;
	}
}
106
/*
 * Program the DCHUB system aperture and (optionally) VMID 0 page tables
 * from the physical-address configuration supplied by the DM layer.
 *
 * Aperture addresses are programmed in 16 MB units (>> 24); page table
 * start/end addresses in 4 KB units (>> 12).
 *
 * Returns the number of VMIDs supported by this hubbub instance.
 */
int hubbub21_init_dchub(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	/* A zero-length GART range means no page tables to program */
	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		/* Low bit ORed into the base address; original note calls this a
		 * hack — presumably a valid/enable bit, TODO confirm semantics. */
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return hubbub1->num_vmid;
}
140
141bool hubbub21_program_urgent_watermarks(
142 struct hubbub *hubbub,
143 struct dcn_watermark_set *watermarks,
144 unsigned int refclk_mhz,
145 bool safe_to_lower)
146{
147 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
148 uint32_t prog_wm_value;
149 bool wm_pending = false;
150
151 /* Repeat for water mark set A, B, C and D. */
152 /* clock state A */
153 if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
154 hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
155 prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
156 refclk_mhz, 0x1fffff);
157 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
158 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
159 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
160
161 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
162 "HW register value = 0x%x\n",
163 watermarks->a.urgent_ns, prog_wm_value);
164 } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
165 wm_pending = true;
166
167 /* determine the transfer time for a quantity of data for a particular requestor.*/
168 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
169 > hubbub1->watermarks.a.frac_urg_bw_flip) {
170 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
171
172 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
173 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
174 } else if (watermarks->a.frac_urg_bw_flip
175 < hubbub1->watermarks.a.frac_urg_bw_flip)
176 wm_pending = true;
177
178 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
179 > hubbub1->watermarks.a.frac_urg_bw_nom) {
180 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
181
182 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
183 DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
184 } else if (watermarks->a.frac_urg_bw_nom
185 < hubbub1->watermarks.a.frac_urg_bw_nom)
186 wm_pending = true;
187
188 if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
189 hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
190 prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
191 refclk_mhz, 0x1fffff);
192 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
193 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
194 } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
195 wm_pending = true;
196
197 /* clock state B */
198 if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
199 hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
200 prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
201 refclk_mhz, 0x1fffff);
202 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
203 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
204 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);
205
206 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
207 "HW register value = 0x%x\n",
208 watermarks->b.urgent_ns, prog_wm_value);
209 } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
210 wm_pending = true;
211
212 /* determine the transfer time for a quantity of data for a particular requestor.*/
213 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
214 > hubbub1->watermarks.a.frac_urg_bw_flip) {
215 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
216
217 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
218 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
219 } else if (watermarks->a.frac_urg_bw_flip
220 < hubbub1->watermarks.a.frac_urg_bw_flip)
221 wm_pending = true;
222
223 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
224 > hubbub1->watermarks.a.frac_urg_bw_nom) {
225 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
226
227 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
228 DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
229 } else if (watermarks->a.frac_urg_bw_nom
230 < hubbub1->watermarks.a.frac_urg_bw_nom)
231 wm_pending = true;
232
233 if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
234 hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
235 prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
236 refclk_mhz, 0x1fffff);
237 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
238 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
239 } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
240 wm_pending = true;
241
242 /* clock state C */
243 if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
244 hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
245 prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
246 refclk_mhz, 0x1fffff);
247 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
248 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
249 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);
250
251 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
252 "HW register value = 0x%x\n",
253 watermarks->c.urgent_ns, prog_wm_value);
254 } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
255 wm_pending = true;
256
257 /* determine the transfer time for a quantity of data for a particular requestor.*/
258 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
259 > hubbub1->watermarks.a.frac_urg_bw_flip) {
260 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
261
262 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
263 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
264 } else if (watermarks->a.frac_urg_bw_flip
265 < hubbub1->watermarks.a.frac_urg_bw_flip)
266 wm_pending = true;
267
268 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
269 > hubbub1->watermarks.a.frac_urg_bw_nom) {
270 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
271
272 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
273 DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
274 } else if (watermarks->a.frac_urg_bw_nom
275 < hubbub1->watermarks.a.frac_urg_bw_nom)
276 wm_pending = true;
277
278 if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
279 hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
280 prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
281 refclk_mhz, 0x1fffff);
282 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
283 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
284 } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
285 wm_pending = true;
286
287 /* clock state D */
288 if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
289 hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
290 prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
291 refclk_mhz, 0x1fffff);
292 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
293 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
294 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);
295
296 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
297 "HW register value = 0x%x\n",
298 watermarks->d.urgent_ns, prog_wm_value);
299 } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
300 wm_pending = true;
301
302 /* determine the transfer time for a quantity of data for a particular requestor.*/
303 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
304 > hubbub1->watermarks.a.frac_urg_bw_flip) {
305 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
306
307 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
308 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
309 } else if (watermarks->a.frac_urg_bw_flip
310 < hubbub1->watermarks.a.frac_urg_bw_flip)
311 wm_pending = true;
312
313 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
314 > hubbub1->watermarks.a.frac_urg_bw_nom) {
315 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
316
317 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
318 DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
319 } else if (watermarks->a.frac_urg_bw_nom
320 < hubbub1->watermarks.a.frac_urg_bw_nom)
321 wm_pending = true;
322
323 if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
324 hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
325 prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
326 refclk_mhz, 0x1fffff);
327 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
328 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
329 } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
330 wm_pending = true;
331
332 return wm_pending;
333}
334
335bool hubbub21_program_stutter_watermarks(
336 struct hubbub *hubbub,
337 struct dcn_watermark_set *watermarks,
338 unsigned int refclk_mhz,
339 bool safe_to_lower)
340{
341 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
342 uint32_t prog_wm_value;
343 bool wm_pending = false;
344
345 /* clock state A */
346 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
347 > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
348 hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
349 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
350 prog_wm_value = convert_and_clamp(
351 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
352 refclk_mhz, 0x1fffff);
353 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
354 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
355 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
356 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
357 "HW register value = 0x%x\n",
358 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
359 } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
360 < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
361 wm_pending = true;
362
363 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
364 > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
365 hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
366 watermarks->a.cstate_pstate.cstate_exit_ns;
367 prog_wm_value = convert_and_clamp(
368 watermarks->a.cstate_pstate.cstate_exit_ns,
369 refclk_mhz, 0x1fffff);
370 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
371 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
372 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
373 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
374 "HW register value = 0x%x\n",
375 watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
376 } else if (watermarks->a.cstate_pstate.cstate_exit_ns
377 < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
378 wm_pending = true;
379
380 /* clock state B */
381 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
382 > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
383 hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
384 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
385 prog_wm_value = convert_and_clamp(
386 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
387 refclk_mhz, 0x1fffff);
388 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
389 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
390 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
391 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
392 "HW register value = 0x%x\n",
393 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
394 } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
395 < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
396 wm_pending = true;
397
398 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
399 > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
400 hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
401 watermarks->b.cstate_pstate.cstate_exit_ns;
402 prog_wm_value = convert_and_clamp(
403 watermarks->b.cstate_pstate.cstate_exit_ns,
404 refclk_mhz, 0x1fffff);
405 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
406 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
407 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
408 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
409 "HW register value = 0x%x\n",
410 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
411 } else if (watermarks->b.cstate_pstate.cstate_exit_ns
412 < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
413 wm_pending = true;
414
415 /* clock state C */
416 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
417 > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
418 hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
419 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
420 prog_wm_value = convert_and_clamp(
421 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
422 refclk_mhz, 0x1fffff);
423 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
424 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
425 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
426 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
427 "HW register value = 0x%x\n",
428 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
429 } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
430 < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
431 wm_pending = true;
432
433 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
434 > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
435 hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
436 watermarks->c.cstate_pstate.cstate_exit_ns;
437 prog_wm_value = convert_and_clamp(
438 watermarks->c.cstate_pstate.cstate_exit_ns,
439 refclk_mhz, 0x1fffff);
440 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
441 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
442 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
443 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
444 "HW register value = 0x%x\n",
445 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
446 } else if (watermarks->c.cstate_pstate.cstate_exit_ns
447 < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
448 wm_pending = true;
449
450 /* clock state D */
451 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
452 > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
453 hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
454 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
455 prog_wm_value = convert_and_clamp(
456 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
457 refclk_mhz, 0x1fffff);
458 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
459 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
460 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
461 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
462 "HW register value = 0x%x\n",
463 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
464 } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
465 < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
466 wm_pending = true;
467
468 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
469 > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
470 hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
471 watermarks->d.cstate_pstate.cstate_exit_ns;
472 prog_wm_value = convert_and_clamp(
473 watermarks->d.cstate_pstate.cstate_exit_ns,
474 refclk_mhz, 0x1fffff);
475 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
476 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
477 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
478 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
479 "HW register value = 0x%x\n",
480 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
481 } else if (watermarks->d.cstate_pstate.cstate_exit_ns
482 < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
483 wm_pending = true;
484
485 return wm_pending;
486}
487
488bool hubbub21_program_pstate_watermarks(
489 struct hubbub *hubbub,
490 struct dcn_watermark_set *watermarks,
491 unsigned int refclk_mhz,
492 bool safe_to_lower)
493{
494 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
495 uint32_t prog_wm_value;
496
497 bool wm_pending = false;
498
499 /* clock state A */
500 if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
501 > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
502 hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
503 watermarks->a.cstate_pstate.pstate_change_ns;
504 prog_wm_value = convert_and_clamp(
505 watermarks->a.cstate_pstate.pstate_change_ns,
506 refclk_mhz, 0x1fffff);
507 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
508 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
509 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
510 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
511 "HW register value = 0x%x\n\n",
512 watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
513 } else if (watermarks->a.cstate_pstate.pstate_change_ns
514 < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
515 wm_pending = true;
516
517 /* clock state B */
518 if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
519 > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
520 hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
521 watermarks->b.cstate_pstate.pstate_change_ns;
522 prog_wm_value = convert_and_clamp(
523 watermarks->b.cstate_pstate.pstate_change_ns,
524 refclk_mhz, 0x1fffff);
525 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
526 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
527 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
528 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
529 "HW register value = 0x%x\n\n",
530 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
531 } else if (watermarks->b.cstate_pstate.pstate_change_ns
532 < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
533 wm_pending = false;
534
535 /* clock state C */
536 if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
537 > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
538 hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
539 watermarks->c.cstate_pstate.pstate_change_ns;
540 prog_wm_value = convert_and_clamp(
541 watermarks->c.cstate_pstate.pstate_change_ns,
542 refclk_mhz, 0x1fffff);
543 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
544 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
545 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
546 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
547 "HW register value = 0x%x\n\n",
548 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
549 } else if (watermarks->c.cstate_pstate.pstate_change_ns
550 < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
551 wm_pending = true;
552
553 /* clock state D */
554 if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
555 > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
556 hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
557 watermarks->d.cstate_pstate.pstate_change_ns;
558 prog_wm_value = convert_and_clamp(
559 watermarks->d.cstate_pstate.pstate_change_ns,
560 refclk_mhz, 0x1fffff);
561 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
562 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
563 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
564 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
565 "HW register value = 0x%x\n\n",
566 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
567 } else if (watermarks->d.cstate_pstate.pstate_change_ns
568 < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
569 wm_pending = true;
570
571 return wm_pending;
572}
573
/*
 * Program all watermark groups (urgent, stutter, p-state) and the DCHub
 * arbiter rate-limiting registers.
 *
 * Returns true if any individual watermark could not be lowered yet and
 * remains pending (see the per-group helpers).
 */
bool hubbub21_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);	/* 60us of slack in refclk cycles */
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,	/* max value: effectively disabled (see TODO) */
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

	/* Self-refresh allowed unless stutter is disabled via debug option */
	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	return wm_pending;
}
617
/*
 * Read the currently-programmed watermark sets A-D back from hardware
 * into @wm (zeroed first). For each set this captures the data urgency,
 * self-refresh enter, self-refresh exit and DRAM clock change watermarks.
 */
void hubbub21_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);
}
682
/*
 * Workaround for erratum DEDCN21-147: read the urgency watermark A
 * register and write the same value back. The seemingly redundant
 * read-then-write IS the workaround — do not "simplify" it away.
 * (Presumably it re-latches the value in hardware — behavior per
 * erratum, not visible from this code.)
 */
static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
691
/*
 * DCN 2.1 hubbub vtable. Most entry points reuse the DCN 2.0
 * implementations (hubbub2_*); only init_dchub_sys_ctx, wm_read_state,
 * program_watermarks and the DEDCN21-147 workaround are DCN 2.1 specific.
 */
static const struct hubbub_funcs hubbub21_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub21_init_dchub,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub21_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
	.hubbub_read_state = hubbub2_read_state,
};
706
/*
 * Initialize a dcn20_hubbub instance for DCN 2.1 hardware: install the
 * DCN 2.1 vtable and wire up the caller-provided register/shift/mask
 * tables.
 */
void hubbub21_construct(struct dcn20_hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->base.ctx = ctx;

	hubbub->base.funcs = &hubbub21_funcs;

	/* Register address/shift/mask tables supplied by resource construction */
	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	/* NOTE(review): presumably the pipe index used for p-state debug/test — confirm */
	hubbub->debug_test_index_pstate = 0xB;
	hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
1/*
2* Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include <linux/delay.h>
26#include "dm_services.h"
27#include "dcn20/dcn20_hubbub.h"
28#include "dcn21_hubbub.h"
29#include "reg_helper.h"
30
/*
 * Register-access convenience macros; each function using them is
 * expected to declare a local `hubbub1` (struct dcn20_hubbub *).
 *
 * Note: REG, CTX and FN were previously defined twice with identical
 * bodies (harmless but redundant); the duplicates are removed.
 */
#define REG(reg)\
	hubbub1->regs->reg

#define DC_LOGGER \
	hubbub1->base.ctx->logger

#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name
51
/*
 * convert_and_clamp - convert a time in nanoseconds to DLG refclk cycles
 * and clamp the result to the register field's maximum.
 *
 * cycles = wm_ns * refclk_mhz / 1000 (1 ns * 1 MHz = 1/1000 cycle).
 *
 * The product is computed in 64 bits: a 32-bit multiply wraps for large
 * watermarks (e.g. 10,000,000 ns at 1000 MHz exceeds UINT32_MAX) and the
 * wrapped value could then slip under @clamp_value instead of saturating.
 *
 * @wm_ns:       watermark value in nanoseconds
 * @refclk_mhz:  DLG reference clock in MHz
 * @clamp_value: maximum value representable in the target register field
 *
 * Return: the converted value, saturated at @clamp_value.
 */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint64_t ret_val;

	ret_val = (uint64_t)wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return (uint32_t)ret_val;
}
66
/*
 * dcn21_dchvm_init - bring up the DCHVM (display host VM) block.
 *
 * Requests HVM init, polls for the rIOMMU to report active (up to
 * 100 iterations * 5 us = ~500 us), then reflects power status, starts
 * rIOMMU prefetch, re-enables dynamic clock gating and waits for the
 * prefetch to complete.
 *
 * NOTE(review): if the rIOMMU never becomes active the remaining steps
 * are silently skipped — no error is reported to the caller.
 */
void dcn21_dchvm_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t riommu_active;
	int i;

	//Init DCHVM block
	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

	//Poll until RIOMMU_ACTIVE = 1
	for (i = 0; i < 100; i++) {
		/* riommu_active is always written here before it is read below */
		REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

		if (riommu_active)
			break;
		else
			udelay(5);
	}

	if (riommu_active) {
		//Reflect the power status of DCHUBBUB
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

		//Start rIOMMU prefetching
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

		// Enable dynamic clock gating
		REG_UPDATE_4(DCHVM_CLK_CTRL,
				HVM_DISPCLK_R_GATE_DIS, 0,
				HVM_DISPCLK_G_GATE_DIS, 0,
				HVM_DCFCLK_R_GATE_DIS, 0,
				HVM_DCFCLK_G_GATE_DIS, 0);

		//Poll until HOSTVM_PREFETCH_DONE = 1 (5 us interval, 100 retries)
		REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
	}
}
104
/*
 * hubbub21_init_dchub - program the DCHUB system apertures and, when a
 * GART range is configured, set up VMID 0 from the physical-address
 * config, then bring up the DCHVM block.
 *
 * Aperture registers are programmed with the address shifted right by
 * 24 bits, i.e. in 16 MB units; page-table addresses use 4 KB (>> 12)
 * units.
 *
 * @hubbub:    hubbub to program
 * @pa_config: system aperture and GART physical-address configuration
 *
 * Return: the number of VMIDs this hubbub supports.
 */
int hubbub21_init_dchub(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	/* A zero-length range (start == end) means no GART: skip VMID setup. */
	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; /* Note: hack — bit 0 presumably marks the entry valid; confirm */
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return hubbub1->num_vmid;
}
138
139bool hubbub21_program_urgent_watermarks(
140 struct hubbub *hubbub,
141 struct dcn_watermark_set *watermarks,
142 unsigned int refclk_mhz,
143 bool safe_to_lower)
144{
145 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
146 uint32_t prog_wm_value;
147 bool wm_pending = false;
148
149 /* Repeat for water mark set A, B, C and D. */
150 /* clock state A */
151 if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
152 hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
153 prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
154 refclk_mhz, 0x1fffff);
155 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
156 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
157 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
158
159 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
160 "HW register value = 0x%x\n",
161 watermarks->a.urgent_ns, prog_wm_value);
162 } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
163 wm_pending = true;
164
165 /* determine the transfer time for a quantity of data for a particular requestor.*/
166 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
167 > hubbub1->watermarks.a.frac_urg_bw_flip) {
168 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
169
170 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
171 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
172 } else if (watermarks->a.frac_urg_bw_flip
173 < hubbub1->watermarks.a.frac_urg_bw_flip)
174 wm_pending = true;
175
176 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
177 > hubbub1->watermarks.a.frac_urg_bw_nom) {
178 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
179
180 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
181 DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
182 } else if (watermarks->a.frac_urg_bw_nom
183 < hubbub1->watermarks.a.frac_urg_bw_nom)
184 wm_pending = true;
185
186 if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
187 hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
188 prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
189 refclk_mhz, 0x1fffff);
190 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
191 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
192 } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
193 wm_pending = true;
194
195 /* clock state B */
196 if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
197 hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
198 prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
199 refclk_mhz, 0x1fffff);
200 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
201 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
202 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);
203
204 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
205 "HW register value = 0x%x\n",
206 watermarks->b.urgent_ns, prog_wm_value);
207 } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
208 wm_pending = true;
209
210 /* determine the transfer time for a quantity of data for a particular requestor.*/
211 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
212 > hubbub1->watermarks.a.frac_urg_bw_flip) {
213 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
214
215 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
216 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
217 } else if (watermarks->a.frac_urg_bw_flip
218 < hubbub1->watermarks.a.frac_urg_bw_flip)
219 wm_pending = true;
220
221 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
222 > hubbub1->watermarks.a.frac_urg_bw_nom) {
223 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
224
225 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
226 DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
227 } else if (watermarks->a.frac_urg_bw_nom
228 < hubbub1->watermarks.a.frac_urg_bw_nom)
229 wm_pending = true;
230
231 if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
232 hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
233 prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
234 refclk_mhz, 0x1fffff);
235 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
236 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
237 } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
238 wm_pending = true;
239
240 /* clock state C */
241 if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
242 hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
243 prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
244 refclk_mhz, 0x1fffff);
245 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
246 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
247 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);
248
249 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
250 "HW register value = 0x%x\n",
251 watermarks->c.urgent_ns, prog_wm_value);
252 } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
253 wm_pending = true;
254
255 /* determine the transfer time for a quantity of data for a particular requestor.*/
256 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
257 > hubbub1->watermarks.a.frac_urg_bw_flip) {
258 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
259
260 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
261 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
262 } else if (watermarks->a.frac_urg_bw_flip
263 < hubbub1->watermarks.a.frac_urg_bw_flip)
264 wm_pending = true;
265
266 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
267 > hubbub1->watermarks.a.frac_urg_bw_nom) {
268 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
269
270 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
271 DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
272 } else if (watermarks->a.frac_urg_bw_nom
273 < hubbub1->watermarks.a.frac_urg_bw_nom)
274 wm_pending = true;
275
276 if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
277 hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
278 prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
279 refclk_mhz, 0x1fffff);
280 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
281 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
282 } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
283 wm_pending = true;
284
285 /* clock state D */
286 if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
287 hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
288 prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
289 refclk_mhz, 0x1fffff);
290 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
291 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
292 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);
293
294 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
295 "HW register value = 0x%x\n",
296 watermarks->d.urgent_ns, prog_wm_value);
297 } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
298 wm_pending = true;
299
300 /* determine the transfer time for a quantity of data for a particular requestor.*/
301 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
302 > hubbub1->watermarks.a.frac_urg_bw_flip) {
303 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
304
305 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
306 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
307 } else if (watermarks->a.frac_urg_bw_flip
308 < hubbub1->watermarks.a.frac_urg_bw_flip)
309 wm_pending = true;
310
311 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
312 > hubbub1->watermarks.a.frac_urg_bw_nom) {
313 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
314
315 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
316 DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
317 } else if (watermarks->a.frac_urg_bw_nom
318 < hubbub1->watermarks.a.frac_urg_bw_nom)
319 wm_pending = true;
320
321 if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
322 hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
323 prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
324 refclk_mhz, 0x1fffff);
325 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
326 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
327 } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
328 wm_pending = true;
329
330 return wm_pending;
331}
332
333bool hubbub21_program_stutter_watermarks(
334 struct hubbub *hubbub,
335 struct dcn_watermark_set *watermarks,
336 unsigned int refclk_mhz,
337 bool safe_to_lower)
338{
339 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
340 uint32_t prog_wm_value;
341 bool wm_pending = false;
342
343 /* clock state A */
344 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
345 > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
346 hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
347 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
348 prog_wm_value = convert_and_clamp(
349 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
350 refclk_mhz, 0x1fffff);
351 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
352 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
353 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
354 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
355 "HW register value = 0x%x\n",
356 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
357 } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
358 < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
359 wm_pending = true;
360
361 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
362 > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
363 hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
364 watermarks->a.cstate_pstate.cstate_exit_ns;
365 prog_wm_value = convert_and_clamp(
366 watermarks->a.cstate_pstate.cstate_exit_ns,
367 refclk_mhz, 0x1fffff);
368 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
369 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
370 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
371 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
372 "HW register value = 0x%x\n",
373 watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
374 } else if (watermarks->a.cstate_pstate.cstate_exit_ns
375 < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
376 wm_pending = true;
377
378 /* clock state B */
379 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
380 > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
381 hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
382 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
383 prog_wm_value = convert_and_clamp(
384 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
385 refclk_mhz, 0x1fffff);
386 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
387 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
388 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
389 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
390 "HW register value = 0x%x\n",
391 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
392 } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
393 < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
394 wm_pending = true;
395
396 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
397 > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
398 hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
399 watermarks->b.cstate_pstate.cstate_exit_ns;
400 prog_wm_value = convert_and_clamp(
401 watermarks->b.cstate_pstate.cstate_exit_ns,
402 refclk_mhz, 0x1fffff);
403 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
404 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
405 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
406 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
407 "HW register value = 0x%x\n",
408 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
409 } else if (watermarks->b.cstate_pstate.cstate_exit_ns
410 < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
411 wm_pending = true;
412
413 /* clock state C */
414 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
415 > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
416 hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
417 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
418 prog_wm_value = convert_and_clamp(
419 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
420 refclk_mhz, 0x1fffff);
421 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
422 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
423 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
424 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
425 "HW register value = 0x%x\n",
426 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
427 } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
428 < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
429 wm_pending = true;
430
431 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
432 > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
433 hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
434 watermarks->c.cstate_pstate.cstate_exit_ns;
435 prog_wm_value = convert_and_clamp(
436 watermarks->c.cstate_pstate.cstate_exit_ns,
437 refclk_mhz, 0x1fffff);
438 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
439 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
440 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
441 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
442 "HW register value = 0x%x\n",
443 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
444 } else if (watermarks->c.cstate_pstate.cstate_exit_ns
445 < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
446 wm_pending = true;
447
448 /* clock state D */
449 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
450 > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
451 hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
452 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
453 prog_wm_value = convert_and_clamp(
454 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
455 refclk_mhz, 0x1fffff);
456 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
457 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
458 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
459 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
460 "HW register value = 0x%x\n",
461 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
462 } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
463 < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
464 wm_pending = true;
465
466 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
467 > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
468 hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
469 watermarks->d.cstate_pstate.cstate_exit_ns;
470 prog_wm_value = convert_and_clamp(
471 watermarks->d.cstate_pstate.cstate_exit_ns,
472 refclk_mhz, 0x1fffff);
473 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
474 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
475 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
476 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
477 "HW register value = 0x%x\n",
478 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
479 } else if (watermarks->d.cstate_pstate.cstate_exit_ns
480 < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
481 wm_pending = true;
482
483 return wm_pending;
484}
485
486bool hubbub21_program_pstate_watermarks(
487 struct hubbub *hubbub,
488 struct dcn_watermark_set *watermarks,
489 unsigned int refclk_mhz,
490 bool safe_to_lower)
491{
492 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
493 uint32_t prog_wm_value;
494
495 bool wm_pending = false;
496
497 /* clock state A */
498 if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
499 > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
500 hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
501 watermarks->a.cstate_pstate.pstate_change_ns;
502 prog_wm_value = convert_and_clamp(
503 watermarks->a.cstate_pstate.pstate_change_ns,
504 refclk_mhz, 0x1fffff);
505 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
506 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
507 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
508 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
509 "HW register value = 0x%x\n\n",
510 watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
511 } else if (watermarks->a.cstate_pstate.pstate_change_ns
512 < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
513 wm_pending = true;
514
515 /* clock state B */
516 if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
517 > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
518 hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
519 watermarks->b.cstate_pstate.pstate_change_ns;
520 prog_wm_value = convert_and_clamp(
521 watermarks->b.cstate_pstate.pstate_change_ns,
522 refclk_mhz, 0x1fffff);
523 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
524 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
525 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
526 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
527 "HW register value = 0x%x\n\n",
528 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
529 } else if (watermarks->b.cstate_pstate.pstate_change_ns
530 < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
531 wm_pending = false;
532
533 /* clock state C */
534 if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
535 > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
536 hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
537 watermarks->c.cstate_pstate.pstate_change_ns;
538 prog_wm_value = convert_and_clamp(
539 watermarks->c.cstate_pstate.pstate_change_ns,
540 refclk_mhz, 0x1fffff);
541 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
542 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
543 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
544 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
545 "HW register value = 0x%x\n\n",
546 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
547 } else if (watermarks->c.cstate_pstate.pstate_change_ns
548 < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
549 wm_pending = true;
550
551 /* clock state D */
552 if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
553 > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
554 hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
555 watermarks->d.cstate_pstate.pstate_change_ns;
556 prog_wm_value = convert_and_clamp(
557 watermarks->d.cstate_pstate.pstate_change_ns,
558 refclk_mhz, 0x1fffff);
559 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
560 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
561 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
562 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
563 "HW register value = 0x%x\n\n",
564 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
565 } else if (watermarks->d.cstate_pstate.pstate_change_ns
566 < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
567 wm_pending = true;
568
569 return wm_pending;
570}
571
/*
 * hubbub21_program_watermarks - program all watermark groups (urgent,
 * stutter, p-state) and the arbiter request-limiting controls.
 *
 * @hubbub:        hubbub to program
 * @watermarks:    requested watermark values in nanoseconds
 * @refclk_mhz:    DLG reference clock used for ns-to-cycle conversion
 * @safe_to_lower: whether lowering currently programmed values is allowed
 *
 * Return: true if any sub-group deferred a watermark lowering.
 */
bool hubbub21_program_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	/* hubbub1 is referenced implicitly by the REG_* macros below */
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	/* 60 * refclk_mhz = 60 us expressed in refclk cycles */
	REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

	/* stutter (self refresh) allowed unless explicitly disabled via debug option */
	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	return wm_pending;
}
615
/*
 * hubbub21_wm_read_state - read back the currently programmed watermark
 * registers (urgent, SR enter/exit, DRAM clock change) for the four
 * hardware watermark sets A-D into @wm.
 *
 * @wm is zeroed first; fields not read here stay 0.
 * Note: "dram_clk_chanage" is the field's spelling in struct
 * dcn_hubbub_wm_set (sic).
 */
void hubbub21_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* watermark set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);

	/* watermark set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	/* watermark set C */
	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	/* watermark set D */
	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
680
/*
 * hubbub21_apply_DEDCN21_147_wa - workaround for hardware erratum
 * DEDCN21-147: read DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A and write the
 * same value straight back.
 *
 * NOTE(review): the exact erratum mechanism is not visible here —
 * presumably the dummy write re-latches the watermark in the arbiter;
 * confirm against the erratum notes before changing.
 */
void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
689
690static const struct hubbub_funcs hubbub21_funcs = {
691 .update_dchub = hubbub2_update_dchub,
692 .init_dchub_sys_ctx = hubbub21_init_dchub,
693 .init_vm_ctx = hubbub2_init_vm_ctx,
694 .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
695 .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
696 .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
697 .wm_read_state = hubbub21_wm_read_state,
698 .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
699 .program_watermarks = hubbub21_program_watermarks,
700 .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
701 .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
702};
703
704void hubbub21_construct(struct dcn20_hubbub *hubbub,
705 struct dc_context *ctx,
706 const struct dcn_hubbub_registers *hubbub_regs,
707 const struct dcn_hubbub_shift *hubbub_shift,
708 const struct dcn_hubbub_mask *hubbub_mask)
709{
710 hubbub->base.ctx = ctx;
711
712 hubbub->base.funcs = &hubbub21_funcs;
713
714 hubbub->regs = hubbub_regs;
715 hubbub->shifts = hubbub_shift;
716 hubbub->masks = hubbub_mask;
717
718 hubbub->debug_test_index_pstate = 0xB;
719 hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
720}