/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "reg_helper.h"

#define CTX \
	hubbub1->base.ctx
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define REG(reg)\
	hubbub1->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

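/* Read back the watermark values currently programmed in hardware for all
 * four watermark sets (A-D).
 */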
void hubbub1_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	/*
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}

bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t enable = 0;

	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);

	return enable ? true : false;
}


bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	/* pstate latency is ~20us so if we wait over 40us and pstate allow
	 * still not asserted, we are probably stuck and going to hang
	 *
	 * TODO: Figure out why it takes ~100us on linux
	 * pstate takes around ~100us (up to 200us) on linux. Unknown currently
	 * as to why it takes that long on linux
	 */
	const unsigned int pstate_wait_timeout_us = 200;
	const unsigned int pstate_wait_expected_timeout_us = 180;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data;
	unsigned int i;

	if (forced_pstate_allow) {
		/* we hacked to force pstate allow to prevent hang last time
		 * we verify_allow_pstate_change_high. so disable force
		 * here so we can check status
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
				DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
				DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* The following table only applies to DCN1 and DCN2,
	 * for newer DCNs, need to consult with HW IP folks to read RTL
	 * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
	 * description
	 * 0:     Pipe0 Plane0 Allow Pstate Change
	 * 1:     Pipe0 Plane1 Allow Pstate Change
	 * 2:     Pipe0 Cursor0 Allow Pstate Change
	 * 3:     Pipe0 Cursor1 Allow Pstate Change
	 * 4:     Pipe1 Plane0 Allow Pstate Change
	 * 5:     Pipe1 Plane1 Allow Pstate Change
	 * 6:     Pipe1 Cursor0 Allow Pstate Change
	 * 7:     Pipe1 Cursor1 Allow Pstate Change
	 * 8:     Pipe2 Plane0 Allow Pstate Change
	 * 9:     Pipe2 Plane1 Allow Pstate Change
	 * 10:    Pipe2 Cursor0 Allow Pstate Change
	 * 11:    Pipe2 Cursor1 Allow Pstate Change
	 * 12:    Pipe3 Plane0 Allow Pstate Change
	 * 13:    Pipe3 Plane1 Allow Pstate Change
	 * 14:    Pipe3 Cursor0 Allow Pstate Change
	 * 15:    Pipe3 Cursor1 Allow Pstate Change
	 * 16:    Pipe4 Plane0 Allow Pstate Change
	 * 17:    Pipe4 Plane1 Allow Pstate Change
	 * 18:    Pipe4 Cursor0 Allow Pstate Change
	 * 19:    Pipe4 Cursor1 Allow Pstate Change
	 * 20:    Pipe5 Plane0 Allow Pstate Change
	 * 21:    Pipe5 Plane1 Allow Pstate Change
	 * 22:    Pipe5 Cursor0 Allow Pstate Change
	 * 23:    Pipe5 Cursor1 Allow Pstate Change
	 * 24:    Pipe6 Plane0 Allow Pstate Change
	 * 25:    Pipe6 Plane1 Allow Pstate Change
	 * 26:    Pipe6 Cursor0 Allow Pstate Change
	 * 27:    Pipe6 Cursor1 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		if (debug_data & (1 << 30)) {

			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
			DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}

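/* Convert a watermark given in nanoseconds to refclk cycles
 * (wm_ns * refclk_mhz / 1000) and clamp the result to the register maximum.
 */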
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = 0;
	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

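/* Workaround: pulse DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST low then high so a
 * fresh watermark change request is issued after watermarks are reprogrammed.
 */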
void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}

bool hubbub1_program_urgent_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark sets A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
		hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
		hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
		hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
		hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
		wm_pending = true;

	return wm_pending;
}

bool hubbub1_program_stutter_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}

bool hubbub1_program_pstate_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}

bool hubbub1_program_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	bool wm_pending = false;
	/*
	 * Need to clamp to max of the register values (i.e. no wrap)
	 * for dcn1, all wm registers are 21-bit wide
	 */
	if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

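	/* Arbiter saturation level is programmed as 60 us worth of refclk
	 * cycles (60 * refclk_mhz).
	 */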
	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
	return wm_pending;
}

void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/* should not come here */
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/* For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode */
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0X03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t watermark_change_req;

	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);

	if (watermark_change_req)
		watermark_change_req = 0;
	else
		watermark_change_req = 1;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}

void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t reset_en = reset ? 1 : 0;

	REG_UPDATE(DCHUBBUB_SOFT_RESET,
			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}

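/* Report whether DCC is supported for the given swizzle mode and bytes per
 * element and, if so, the resulting horizontal/vertical segment ordering.
 */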
static bool hubbub1_dcc_support_swizzle(
	enum swizzle_mode_values swizzle,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

static bool hubbub1_dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML. might want to refactor DML to leverage from DML */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

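/* Determine whether horizontal/vertical write-combined requests have to drop
 * from full 256B to half 128B requests: 128B is used when two swaths no
 * longer fit in the 164KB detile buffer.
 */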
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dc *dc = hubbub1->base.ctx->dc;

	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	default:
		ASSERT(false);
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}

static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
	.wm_read_state = hubbub1_wm_read_state,
	.program_watermarks = hubbub1_program_watermarks,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
};

void hubbub1_construct(struct hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	hubbub1->base.ctx = ctx;

	hubbub1->base.funcs = &hubbub1_funcs;

	hubbub1->regs = hubbub_regs;
	hubbub1->shifts = hubbub_shift;
	hubbub1->masks = hubbub_mask;

	hubbub1->debug_test_index_pstate = 0x7;
	if (ctx->dce_version == DCN_VERSION_1_01)
		hubbub1->debug_test_index_pstate = 0xB;
}
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/delay.h>
27
28#include "dm_services.h"
29#include "dcn10_hubp.h"
30#include "dcn10_hubbub.h"
31#include "reg_helper.h"
32
33#define CTX \
34 hubbub1->base.ctx
35#define DC_LOGGER \
36 hubbub1->base.ctx->logger
37#define REG(reg)\
38 hubbub1->regs->reg
39
40#undef FN
41#define FN(reg_name, field_name) \
42 hubbub1->shifts->field_name, hubbub1->masks->field_name
43
44void hubbub1_wm_read_state(struct hubbub *hubbub,
45 struct dcn_hubbub_wm *wm)
46{
47 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
48 struct dcn_hubbub_wm_set *s;
49
50 memset(wm, 0, sizeof(struct dcn_hubbub_wm));
51
52 s = &wm->sets[0];
53 s->wm_set = 0;
54 s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
55 s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
56 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
57 s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
58 s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
59 }
60 s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
61
62 s = &wm->sets[1];
63 s->wm_set = 1;
64 s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
65 s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
66 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
67 s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
68 s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
69 }
70 s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
71
72 s = &wm->sets[2];
73 s->wm_set = 2;
74 s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
75 s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
76 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
77 s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
78 s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
79 }
80 s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
81
82 s = &wm->sets[3];
83 s->wm_set = 3;
84 s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
85 s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
86 if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
87 s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
88 s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
89 }
90 s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
91}
92
93void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
94{
95 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
96
97 /*
98 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
99 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
100 */
101
102 REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
103 DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
104 DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
105}
106
107bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
108{
109 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
110 uint32_t enable = 0;
111
112 REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
113 DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
114
115 return enable ? true : false;
116}
117
118
119bool hubbub1_verify_allow_pstate_change_high(
120 struct hubbub *hubbub)
121{
122 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
123
124 /* pstate latency is ~20us so if we wait over 40us and pstate allow
125 * still not asserted, we are probably stuck and going to hang
126 *
127 * TODO: Figure out why it takes ~100us on linux
128 * pstate takes around ~100us on linux. Unknown currently as to
129 * why it takes that long on linux
130 */
131 static unsigned int pstate_wait_timeout_us = 200;
132 static unsigned int pstate_wait_expected_timeout_us = 40;
133 static unsigned int max_sampled_pstate_wait_us; /* data collection */
134 static bool forced_pstate_allow; /* help with revert wa */
135
136 unsigned int debug_data;
137 unsigned int i;
138
139 if (forced_pstate_allow) {
140 /* we hacked to force pstate allow to prevent hang last time
141 * we verify_allow_pstate_change_high. so disable force
142 * here so we can check status
143 */
144 REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
145 DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
146 DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
147 forced_pstate_allow = false;
148 }
149
150 /* RV2:
151 * dchubbubdebugind, at: 0xB
152 * description
153 * 0: Pipe0 Plane0 Allow Pstate Change
154 * 1: Pipe0 Plane1 Allow Pstate Change
155 * 2: Pipe0 Cursor0 Allow Pstate Change
156 * 3: Pipe0 Cursor1 Allow Pstate Change
157 * 4: Pipe1 Plane0 Allow Pstate Change
158 * 5: Pipe1 Plane1 Allow Pstate Change
159 * 6: Pipe1 Cursor0 Allow Pstate Change
160 * 7: Pipe1 Cursor1 Allow Pstate Change
161 * 8: Pipe2 Plane0 Allow Pstate Change
162 * 9: Pipe2 Plane1 Allow Pstate Change
163 * 10: Pipe2 Cursor0 Allow Pstate Change
164 * 11: Pipe2 Cursor1 Allow Pstate Change
165 * 12: Pipe3 Plane0 Allow Pstate Change
166 * 13: Pipe3 Plane1 Allow Pstate Change
167 * 14: Pipe3 Cursor0 Allow Pstate Change
168 * 15: Pipe3 Cursor1 Allow Pstate Change
169 * 16: Pipe4 Plane0 Allow Pstate Change
170 * 17: Pipe4 Plane1 Allow Pstate Change
171 * 18: Pipe4 Cursor0 Allow Pstate Change
172 * 19: Pipe4 Cursor1 Allow Pstate Change
173 * 20: Pipe5 Plane0 Allow Pstate Change
174 * 21: Pipe5 Plane1 Allow Pstate Change
175 * 22: Pipe5 Cursor0 Allow Pstate Change
176 * 23: Pipe5 Cursor1 Allow Pstate Change
177 * 24: Pipe6 Plane0 Allow Pstate Change
178 * 25: Pipe6 Plane1 Allow Pstate Change
179 * 26: Pipe6 Cursor0 Allow Pstate Change
180 * 27: Pipe6 Cursor1 Allow Pstate Change
181 * 28: WB0 Allow Pstate Change
182 * 29: WB1 Allow Pstate Change
183 * 30: Arbiter's allow_pstate_change
184 * 31: SOC pstate change request"
185 */
186 /*DCN2.x:
187 HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
188 0: Pipe0 Plane0 Allow P-state Change
189 1: Pipe0 Plane1 Allow P-state Change
190 2: Pipe0 Cursor0 Allow P-state Change
191 3: Pipe0 Cursor1 Allow P-state Change
192 4: Pipe1 Plane0 Allow P-state Change
193 5: Pipe1 Plane1 Allow P-state Change
194 6: Pipe1 Cursor0 Allow P-state Change
195 7: Pipe1 Cursor1 Allow P-state Change
196 8: Pipe2 Plane0 Allow P-state Change
197 9: Pipe2 Plane1 Allow P-state Change
198 10: Pipe2 Cursor0 Allow P-state Change
199 11: Pipe2 Cursor1 Allow P-state Change
200 12: Pipe3 Plane0 Allow P-state Change
201 13: Pipe3 Plane1 Allow P-state Change
202 14: Pipe3 Cursor0 Allow P-state Change
203 15: Pipe3 Cursor1 Allow P-state Change
204 16: Pipe4 Plane0 Allow P-state Change
205 17: Pipe4 Plane1 Allow P-state Change
206 18: Pipe4 Cursor0 Allow P-state Change
207 19: Pipe4 Cursor1 Allow P-state Change
208 20: Pipe5 Plane0 Allow P-state Change
209 21: Pipe5 Plane1 Allow P-state Change
210 22: Pipe5 Cursor0 Allow P-state Change
211 23: Pipe5 Cursor1 Allow P-state Change
212 24: Pipe6 Plane0 Allow P-state Change
213 25: Pipe6 Plane1 Allow P-state Change
214 26: Pipe6 Cursor0 Allow P-state Change
215 27: Pipe6 Cursor1 Allow P-state Change
216 28: WB0 Allow P-state Change
217 29: WB1 Allow P-state Change
218 30: Arbiter`s Allow P-state Change
219 31: SOC P-state Change request
220 */
221 /* RV1:
222 * dchubbubdebugind, at: 0x7
223 * description "3-0: Pipe0 cursor0 QOS
224 * 7-4: Pipe1 cursor0 QOS
225 * 11-8: Pipe2 cursor0 QOS
226 * 15-12: Pipe3 cursor0 QOS
227 * 16: Pipe0 Plane0 Allow Pstate Change
228 * 17: Pipe1 Plane0 Allow Pstate Change
229 * 18: Pipe2 Plane0 Allow Pstate Change
230 * 19: Pipe3 Plane0 Allow Pstate Change
231 * 20: Pipe0 Plane1 Allow Pstate Change
232 * 21: Pipe1 Plane1 Allow Pstate Change
233 * 22: Pipe2 Plane1 Allow Pstate Change
234 * 23: Pipe3 Plane1 Allow Pstate Change
235 * 24: Pipe0 cursor0 Allow Pstate Change
236 * 25: Pipe1 cursor0 Allow Pstate Change
237 * 26: Pipe2 cursor0 Allow Pstate Change
238 * 27: Pipe3 cursor0 Allow Pstate Change
239 * 28: WB0 Allow Pstate Change
240 * 29: WB1 Allow Pstate Change
241 * 30: Arbiter's allow_pstate_change
242 * 31: SOC pstate change request
243 */
244
245 REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);
246
247 for (i = 0; i < pstate_wait_timeout_us; i++) {
248 debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
249
250 if (debug_data & (1 << 30)) {
251
252 if (i > pstate_wait_expected_timeout_us)
253 DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
254 i);
255
256 return true;
257 }
258 if (max_sampled_pstate_wait_us < i)
259 max_sampled_pstate_wait_us = i;
260
261 udelay(1);
262 }
263
264 /* force pstate allow to prevent system hang
265 * and break to debugger to investigate
266 */
267 REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
268 DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
269 DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
270 forced_pstate_allow = true;
271
272 DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
273 debug_data);
274
275 return false;
276}
277
278static uint32_t convert_and_clamp(
279 uint32_t wm_ns,
280 uint32_t refclk_mhz,
281 uint32_t clamp_value)
282{
283 uint32_t ret_val = 0;
284 ret_val = wm_ns * refclk_mhz;
285 ret_val /= 1000;
286
287 if (ret_val > clamp_value)
288 ret_val = clamp_value;
289
290 return ret_val;
291}
292
293
294void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
295{
296 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
297
298 REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
299 DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
300 DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
301}
302
303void hubbub1_program_urgent_watermarks(
304 struct hubbub *hubbub,
305 struct dcn_watermark_set *watermarks,
306 unsigned int refclk_mhz,
307 bool safe_to_lower)
308{
309 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
310 uint32_t prog_wm_value;
311
312 /* Repeat for water mark set A, B, C and D. */
313 /* clock state A */
314 if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
315 hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
316 prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
317 refclk_mhz, 0x1fffff);
318 REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
319 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
320
321 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
322 "HW register value = 0x%x\n",
323 watermarks->a.urgent_ns, prog_wm_value);
324 }
325
326 if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
327 hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
328 prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
329 refclk_mhz, 0x1fffff);
330 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
331 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
332 "HW register value = 0x%x\n",
333 watermarks->a.pte_meta_urgent_ns, prog_wm_value);
334 }
335
336 /* clock state B */
337 if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
338 hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
339 prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
340 refclk_mhz, 0x1fffff);
341 REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
342 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
343
344 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
345 "HW register value = 0x%x\n",
346 watermarks->b.urgent_ns, prog_wm_value);
347 }
348
349 if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
350 hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
351 prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
352 refclk_mhz, 0x1fffff);
353 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
354 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
355 "HW register value = 0x%x\n",
356 watermarks->b.pte_meta_urgent_ns, prog_wm_value);
357 }
358
359 /* clock state C */
360 if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
361 hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
362 prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
363 refclk_mhz, 0x1fffff);
364 REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
365 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
366
367 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
368 "HW register value = 0x%x\n",
369 watermarks->c.urgent_ns, prog_wm_value);
370 }
371
372 if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
373 hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
374 prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
375 refclk_mhz, 0x1fffff);
376 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
377 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
378 "HW register value = 0x%x\n",
379 watermarks->c.pte_meta_urgent_ns, prog_wm_value);
380 }
381
382 /* clock state D */
383 if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
384 hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
385 prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
386 refclk_mhz, 0x1fffff);
387 REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
388 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
389
390 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
391 "HW register value = 0x%x\n",
392 watermarks->d.urgent_ns, prog_wm_value);
393 }
394
395 if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
396 hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
397 prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
398 refclk_mhz, 0x1fffff);
399 REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
400 DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
401 "HW register value = 0x%x\n",
402 watermarks->d.pte_meta_urgent_ns, prog_wm_value);
403 }
404}
405
406void hubbub1_program_stutter_watermarks(
407 struct hubbub *hubbub,
408 struct dcn_watermark_set *watermarks,
409 unsigned int refclk_mhz,
410 bool safe_to_lower)
411{
412 struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
413 uint32_t prog_wm_value;
414
415 /* clock state A */
416 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
417 > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
418 hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
419 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
420 prog_wm_value = convert_and_clamp(
421 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
422 refclk_mhz, 0x1fffff);
423 REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
424 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
425 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
426 "HW register value = 0x%x\n",
427 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
428 }
429
430 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
431 > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
432 hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
433 watermarks->a.cstate_pstate.cstate_exit_ns;
434 prog_wm_value = convert_and_clamp(
435 watermarks->a.cstate_pstate.cstate_exit_ns,
436 refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

}

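/*
 * Program the DRAM clock change (p-state) allow watermarks for clock states
 * A-D.  Each value is cached in hubbub1->watermarks and only written when
 * safe_to_lower is set or the new watermark is higher than the cached one;
 * convert_and_clamp() converts the nanosecond value to refclk-based register
 * units and clamps it to the 21-bit field width (0x1fffff).
 */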
void hubbub1_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	}
}

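/*
 * Top-level watermark programming: applies the urgent, stutter (self-refresh)
 * and p-state watermark sets, then programs the arbiter saturation level and
 * minimum outstanding DF request count, and finally ties the self-refresh
 * allowance to the disable_stutter debug option.
 */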
void hubbub1_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	/*
	 * Need to clamp to max of the register values (i.e. no wrap)
	 * for dcn1, all wm registers are 21-bit wide
	 */
	hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);

	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
}

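/*
 * Program the SDPIF frame-buffer and AGP apertures for the requested frame
 * buffer mode.  For ZFB (zero frame buffer) the FB BASE/TOP pair is written
 * "upside down" to flag ZFB operation, and the AGP aperture registers are
 * loaded from the ZFB physical/MC addresses in 4 MB units (hence the >> 22).
 */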
void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/* should not come here */
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/* For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode */
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0x03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

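/*
 * Toggle DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST: read the current request bit
 * and write back its inverse.
 */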
void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t watermark_change_req;

	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);

	if (watermark_change_req)
		watermark_change_req = 0;
	else
		watermark_change_req = 1;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}

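/* Assert (reset == true) or release the DCHUBBUB global soft reset. */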
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t reset_en = reset ? 1 : 0;

	REG_UPDATE(DCHUBBUB_SOFT_RESET,
			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}

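/*
 * Check whether the given swizzle mode and element size can be DCC
 * compressed; on success report the horizontal/vertical segment ordering,
 * which the DCC capability code uses to choose between the 256B, 128B and
 * 256B/64B request configurations.
 */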
static bool hubbub1_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

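/*
 * Report whether DCC supports the given pixel format and return its size in
 * bytes per element; formats not listed here (e.g. video formats) are
 * treated as unsupported.
 */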
static bool hubbub1_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

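/* Width and height (in elements) of a 256-byte block for the given element size. */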
static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor so this is shared with DML */
	/* DML: get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

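/*
 * Decide, per scan direction, whether DCC requests must drop to 128B: if two
 * swaths of 256-byte blocks no longer fit in the 164KB detile buffer, that
 * direction falls back to half-size (128B) requests.
 */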
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = height * blk256_height * bpe;
	swath_bytes_vert_wc = width * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true;   /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true;   /* half 128B request */
}

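/*
 * Evaluate DCC capability for a surface: derive bytes-per-element and segment
 * ordering from the pixel format and swizzle mode, check whether 128B
 * requests are needed to fit the detile buffer, then report the maximum
 * uncompressed/compressed block sizes and whether independent 64B blocks are
 * required.
 */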
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dc *dc = hubbub1->base.ctx->dc;

	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but the segment order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}

static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
	.wm_read_state = hubbub1_wm_read_state,
	.program_watermarks = hubbub1_program_watermarks,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};

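/*
 * Construct the DCN1.0 hubbub: wire up the function table and the register,
 * shift and mask tables, and pick the pstate debug test index (0xB for
 * DCN 1.0.1, 0x7 otherwise).
 */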
void hubbub1_construct(struct hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	hubbub1->base.ctx = ctx;

	hubbub1->base.funcs = &hubbub1_funcs;

	hubbub1->regs = hubbub_regs;
	hubbub1->shifts = hubbub_shift;
	hubbub1->masks = hubbub_mask;

	hubbub1->debug_test_index_pstate = 0x7;
	if (ctx->dce_version == DCN_VERSION_1_01)
		hubbub1->debug_test_index_pstate = 0xB;
}