/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20

#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37

#define ACP_TILE_DSP2_MASK 0x2f

#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define ACP_BT_PLAY_REGS_START 0x14970
#define ACP_BT_PLAY_REGS_END 0x14a24
#define ACP_BT_COMP1_REG_OFFSET 0xac
#define ACP_BT_COMP2_REG_OFFSET 0xa8

#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb

#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF

#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 4
#define ACP_SRC_ID 162

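/* ACP tiles; each entry has a matching ACP_TILE_*_MASK define above. */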
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

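/*
 * Generic PM domain for the ACP audio devices: its power_off/power_on
 * callbacks ask the SMU to gate or ungate the whole ACP block.
 */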
struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to POWER GATE ACP block
		 * smu will
		 * 1. turn off the acp clock
		 * 2. power off the acp tiles
		 * 3. check and enter ulv state
		 */
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to UNGATE ACP block
		 * smu will
		 * 1. exit ulv
		 * 2. turn on acp clock
		 * 3. power on acp tiles
		 */
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
				     GFP_KERNEL);

	if (adev->acp.acp_cell == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
		goto failure;

	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			goto failure;
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>
#include <linux/acpi.h>
#include <linux/dmi.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ST_JADEITE 1
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20

#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37

#define ACP_TILE_DSP2_MASK 0x2f

#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define ACP_BT_PLAY_REGS_START 0x14970
#define ACP_BT_PLAY_REGS_END 0x14a24
#define ACP_BT_COMP1_REG_OFFSET 0xac
#define ACP_BT_COMP2_REG_OFFSET 0xa8

#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb

#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF

#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 4
#define ACP_SRC_ID 162

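/* Set by the DMI quirk callback below; selects the ST_JADEITE configuration in acp_hw_init(). */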
static unsigned long acp_machine_id;

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	adev = apd->adev;
	/* call smu to POWER GATE ACP block
	 * smu will
	 * 1. turn off the acp clock
	 * 2. power off the acp tiles
	 * 3. check and enter ulv state
	 */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	adev = apd->adev;
	/* call smu to UNGATE ACP block
	 * smu will
	 * 1. exit ulv
	 * 2. turn on acp clock
	 * 3. power on acp tiles
	 */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_genpd_add_device(struct device *dev, void *data)
{
	struct generic_pm_domain *gpd = data;
	int ret;

	ret = pm_genpd_add_device(gpd, dev);
	if (ret)
		dev_err(dev, "Failed to add dev to genpd %d\n", ret);

	return ret;
}

static int acp_genpd_remove_device(struct device *dev, void *data)
{
	int ret;

	ret = pm_genpd_remove_device(dev);
	if (ret)
		dev_err(dev, "Failed to remove dev from genpd %d\n", ret);

	/* Continue to remove */
	return 0;
}

static int acp_quirk_cb(const struct dmi_system_id *id)
{
	acp_machine_id = ST_JADEITE;
	return 1;
}

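/*
 * Boards that use the Jadeite (ST_JADEITE) ACP configuration: for these,
 * acp_hw_init() registers only two MFD cells, with a single full-duplex
 * designware-i2s instance instead of the default three I2S controllers.
 */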
static const struct dmi_system_id acp_quirk_table[] = {
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMD"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jadeite"),
		}
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "IP3 Technology CO.,Ltd."),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN1D"),
		},
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Standard"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN10"),
		},
	},
	{}
};

/**
 * acp_hw_init - start and test ACP block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 */
static int acp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	u64 acp_base;
	u32 val = 0;
	u32 count = 0;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = ip_block->adev;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;
	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (!adev->acp.acp_genpd)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;
	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	dmi_check_system(acp_quirk_table);
	switch (acp_machine_id) {
	case ST_JADEITE:
	{
		adev->acp.acp_cell = kcalloc(2, sizeof(struct mfd_cell),
					     GFP_KERNEL);
		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(3, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(1, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		i2s_pdata[0].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play_cap";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dma_irq";
		adev->acp.acp_res[2].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[2].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 3;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
		if (r)
			goto failure;
		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
		break;
	}
	default:
		adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
					     GFP_KERNEL);

		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		}
		i2s_pdata[0].cap = DWC_I2S_PLAY;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1 |
				DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1;
		}

		i2s_pdata[1].cap = DWC_I2S_RECORD;
		i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			break;
		}

		i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
		i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
		adev->acp.acp_res[2].flags = IORESOURCE_MEM;
		adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
		adev->acp.acp_res[3].flags = IORESOURCE_MEM;
		adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
		adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

		adev->acp.acp_res[4].name = "acp2x_dma_irq";
		adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 5;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[2].name = "designware-i2s";
		adev->acp.acp_cell[2].num_resources = 1;
		adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
		adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
		adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[3].name = "designware-i2s";
		adev->acp.acp_cell[3].num_resources = 1;
		adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
		adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
		adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
		if (r)
			goto failure;

		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 */
static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	u32 val = 0;
	u32 count = 0;
	struct amdgpu_device *adev = ip_block->adev;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	device_for_each_child(adev->acp.parent, NULL,
			      acp_genpd_remove_device);

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};