/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20

#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37

#define ACP_TILE_DSP2_MASK 0x2f

#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68

#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb

#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF

#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 3
#define ACP_SRC_ID 162

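/*
 * The tile indices below are used both as an offset from
 * mmACP_PGFSM_READ_REG_0 and as a bit position in mmACP_PGFSM_RETAIN_REG;
 * see the suspend/resume helpers further down.
 */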
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
				   0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
						+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
				   0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
						+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

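/*
 * The ACP audio devices are grouped under a generic PM domain whose
 * power_on/power_off callbacks drive the tile resume/suspend helpers above.
 */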
struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

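/*
 * MFD child devices are registered with an auto-generated
 * "<cell name>.<id>.auto" device name; look that name up on the platform
 * bus to get the corresponding struct device.
 */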
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
				 0x5289, 0, &acp_base);
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;
	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
			return -ENOMEM;

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	}

	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
				     GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);

	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

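	/*
	 * MMIO and IRQ resources handed to the MFD children: the ACP DMA
	 * register block, the DesignWare I2S playback and capture register
	 * windows, and the ACP DMA interrupt.
	 */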
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dma_irq";
	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 4;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
			if (r) {
				dev_err(dev, "Failed to add dev to genpd\n");
				return r;
			}
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell)
		return 0;

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
			/* If removal fails, don't give up and try the rest */
			if (ret)
				dev_err(dev, "remove dev from genpd failed\n");
		}
		kfree(adev->acp.acp_genpd);
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20

#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37

#define ACP_TILE_DSP2_MASK 0x2f

#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define ACP_BT_PLAY_REGS_START 0x14970
#define ACP_BT_PLAY_REGS_END 0x14a24
#define ACP_BT_COMP1_REG_OFFSET 0xac
#define ACP_BT_COMP2_REG_OFFSET 0xa8

#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb

#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF

#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 4
#define ACP_SRC_ID 162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

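/*
 * In this revision ACP power management is delegated to the SMU: the genpd
 * power_on/power_off callbacks gate or ungate the whole ACP block through
 * amdgpu_dpm_set_powergating_by_smu().
 */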
struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to POWER GATE ACP block
		 * smu will
		 * 1. turn off the acp clock
		 * 2. power off the acp tiles
		 * 3. check and enter ulv state
		 */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to UNGATE ACP block
		 * smu will
		 * 1. exit ulv
		 * 2. turn on acp clock
		 * 3. power on acp tiles
		 */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

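/*
 * device_for_each_child() callbacks used to attach the MFD children to the
 * ACP PM domain during hw_init and to detach them again during hw_fini.
 */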
static int acp_genpd_add_device(struct device *dev, void *data)
{
	struct generic_pm_domain *gpd = data;
	int ret;

	ret = pm_genpd_add_device(gpd, dev);
	if (ret)
		dev_err(dev, "Failed to add dev to genpd %d\n", ret);

	return ret;
}

static int acp_genpd_remove_device(struct device *dev, void *data)
{
	int ret;

	ret = pm_genpd_remove_device(dev);
	if (ret)
		dev_err(dev, "Failed to remove dev from genpd %d\n", ret);

	/* Continue to remove */
	return 0;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
				     GFP_KERNEL);

	if (adev->acp.acp_cell == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

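	/*
	 * MMIO and IRQ resources handed to the MFD children: the ACP DMA
	 * register block, the playback, capture and Bluetooth DesignWare I2S
	 * register windows, and the ACP DMA interrupt.
	 */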
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
		goto failure;

	r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
				  acp_genpd_add_device);
	if (r)
		goto failure;

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	u32 val = 0;
	u32 count = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	device_for_each_child(adev->acp.parent, NULL,
			      acp_genpd_remove_device);

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};