Linux Audio
v4.17 (drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c)
  1/*
  2 * Copyright 2015 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: AMD
 23 *
 24 */
 25
 26#include <linux/irqdomain.h>
 27#include <linux/pm_domain.h>
 28#include <linux/platform_device.h>
 29#include <sound/designware_i2s.h>
 30#include <sound/pcm.h>
 31
 32#include "amdgpu.h"
 33#include "atom.h"
 34#include "amdgpu_acp.h"
 35
 36#include "acp_gfx_if.h"
 37
 38#define ACP_TILE_ON_MASK                	0x03
 39#define ACP_TILE_OFF_MASK               	0x02
 40#define ACP_TILE_ON_RETAIN_REG_MASK     	0x1f
 41#define ACP_TILE_OFF_RETAIN_REG_MASK    	0x20
 42
 43#define ACP_TILE_P1_MASK                	0x3e
 44#define ACP_TILE_P2_MASK                	0x3d
 45#define ACP_TILE_DSP0_MASK              	0x3b
 46#define ACP_TILE_DSP1_MASK              	0x37
 47
 48#define ACP_TILE_DSP2_MASK              	0x2f
 49
 50#define ACP_DMA_REGS_END			0x146c0
 51#define ACP_I2S_PLAY_REGS_START			0x14840
 52#define ACP_I2S_PLAY_REGS_END			0x148b4
 53#define ACP_I2S_CAP_REGS_START			0x148b8
 54#define ACP_I2S_CAP_REGS_END			0x1496c
 55
 56#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
 57#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
 58#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
 59#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
 60
 61#define mmACP_PGFSM_RETAIN_REG			0x51c9
 62#define mmACP_PGFSM_CONFIG_REG			0x51ca
 63#define mmACP_PGFSM_READ_REG_0			0x51cc
 64
 65#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
 66#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
 67#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
 68#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb
 69
 70#define mmACP_CONTROL				0x5131
 71#define mmACP_STATUS				0x5133
 72#define mmACP_SOFT_RESET			0x5134
 73#define ACP_CONTROL__ClkEn_MASK 		0x1
 74#define ACP_SOFT_RESET__SoftResetAud_MASK 	0x100
 75#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
 76#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
 77#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF
 78
 79#define ACP_TIMEOUT_LOOP			0x000000FF
 80#define ACP_DEVS				3
 81#define ACP_SRC_ID				162
 82
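/*
 * The enum below assigns each ACP power tile an index.  acp_suspend_tile()
 * and acp_resume_tile() use that index as an offset from
 * mmACP_PGFSM_READ_REG_0 when polling a tile's power state, and as a bit
 * position in mmACP_PGFSM_RETAIN_REG.  ACP_SRC_ID (162) matches the
 * literal 162 that acp_hw_init() later passes to
 * amdgpu_irq_create_mapping().
 */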
 83enum {
 84	ACP_TILE_P1 = 0,
 85	ACP_TILE_P2,
 86	ACP_TILE_DSP0,
 87	ACP_TILE_DSP1,
 88	ACP_TILE_DSP2,
 89};
 90
 91static int acp_sw_init(void *handle)
 92{
 93	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 94
 95	adev->acp.parent = adev->dev;
 96
 97	adev->acp.cgs_device =
 98		amdgpu_cgs_create_device(adev);
 99	if (!adev->acp.cgs_device)
100		return -EINVAL;
101
102	return 0;
103}
104
105static int acp_sw_fini(void *handle)
106{
107	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
108
109	if (adev->acp.cgs_device)
110		amdgpu_cgs_destroy_device(adev->acp.cgs_device);
111
112	return 0;
113}
114
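/*
 * PGFSM programming sequence used by the two helpers below: read the
 * tile's current state from mmACP_PGFSM_READ_REG_0 + tile, request the
 * transition by writing (0x500 + tile) for power-down or (0x600 + tile)
 * for power-up to mmACP_PGFSM_CONFIG_REG, then poll the read register
 * until the requested state is reached or ACP_TIMEOUT_LOOP iterations
 * (100 us apart) expire.
 */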
115/* power off a tile/block within ACP */
116static int acp_suspend_tile(void *cgs_dev, int tile)
117{
118	u32 val = 0;
119	u32 count = 0;
120
121	if ((tile  < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
122		pr_err("Invalid ACP tile : %d to suspend\n", tile);
123		return -1;
124	}
125
126	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
127	val &= ACP_TILE_ON_MASK;
128
129	if (val == 0x0) {
130		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
131		val = val | (1 << tile);
132		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
133		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
134					0x500 + tile);
135
136		count = ACP_TIMEOUT_LOOP;
137		while (true) {
138			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
139								+ tile);
140			val = val & ACP_TILE_ON_MASK;
141			if (val == ACP_TILE_OFF_MASK)
142				break;
143			if (--count == 0) {
144				pr_err("Timeout reading ACP PGFSM status\n");
145				return -ETIMEDOUT;
146			}
147			udelay(100);
148		}
149
150		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
151
152		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
153		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
154	}
155	return 0;
156}
157
158/* power on a tile/block within ACP */
159static int acp_resume_tile(void *cgs_dev, int tile)
160{
161	u32 val = 0;
162	u32 count = 0;
163
164	if ((tile  < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
165		pr_err("Invalid ACP tile to resume\n");
166		return -1;
167	}
168
169	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
170	val = val & ACP_TILE_ON_MASK;
171
172	if (val != 0x0) {
173		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
174					0x600 + tile);
175		count = ACP_TIMEOUT_LOOP;
176		while (true) {
177			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
178							+ tile);
179			val = val & ACP_TILE_ON_MASK;
180			if (val == 0x0)
181				break;
182			if (--count == 0) {
183				pr_err("Timeout reading ACP PGFSM status\n");
184				return -ETIMEDOUT;
185			}
186			udelay(100);
187		}
188		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
189		if (tile == ACP_TILE_P1)
190			val = val & (ACP_TILE_P1_MASK);
191		else if (tile == ACP_TILE_P2)
192			val = val & (ACP_TILE_P2_MASK);
193
194		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
195	}
196	return 0;
197}
198
199struct acp_pm_domain {
200	void *cgs_dev;
201	struct generic_pm_domain gpd;
202};
203
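/*
 * acp_poweroff()/acp_poweron() are the generic_pm_domain callbacks wired
 * up in acp_hw_init().  The genpd core invokes them when the devices in
 * the "ACP_AUDIO" domain suspend or resume.  Power-off walks the tiles in
 * reverse order (DSP2 down to P1); power-on brings up only P1 and P2 and
 * immediately suspends the DSP tiles, since the DSPs are not used by this
 * driver.
 */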
204static int acp_poweroff(struct generic_pm_domain *genpd)
205{
206	int i, ret;
207	struct acp_pm_domain *apd;
208
209	apd = container_of(genpd, struct acp_pm_domain, gpd);
210	if (apd != NULL) {
211	/* Do not return abruptly if any of the power tiles fails to
212	 * suspend.  Log it and continue powering off the other tiles.
213		 */
214		for (i = 4; i >= 0 ; i--) {
215			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
216			if (ret)
217				pr_err("ACP tile %d suspend failed\n", i);
218		}
219	}
220	return 0;
221}
222
223static int acp_poweron(struct generic_pm_domain *genpd)
224{
225	int i, ret;
226	struct acp_pm_domain *apd;
227
228	apd = container_of(genpd, struct acp_pm_domain, gpd);
229	if (apd != NULL) {
230		for (i = 0; i < 2; i++) {
231			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
232			if (ret) {
233				pr_err("ACP tile %d resume failed\n", i);
234				break;
235			}
236		}
237
238		/* Disable DSPs which are not going to be used */
239		for (i = 0; i < 3; i++) {
240			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
241			/* Continue suspending other DSP, even if one fails */
242			if (ret)
243				pr_err("ACP DSP %d suspend failed\n", i);
244		}
245	}
246	return 0;
247}
248
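/*
 * MFD child devices registered through mfd_add_hotplug_devices() are
 * platform devices with automatically assigned IDs, so their device names
 * take the form "<cell name>.<id>.auto".  get_mfd_cell_dev() rebuilds
 * that name (using the cell index as the id) to look up the struct device
 * that is then attached to, or detached from, the ACP power domain.
 */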
249static struct device *get_mfd_cell_dev(const char *device_name, int r)
250{
251	char auto_dev_name[25];
252	struct device *dev;
253
254	snprintf(auto_dev_name, sizeof(auto_dev_name),
255		 "%s.%d.auto", device_name, r);
256	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
257	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
258
259	return dev;
260}
261
262/**
263 * acp_hw_init - start and test ACP block
264 *
265 * @handle: handle used to pass amdgpu_device pointer
266 *
267 */
268static int acp_hw_init(void *handle)
269{
270	int r, i;
271	uint64_t acp_base;
272	u32 val = 0;
273	u32 count = 0;
274	struct device *dev;
275	struct i2s_platform_data *i2s_pdata;
276
277	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
278
279	const struct amdgpu_ip_block *ip_block =
280		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
281
282	if (!ip_block)
283		return -EINVAL;
284
285	r = amd_acp_hw_init(adev->acp.cgs_device,
286			    ip_block->version->major, ip_block->version->minor);
287	/* -ENODEV means board uses AZ rather than ACP */
288	if (r == -ENODEV)
289		return 0;
290	else if (r)
291		return r;
292
293	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
294			0x5289, 0, &acp_base);
295	if (r == -ENODEV)
296		return 0;
297	else if (r)
298		return r;
299	if (adev->asic_type != CHIP_STONEY) {
300		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
301		if (adev->acp.acp_genpd == NULL)
302			return -ENOMEM;
303
304		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
305		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
306		adev->acp.acp_genpd->gpd.power_on = acp_poweron;
307
308
309		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
310
311		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
312	}
313
314	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
315							GFP_KERNEL);
316
317	if (adev->acp.acp_cell == NULL)
318		return -ENOMEM;
319
320	adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);
321
322	if (adev->acp.acp_res == NULL) {
323		kfree(adev->acp.acp_cell);
324		return -ENOMEM;
325	}
326
327	i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
328	if (i2s_pdata == NULL) {
329		kfree(adev->acp.acp_res);
330		kfree(adev->acp.acp_cell);
331		return -ENOMEM;
332	}
333
334	switch (adev->asic_type) {
335	case CHIP_STONEY:
336		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
337			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
338		break;
339	default:
340		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
341	}
342	i2s_pdata[0].cap = DWC_I2S_PLAY;
343	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
344	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
345	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
346	switch (adev->asic_type) {
347	case CHIP_STONEY:
348		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
349			DW_I2S_QUIRK_COMP_PARAM1 |
350			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
351		break;
352	default:
353		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
354			DW_I2S_QUIRK_COMP_PARAM1;
355	}
356
357	i2s_pdata[1].cap = DWC_I2S_RECORD;
358	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
359	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
360	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
361
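/*
 * acp_base is the base of the ACP register space obtained from the GPU's
 * MMIO resource.  The three memory resources below split it into the DMA
 * controller window and the playback and capture DesignWare I2S windows
 * handed to the child drivers; the fourth resource carries the DMA
 * interrupt obtained from amdgpu_irq_create_mapping() for source 162.
 */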
362	adev->acp.acp_res[0].name = "acp2x_dma";
363	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
364	adev->acp.acp_res[0].start = acp_base;
365	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
366
367	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
368	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
369	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
370	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
371
372	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
373	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
374	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
375	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
376
377	adev->acp.acp_res[3].name = "acp2x_dma_irq";
378	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
379	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
380	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
381
382	adev->acp.acp_cell[0].name = "acp_audio_dma";
383	adev->acp.acp_cell[0].num_resources = 4;
384	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
385	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
386	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
387
388	adev->acp.acp_cell[1].name = "designware-i2s";
389	adev->acp.acp_cell[1].num_resources = 1;
390	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
391	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
392	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
393
394	adev->acp.acp_cell[2].name = "designware-i2s";
395	adev->acp.acp_cell[2].num_resources = 1;
396	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
397	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
398	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
399
400	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
401								ACP_DEVS);
402	if (r)
403		return r;
404
405	if (adev->asic_type != CHIP_STONEY) {
406		for (i = 0; i < ACP_DEVS ; i++) {
407			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
408			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
409			if (r) {
410				dev_err(dev, "Failed to add dev to genpd\n");
411				return r;
412			}
413		}
414	}
415
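/*
 * Bring-up sequence: assert SoftResetAud, wait for SoftResetAudDone,
 * enable the ACP clock (ClkEn) and wait for ACP_STATUS bit 0, then
 * deassert the soft reset.  Each wait loop gives up after 0xFF polls
 * spaced 100 us apart, i.e. roughly 25 ms.
 */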
416	/* Assert Soft reset of ACP */
417	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
418
419	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
420	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
421
422	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
423	while (true) {
424		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
425		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
426		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
427			break;
428		if (--count == 0) {
429			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
430			return -ETIMEDOUT;
431		}
432		udelay(100);
433	}
434	/* Enable clock to ACP and wait until the clock is enabled */
435	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
436	val = val | ACP_CONTROL__ClkEn_MASK;
437	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
438
439	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
440
441	while (true) {
442		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
443		if (val & (u32) 0x1)
444			break;
445		if (--count == 0) {
446			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
447			return -ETIMEDOUT;
448		}
449		udelay(100);
450	}
451	/* Deassert the SOFT RESET flags */
452	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
453	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
454	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
455
456	return 0;
457}
458
459/**
460 * acp_hw_fini - stop the hardware block
461 *
462 * @handle: handle used to pass amdgpu_device pointer
463 *
464 */
465static int acp_hw_fini(void *handle)
466{
467	int i, ret;
468	u32 val = 0;
469	u32 count = 0;
470	struct device *dev;
471	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
472
473	/* return early if no ACP */
474	if (!adev->acp.acp_cell)
475		return 0;
476
477	/* Assert Soft reset of ACP */
478	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
479
480	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
481	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
482
483	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
484	while (true) {
485		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
486		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
487		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
488			break;
489		if (--count == 0) {
490			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
491			return -ETIMEDOUT;
492		}
493		udelay(100);
494	}
495	/* Disable ACP clock */
496	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
497	val &= ~ACP_CONTROL__ClkEn_MASK;
498	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
499
500	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
501
502	while (true) {
503		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
504		if (val & (u32) 0x1)
505			break;
506		if (--count == 0) {
507			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
508			return -ETIMEDOUT;
509		}
510		udelay(100);
511	}
512
513	if (adev->acp.acp_genpd) {
514		for (i = 0; i < ACP_DEVS ; i++) {
515			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
516			ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
517			/* If removal fails, don't give up and try the rest */
518			if (ret)
519				dev_err(dev, "remove dev from genpd failed\n");
520		}
521		kfree(adev->acp.acp_genpd);
522	}
523
524	mfd_remove_devices(adev->acp.parent);
525	kfree(adev->acp.acp_res);
526	kfree(adev->acp.acp_cell);
527
528	return 0;
529}
530
531static int acp_suspend(void *handle)
532{
533	return 0;
534}
535
536static int acp_resume(void *handle)
537{
538	return 0;
539}
540
541static int acp_early_init(void *handle)
542{
543	return 0;
544}
545
546static bool acp_is_idle(void *handle)
547{
548	return true;
549}
550
551static int acp_wait_for_idle(void *handle)
552{
553	return 0;
554}
555
556static int acp_soft_reset(void *handle)
557{
558	return 0;
559}
560
561static int acp_set_clockgating_state(void *handle,
562				     enum amd_clockgating_state state)
563{
564	return 0;
565}
566
567static int acp_set_powergating_state(void *handle,
568				     enum amd_powergating_state state)
569{
570	return 0;
571}
572
573static const struct amd_ip_funcs acp_ip_funcs = {
574	.name = "acp_ip",
575	.early_init = acp_early_init,
576	.late_init = NULL,
577	.sw_init = acp_sw_init,
578	.sw_fini = acp_sw_fini,
579	.hw_init = acp_hw_init,
580	.hw_fini = acp_hw_fini,
581	.suspend = acp_suspend,
582	.resume = acp_resume,
583	.is_idle = acp_is_idle,
584	.wait_for_idle = acp_wait_for_idle,
585	.soft_reset = acp_soft_reset,
586	.set_clockgating_state = acp_set_clockgating_state,
587	.set_powergating_state = acp_set_powergating_state,
588};
589
590const struct amdgpu_ip_block_version acp_ip_block =
591{
592	.type = AMD_IP_BLOCK_TYPE_ACP,
593	.major = 2,
594	.minor = 2,
595	.rev = 0,
596	.funcs = &acp_ip_funcs,
597};
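/*
 * acp_ip_block above is only a descriptor: nothing happens until the
 * ASIC-specific setup code adds it to the device's IP block list.  A
 * minimal sketch of that registration follows.  The wrapper function name
 * is hypothetical; the real call site is the ASIC init code (e.g. vi.c
 * for Carrizo/Stoney), typically guarded by CONFIG_DRM_AMD_ACP.
 */
#if defined(CONFIG_DRM_AMD_ACP)
static void example_register_acp_ip_block(struct amdgpu_device *adev)
{
	/* add the ACP block to the list of IP blocks brought up at init */
	amdgpu_device_ip_block_add(adev, &acp_ip_block);
}
#endif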
v5.9 (drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c)
  1/*
  2 * Copyright 2015 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: AMD
 23 *
 24 */
 25
 26#include <linux/irqdomain.h>
 27#include <linux/pci.h>
 28#include <linux/pm_domain.h>
 29#include <linux/platform_device.h>
 30#include <sound/designware_i2s.h>
 31#include <sound/pcm.h>
 32
 33#include "amdgpu.h"
 34#include "atom.h"
 35#include "amdgpu_acp.h"
 36
 37#include "acp_gfx_if.h"
 38
 39#define ACP_TILE_ON_MASK                	0x03
 40#define ACP_TILE_OFF_MASK               	0x02
 41#define ACP_TILE_ON_RETAIN_REG_MASK     	0x1f
 42#define ACP_TILE_OFF_RETAIN_REG_MASK    	0x20
 43
 44#define ACP_TILE_P1_MASK                	0x3e
 45#define ACP_TILE_P2_MASK                	0x3d
 46#define ACP_TILE_DSP0_MASK              	0x3b
 47#define ACP_TILE_DSP1_MASK              	0x37
 48
 49#define ACP_TILE_DSP2_MASK              	0x2f
 50
 51#define ACP_DMA_REGS_END			0x146c0
 52#define ACP_I2S_PLAY_REGS_START			0x14840
 53#define ACP_I2S_PLAY_REGS_END			0x148b4
 54#define ACP_I2S_CAP_REGS_START			0x148b8
 55#define ACP_I2S_CAP_REGS_END			0x1496c
 56
 57#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
 58#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
 59#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
 60#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
 61#define ACP_BT_PLAY_REGS_START			0x14970
 62#define ACP_BT_PLAY_REGS_END			0x14a24
 63#define ACP_BT_COMP1_REG_OFFSET			0xac
 64#define ACP_BT_COMP2_REG_OFFSET			0xa8
 65
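/*
 * New in this version relative to v4.17: the ACP_BT_* defines above
 * describe a third DesignWare I2S register window used for the Bluetooth
 * audio link.  Correspondingly, ACP_DEVS grows from 3 to 4 MFD cells, and
 * acp_hw_init() now sets up five resources and a third i2s_platform_data
 * entry.
 */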
 66#define mmACP_PGFSM_RETAIN_REG			0x51c9
 67#define mmACP_PGFSM_CONFIG_REG			0x51ca
 68#define mmACP_PGFSM_READ_REG_0			0x51cc
 69
 70#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
 71#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
 72#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
 73#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb
 74
 75#define mmACP_CONTROL				0x5131
 76#define mmACP_STATUS				0x5133
 77#define mmACP_SOFT_RESET			0x5134
 78#define ACP_CONTROL__ClkEn_MASK 		0x1
 79#define ACP_SOFT_RESET__SoftResetAud_MASK 	0x100
 80#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
 81#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
 82#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF
 83
 84#define ACP_TIMEOUT_LOOP			0x000000FF
 85#define ACP_DEVS				4
 86#define ACP_SRC_ID				162
 87
 88enum {
 89	ACP_TILE_P1 = 0,
 90	ACP_TILE_P2,
 91	ACP_TILE_DSP0,
 92	ACP_TILE_DSP1,
 93	ACP_TILE_DSP2,
 94};
 95
 96static int acp_sw_init(void *handle)
 97{
 98	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 99
100	adev->acp.parent = adev->dev;
101
102	adev->acp.cgs_device =
103		amdgpu_cgs_create_device(adev);
104	if (!adev->acp.cgs_device)
105		return -EINVAL;
106
107	return 0;
108}
109
110static int acp_sw_fini(void *handle)
111{
112	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
113
114	if (adev->acp.cgs_device)
115		amdgpu_cgs_destroy_device(adev->acp.cgs_device);
116
117	return 0;
118}
119
120struct acp_pm_domain {
121	void *adev;
122	struct generic_pm_domain gpd;
123};
124
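/*
 * Compared with v4.17, the generic_pm_domain callbacks no longer program
 * the PGFSM tile registers directly.  They keep a pointer to the
 * amdgpu_device and ask the SMU to power-gate or ungate the whole ACP
 * block through amdgpu_dpm_set_powergating_by_smu().
 */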
125static int acp_poweroff(struct generic_pm_domain *genpd)
126{
127	struct acp_pm_domain *apd;
128	struct amdgpu_device *adev;
129
130	apd = container_of(genpd, struct acp_pm_domain, gpd);
131	if (apd != NULL) {
132		adev = apd->adev;
133	/* Call the SMU to power-gate the ACP block.
134	 * The SMU will:
135	 * 1. turn off the ACP clock
136	 * 2. power off the ACP tiles
137	 * 3. check for and enter the ULV state
138	 */
139		if (adev->powerplay.pp_funcs &&
140			adev->powerplay.pp_funcs->set_powergating_by_smu)
141			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
142	}
143	return 0;
144}
145
146static int acp_poweron(struct generic_pm_domain *genpd)
147{
148	struct acp_pm_domain *apd;
149	struct amdgpu_device *adev;
150
151	apd = container_of(genpd, struct acp_pm_domain, gpd);
152	if (apd != NULL) {
153		adev = apd->adev;
154	/* Call the SMU to ungate the ACP block.
155	 * The SMU will:
156	 * 1. exit ULV
157	 * 2. turn on the ACP clock
158	 * 3. power on the ACP tiles
159	 */
160		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
161			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
162	}
163	return 0;
164}
165
166static struct device *get_mfd_cell_dev(const char *device_name, int r)
167{
168	char auto_dev_name[25];
169	struct device *dev;
170
171	snprintf(auto_dev_name, sizeof(auto_dev_name),
172		 "%s.%d.auto", device_name, r);
173	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
174	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
175
176	return dev;
177}
178
179/**
180 * acp_hw_init - start and test ACP block
181 *
182 * @handle: handle used to pass amdgpu_device pointer
183 *
184 */
185static int acp_hw_init(void *handle)
186{
187	int r, i;
188	uint64_t acp_base;
189	u32 val = 0;
190	u32 count = 0;
191	struct device *dev;
192	struct i2s_platform_data *i2s_pdata = NULL;
193
194	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
195
196	const struct amdgpu_ip_block *ip_block =
197		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
198
199	if (!ip_block)
200		return -EINVAL;
201
202	r = amd_acp_hw_init(adev->acp.cgs_device,
203			    ip_block->version->major, ip_block->version->minor);
204	/* -ENODEV means board uses AZ rather than ACP */
205	if (r == -ENODEV) {
206		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
207		return 0;
208	} else if (r) {
209		return r;
210	}
211
212	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
213		return -EINVAL;
214
215	acp_base = adev->rmmio_base;
216
217
218	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
219	if (adev->acp.acp_genpd == NULL)
220		return -ENOMEM;
221
222	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
223	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
224	adev->acp.acp_genpd->gpd.power_on = acp_poweron;
225
226
227	adev->acp.acp_genpd->adev = adev;
228
229	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
230
231	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
232							GFP_KERNEL);
233
234	if (adev->acp.acp_cell == NULL) {
235		r = -ENOMEM;
236		goto failure;
237	}
238
239	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
240	if (adev->acp.acp_res == NULL) {
241		r = -ENOMEM;
242		goto failure;
243	}
244
245	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
246	if (i2s_pdata == NULL) {
247		r = -ENOMEM;
248		goto failure;
249	}
250
251	switch (adev->asic_type) {
252	case CHIP_STONEY:
253		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
254			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
255		break;
256	default:
257		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
258	}
259	i2s_pdata[0].cap = DWC_I2S_PLAY;
260	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
261	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
262	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
263	switch (adev->asic_type) {
264	case CHIP_STONEY:
265		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
266			DW_I2S_QUIRK_COMP_PARAM1 |
267			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
268		break;
269	default:
270		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
271			DW_I2S_QUIRK_COMP_PARAM1;
272	}
273
274	i2s_pdata[1].cap = DWC_I2S_RECORD;
275	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
276	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
277	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
278
279	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
280	switch (adev->asic_type) {
281	case CHIP_STONEY:
282		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
283		break;
284	default:
285		break;
286	}
287
288	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
289	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
290	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
291	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
292
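/*
 * i2s_pdata[2] above feeds the third "designware-i2s" cell (the Bluetooth
 * instance, acp_cell[3]); it is both playback and capture capable and
 * uses the ACP_BT_* register window described by acp_res[3].
 */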
293	adev->acp.acp_res[0].name = "acp2x_dma";
294	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
295	adev->acp.acp_res[0].start = acp_base;
296	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
297
298	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
299	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
300	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
301	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
302
303	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
304	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
305	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
306	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
307
308	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
309	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
310	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
311	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
312
313	adev->acp.acp_res[4].name = "acp2x_dma_irq";
314	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
315	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
316	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
317
318	adev->acp.acp_cell[0].name = "acp_audio_dma";
319	adev->acp.acp_cell[0].num_resources = 5;
320	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
321	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
322	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
323
324	adev->acp.acp_cell[1].name = "designware-i2s";
325	adev->acp.acp_cell[1].num_resources = 1;
326	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
327	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
328	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
329
330	adev->acp.acp_cell[2].name = "designware-i2s";
331	adev->acp.acp_cell[2].num_resources = 1;
332	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
333	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
334	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
335
336	adev->acp.acp_cell[3].name = "designware-i2s";
337	adev->acp.acp_cell[3].num_resources = 1;
338	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
339	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
340	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
341
342	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
343								ACP_DEVS);
344	if (r)
345		goto failure;
346
347	for (i = 0; i < ACP_DEVS ; i++) {
348		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
349		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
350		if (r) {
351			dev_err(dev, "Failed to add dev to genpd\n");
352			goto failure;
353		}
354	}
355
356
357	/* Assert Soft reset of ACP */
358	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
359
360	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
361	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
362
363	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
364	while (true) {
365		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
366		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
367		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
368			break;
369		if (--count == 0) {
370			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
371			r = -ETIMEDOUT;
372			goto failure;
373		}
374		udelay(100);
375	}
376	/* Enable clock to ACP and wait until the clock is enabled */
377	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
378	val = val | ACP_CONTROL__ClkEn_MASK;
379	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
380
381	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
382
383	while (true) {
384		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
385		if (val & (u32) 0x1)
386			break;
387		if (--count == 0) {
388			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
389			r = -ETIMEDOUT;
390			goto failure;
391		}
392		udelay(100);
393	}
394	/* Deassert the SOFT RESET flags */
395	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
396	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
397	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
398	return 0;
399
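/*
 * Unified error path: every allocation made above (i2s_pdata, acp_res,
 * acp_cell, acp_genpd) is freed here, which the v4.17 version only did
 * partially on its early-return error paths.
 */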
400failure:
401	kfree(i2s_pdata);
402	kfree(adev->acp.acp_res);
403	kfree(adev->acp.acp_cell);
404	kfree(adev->acp.acp_genpd);
405	return r;
406}
407
408/**
409 * acp_hw_fini - stop the hardware block
410 *
411 * @handle: handle used to pass amdgpu_device pointer
412 *
413 */
414static int acp_hw_fini(void *handle)
415{
416	int i, ret;
417	u32 val = 0;
418	u32 count = 0;
419	struct device *dev;
420	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
421
422	/* return early if no ACP */
423	if (!adev->acp.acp_genpd) {
424		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
425		return 0;
426	}
427
428	/* Assert Soft reset of ACP */
429	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
430
431	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
432	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
433
434	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
435	while (true) {
436		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
437		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
438		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
439			break;
440		if (--count == 0) {
441			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
442			return -ETIMEDOUT;
443		}
444		udelay(100);
445	}
446	/* Disable ACP clock */
447	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
448	val &= ~ACP_CONTROL__ClkEn_MASK;
449	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
450
451	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
452
453	while (true) {
454		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
455		if (val & (u32) 0x1)
456			break;
457		if (--count == 0) {
458			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
459			return -ETIMEDOUT;
460		}
461		udelay(100);
462	}
463
464	for (i = 0; i < ACP_DEVS ; i++) {
465		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
466		ret = pm_genpd_remove_device(dev);
467		/* If removal fails, don't give up and try the rest */
468		if (ret)
469			dev_err(dev, "remove dev from genpd failed\n");
470	}
471
472	mfd_remove_devices(adev->acp.parent);
473	kfree(adev->acp.acp_res);
474	kfree(adev->acp.acp_genpd);
475	kfree(adev->acp.acp_cell);
476
477	return 0;
478}
479
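/*
 * acp_suspend()/acp_resume() below only act on boards without an ACP
 * block (acp_cell == NULL): acp_hw_init() left ACP power-gated through
 * the SMU in that case, so suspend ungates it and resume gates it again.
 */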
480static int acp_suspend(void *handle)
481{
482	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
483
484	/* power up on suspend */
485	if (!adev->acp.acp_cell)
486		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
487	return 0;
488}
489
490static int acp_resume(void *handle)
491{
492	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
493
494	/* power down again on resume */
495	if (!adev->acp.acp_cell)
496		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
497	return 0;
498}
499
500static int acp_early_init(void *handle)
501{
502	return 0;
503}
504
505static bool acp_is_idle(void *handle)
506{
507	return true;
508}
509
510static int acp_wait_for_idle(void *handle)
511{
512	return 0;
513}
514
515static int acp_soft_reset(void *handle)
516{
517	return 0;
518}
519
520static int acp_set_clockgating_state(void *handle,
521				     enum amd_clockgating_state state)
522{
523	return 0;
524}
525
526static int acp_set_powergating_state(void *handle,
527				     enum amd_powergating_state state)
528{
529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
530	bool enable = (state == AMD_PG_STATE_GATE);
531
532	if (adev->powerplay.pp_funcs &&
533		adev->powerplay.pp_funcs->set_powergating_by_smu)
534		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
535
536	return 0;
537}
538
539static const struct amd_ip_funcs acp_ip_funcs = {
540	.name = "acp_ip",
541	.early_init = acp_early_init,
542	.late_init = NULL,
543	.sw_init = acp_sw_init,
544	.sw_fini = acp_sw_fini,
545	.hw_init = acp_hw_init,
546	.hw_fini = acp_hw_fini,
547	.suspend = acp_suspend,
548	.resume = acp_resume,
549	.is_idle = acp_is_idle,
550	.wait_for_idle = acp_wait_for_idle,
551	.soft_reset = acp_soft_reset,
552	.set_clockgating_state = acp_set_clockgating_state,
553	.set_powergating_state = acp_set_powergating_state,
554};
555
556const struct amdgpu_ip_block_version acp_ip_block =
557{
558	.type = AMD_IP_BLOCK_TYPE_ACP,
559	.major = 2,
560	.minor = 2,
561	.rev = 0,
562	.funcs = &acp_ip_funcs,
563};
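/*
 * Summary of the main changes between the v4.17 and v5.9 versions shown
 * above:
 *  - a third DesignWare I2S instance (Bluetooth) is added: ACP_DEVS goes
 *    from 3 to 4 cells, with five resources instead of four;
 *  - the genpd power_on/power_off callbacks delegate to the SMU via
 *    amdgpu_dpm_set_powergating_by_smu() instead of driving the PGFSM
 *    tile registers directly;
 *  - the MMIO base comes straight from adev->rmmio_base rather than
 *    cgs_get_pci_resource(), and allocations use kcalloc();
 *  - acp_hw_init() gains a single "failure:" cleanup path, and the power
 *    domain is created unconditionally (no CHIP_STONEY special case);
 *  - acp_suspend(), acp_resume() and acp_set_powergating_state() now
 *    toggle SMU power gating (they were no-ops in v4.17).
 */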