Linux Audio

Linux v4.6
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK                0x03
#define ACP_TILE_OFF_MASK               0x02
#define ACP_TILE_ON_RETAIN_REG_MASK     0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK    0x20

#define ACP_TILE_P1_MASK                0x3e
#define ACP_TILE_P2_MASK                0x3d
#define ACP_TILE_DSP0_MASK              0x3b
#define ACP_TILE_DSP1_MASK              0x37

#define ACP_TILE_DSP2_MASK              0x2f

#define ACP_DMA_REGS_END		0x146c0
#define ACP_I2S_PLAY_REGS_START		0x14840
#define ACP_I2S_PLAY_REGS_END		0x148b4
#define ACP_I2S_CAP_REGS_START		0x148b8
#define ACP_I2S_CAP_REGS_END		0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET	0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET	0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET	0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET	0x68

#define mmACP_PGFSM_RETAIN_REG		0x51c9
#define mmACP_PGFSM_CONFIG_REG		0x51ca
#define mmACP_PGFSM_READ_REG_0		0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO	0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI	0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO	0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI	0x51fb

#define ACP_TIMEOUT_LOOP		0x000000FF
#define ACP_DEVS			3
#define ACP_SRC_ID			162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
								+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

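/* genpd power_off callback: power down all five ACP tiles (P1, P2, DSP0-2) */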
static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

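/*
 * genpd power_on callback: power up the P1 and P2 tiles and keep the unused
 * DSP tiles powered off.
 */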
static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

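/* Look up the platform device MFD auto-created for a cell ("<name>.<id>.auto") */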
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block_version *ip_version =
		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_version)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_version->major, ip_version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
			0x5289, 0, &acp_base);
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);

	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;

	i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1;
	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dma_irq";
	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 4;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			return r;
		}
	}

	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	struct device *dev;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	int i, ret;
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd)
		return 0;

	/* SMU block will power on ACP irrespective of ACP runtime status.
	 * Power off explicitly based on genpd ACP runtime status so that ACP
	 * hw and ACP-genpd status are in sync.
	 * 'suspend_power_off' represents "Power status before system suspend"
	 */
	if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
		apd = container_of(&adev->acp.acp_genpd->gpd,
					struct acp_pm_domain, gpd);

		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static void acp_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "ACP STATUS\n");
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs acp_ip_funcs = {
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.print_status = acp_print_status,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

Linux v4.10.11
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK                0x03
#define ACP_TILE_OFF_MASK               0x02
#define ACP_TILE_ON_RETAIN_REG_MASK     0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK    0x20

#define ACP_TILE_P1_MASK                0x3e
#define ACP_TILE_P2_MASK                0x3d
#define ACP_TILE_DSP0_MASK              0x3b
#define ACP_TILE_DSP1_MASK              0x37

#define ACP_TILE_DSP2_MASK              0x2f

#define ACP_DMA_REGS_END		0x146c0
#define ACP_I2S_PLAY_REGS_START		0x14840
#define ACP_I2S_PLAY_REGS_END		0x148b4
#define ACP_I2S_CAP_REGS_START		0x148b8
#define ACP_I2S_CAP_REGS_END		0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET	0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET	0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET	0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET	0x68

#define mmACP_PGFSM_RETAIN_REG		0x51c9
#define mmACP_PGFSM_CONFIG_REG		0x51ca
#define mmACP_PGFSM_READ_REG_0		0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO	0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI	0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO	0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI	0x51fb

#define ACP_TIMEOUT_LOOP		0x000000FF
#define ACP_DEVS			3
#define ACP_SRC_ID			162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
								+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

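/* ACP generic power domain, carrying the CGS handle used for register access */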
struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
			0x5289, 0, &acp_base);
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);

	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;

	i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1;
	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dma_irq";
	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 4;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			return r;
		}
	}

	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd)
		return 0;

	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

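/* IP block descriptor for ACP 2.2, wired to the handlers in acp_ip_funcs */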
const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};