v6.9.4
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev
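/*
 * CGS_FUNC_ADEV expands to a local variable declaration: it recovers the
 * struct amdgpu_device hidden behind the opaque cgs_device handle. Each
 * callback below therefore invokes it as its first statement, roughly:
 *
 *	CGS_FUNC_ADEV;		(declares "adev" in this scope)
 *	return RREG32(offset);	(RREG32 and friends use adev implicitly)
 */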
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned int offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned int offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned int index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG_SE_CAC:
		return RREG32_SE_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	default:
		BUG();
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned int index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG_SE_CAC:
		return WREG32_SE_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	default:
		BUG();
	}
	WARN(1, "Invalid indirect register space");
}

static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/*
		 * For VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched, but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT1:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (type != CGS_UCODE_ID_SMU && type != CGS_UCODE_ID_SMU_SK) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (type == CGS_UCODE_ID_CP_MEC)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TAHITI:
				strcpy(fw_name, "radeon/tahiti_smc.bin");
				break;
			case CHIP_PITCAIRN:
				if ((adev->pdev->revision == 0x81) &&
				    ((adev->pdev->device == 0x6810) ||
				    (adev->pdev->device == 0x6811))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/pitcairn_smc.bin");
				}
				break;
			case CHIP_VERDE:
				if (((adev->pdev->device == 0x6820) &&
					((adev->pdev->revision == 0x81) ||
					(adev->pdev->revision == 0x83))) ||
				    ((adev->pdev->device == 0x6821) &&
					((adev->pdev->revision == 0x83) ||
					(adev->pdev->revision == 0x87))) ||
				    ((adev->pdev->revision == 0x87) &&
					((adev->pdev->device == 0x6823) ||
					(adev->pdev->device == 0x682b)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/verde_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/verde_smc.bin");
				}
				break;
			case CHIP_OLAND:
				if (((adev->pdev->revision == 0x81) &&
					((adev->pdev->device == 0x6600) ||
					(adev->pdev->device == 0x6604) ||
					(adev->pdev->device == 0x6605) ||
					(adev->pdev->device == 0x6610))) ||
				    ((adev->pdev->revision == 0x83) &&
					(adev->pdev->device == 0x6610))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/oland_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/oland_smc.bin");
				}
				break;
			case CHIP_HAINAN:
				if (((adev->pdev->revision == 0x81) &&
					(adev->pdev->device == 0x6660)) ||
				    ((adev->pdev->revision == 0x83) &&
					((adev->pdev->device == 0x6660) ||
					(adev->pdev->device == 0x6663) ||
					(adev->pdev->device == 0x6665) ||
					(adev->pdev->device == 0x6667)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hainan_k_smc.bin");
				} else if ((adev->pdev->revision == 0xc3) &&
					(adev->pdev->device == 0x6665)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/banks_k_2_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hainan_smc.bin");
				}
				break;
			case CHIP_BONAIRE:
				if ((adev->pdev->revision == 0x80) ||
					(adev->pdev->revision == 0x81) ||
					(adev->pdev->device == 0x665f)) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
				} else {
					strcpy(fw_name, "amdgpu/bonaire_smc.bin");
				}
				break;
			case CHIP_HAWAII:
				if (adev->pdev->revision == 0x80) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
				} else {
					strcpy(fw_name, "amdgpu/hawaii_smc.bin");
				}
				break;
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
					} else {
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
					}
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
					} else {
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
					}
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
				} else {
					strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				}
				break;
			case CHIP_VEGAM:
				strcpy(fw_name, "amdgpu/vegam_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
					((adev->pdev->revision == 0xc0) ||
					(adev->pdev->revision == 0xc1) ||
					(adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			case CHIP_VEGA12:
				strcpy(fw_name, "amdgpu/vega12_smc.bin");
				break;
			case CHIP_VEGA20:
				strcpy(fw_name, "amdgpu/vega20_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				amdgpu_ucode_release(&adev->pm.fw);
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}
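/*
 * A minimal caller sketch (hypothetical; real consumers such as the
 * powerplay code reach this entry point through the ops table below,
 * and "cgs_dev" and the chosen ucode id are illustrative only):
 *
 *	struct cgs_firmware_info fw_info = {0};
 *
 *	if (!cgs_dev->ops->get_firmware_info(cgs_dev, CGS_UCODE_ID_CP_ME, &fw_info))
 *		DRM_INFO("CP ME fw 0x%x, %u bytes\n",
 *			 fw_info.version, fw_info.image_size);
 */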
static const struct cgs_ops amdgpu_cgs_ops = {
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}
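/*
 * Lifecycle sketch (hypothetical caller): the handle only wraps the adev
 * pointer, so creation is a single kmalloc() and teardown a plain kfree():
 *
 *	struct cgs_device *cgs_dev = amdgpu_cgs_create_device(adev);
 *
 *	if (cgs_dev) {
 *		uint32_t val = cgs_dev->ops->read_register(cgs_dev, offset);
 *		...
 *		amdgpu_cgs_destroy_device(cgs_dev);
 *	}
 */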
v4.10.11
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}
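/*
 * A minimal lifecycle sketch (hypothetical caller and sizes; real
 * consumers go through the cgs wrapper macros rather than calling these
 * statics directly): allocate, map for GPU and CPU access, then unwind
 * in reverse order with the helpers defined below.
 *
 *	cgs_handle_t handle;
 *	uint64_t mcaddr;
 *	void *cpu_ptr;
 *
 *	if (!amdgpu_cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 *				      PAGE_SIZE, PAGE_SIZE, 0, 0, &handle)) {
 *		amdgpu_cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);
 *		amdgpu_cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);
 *		...
 *		amdgpu_cgs_kunmap_gpu_mem(cgs_device, handle);
 *		amdgpu_cgs_gunmap_gpu_mem(cgs_device, handle);
 *		amdgpu_cgs_free_gpu_mem(cgs_device, handle);
 *	}
 */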
static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}

struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}
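/*
 * Registration sketch (hypothetical src_id, callbacks and client data):
 * a source is added once, then enabled and disabled per interrupt type
 * with the refcounting helpers that follow.
 *
 *	amdgpu_cgs_add_irq_source(cgs_device, src_id, 1,
 *				  my_set_irq_state, my_irq_handler, my_data);
 *	amdgpu_cgs_irq_get(cgs_device, src_id, 0);
 *	...
 *	amdgpu_cgs_irq_put(cgs_device, src_id, 0);
 */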
static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/*
		 * For VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched, but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		return 0;
	}
	/* cannot release other firmware because it was not created by cgs */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT1:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}

static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					  struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				&ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
				crtc->enabled && amdgpu_crtc->enabled &&
				amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
							amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
							amdgpu_crtc->hw_mode.crtc_vdisplay +
							(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */

#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				    struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
				(argument->type == ACPI_TYPE_BUFFER))
				&& (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
				kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
			(obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
				(params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
				params->buffer.pointer,
				argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
					uint32_t acpi_method,
					uint32_t acpi_function,
					void *pinput, void *poutput,
					uint32_t output_count,
					uint32_t input_size,
					uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
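/*
 * Invocation sketch (hypothetical ids and buffers): the four ASCII
 * characters of the ACPI method name arrive packed in "acpi_method",
 * one integer plus one buffer argument go in, and a single buffer
 * argument comes back.
 *
 *	uint32_t input[4] = {0}, output[4] = {0};
 *
 *	amdgpu_cgs_call_acpi_method(cgs_device, acpi_method, acpi_function,
 *				    input, output, 1,
 *				    sizeof(input), sizeof(output));
 */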
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_rel_firmware,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_notify_dpm_enabled,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
	amdgpu_cgs_is_virtualization_enabled
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}