v4.6
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 *
  23 */
  24#include <linux/list.h>
  25#include <linux/slab.h>
  26#include <linux/pci.h>
  27#include <linux/acpi.h>
  28#include <drm/drmP.h>
  29#include <linux/firmware.h>
  30#include <drm/amdgpu_drm.h>
  31#include "amdgpu.h"
  32#include "cgs_linux.h"
  33#include "atom.h"
  34#include "amdgpu_ucode.h"
  35
  36struct amdgpu_cgs_device {
  37	struct cgs_device base;
  38	struct amdgpu_device *adev;
  39};
  40
  41#define CGS_FUNC_ADEV							\
  42	struct amdgpu_device *adev =					\
  43		((struct amdgpu_cgs_device *)cgs_device)->adev
  44
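
For orientation, a minimal sketch of how a CGS callback uses this macro (the helper name is hypothetical, not part of the kernel file; it relies only on definitions already present here):

static uint32_t example_cgs_callback(void *cgs_device)
{
	CGS_FUNC_ADEV;	/* struct amdgpu_device *adev = ...->adev; */

	/* the opaque handle is now unwrapped back to the amdgpu device */
	return adev->num_ip_blocks;
}
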
  45static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
  46				   uint64_t *mc_start, uint64_t *mc_size,
  47				   uint64_t *mem_size)
  48{
  49	CGS_FUNC_ADEV;
  50	switch(type) {
  51	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
  52	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
  53		*mc_start = 0;
  54		*mc_size = adev->mc.visible_vram_size;
  55		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
  56		break;
  57	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
  58	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
  59		*mc_start = adev->mc.visible_vram_size;
  60		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
  61		*mem_size = *mc_size;
  62		break;
  63	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
  64	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
  65		*mc_start = adev->mc.gtt_start;
  66		*mc_size = adev->mc.gtt_size;
  67		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
  68		break;
  69	default:
  70		return -EINVAL;
  71	}
  72
  73	return 0;
  74}
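
A hypothetical usage sketch for the heap query above; the helper name and the DRM_INFO report are illustrative (real clients reach this through the cgs_ops table rather than calling it directly):

static void example_query_gtt(void *cgs_device)
{
	uint64_t mc_start, mc_size, mem_size;

	if (!amdgpu_cgs_gpu_mem_info(cgs_device,
				     CGS_GPU_MEM_TYPE__GART_CACHEABLE,
				     &mc_start, &mc_size, &mem_size))
		DRM_INFO("GTT: start 0x%llx size %llu free %llu\n",
			 (unsigned long long)mc_start,
			 (unsigned long long)mc_size,
			 (unsigned long long)mem_size);
}
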
  75
  76static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
  77				uint64_t size,
  78				uint64_t min_offset, uint64_t max_offset,
  79				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
  80{
  81	CGS_FUNC_ADEV;
  82	int ret;
  83	struct amdgpu_bo *bo;
  84	struct page *kmem_page = vmalloc_to_page(kmem);
  85	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
  86
  87	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
  88	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
  89			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
  90	if (ret)
  91		return ret;
  92	ret = amdgpu_bo_reserve(bo, false);
  93	if (unlikely(ret != 0))
  94		return ret;
  95
  96	/* pin buffer into GTT */
  97	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
  98				       min_offset, max_offset, mcaddr);
  99	amdgpu_bo_unreserve(bo);
 100
 101	*kmem_handle = (cgs_handle_t)bo;
 102	return ret;
 103}
 104
 105static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
 106{
 107	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
 108
 109	if (obj) {
 110		int r = amdgpu_bo_reserve(obj, false);
 111		if (likely(r == 0)) {
 112			amdgpu_bo_unpin(obj);
 113			amdgpu_bo_unreserve(obj);
 114		}
 115		amdgpu_bo_unref(&obj);
 116
 117	}
 118	return 0;
 119}
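
A hypothetical round-trip sketch for the two kmem helpers above: map one vmalloc'ed page into GTT, use the returned MC address, then unpin and free the wrapping BO (a min_offset/max_offset of 0 places no restriction on where the page is pinned):

static int example_kmem_roundtrip(void *cgs_device, void *vaddr)
{
	cgs_handle_t handle;
	uint64_t mcaddr;
	int r;

	r = amdgpu_cgs_gmap_kmem(cgs_device, vaddr, PAGE_SIZE,
				 0, 0, &handle, &mcaddr);
	if (r)
		return r;

	/* ... hand mcaddr to the hardware ... */

	return amdgpu_cgs_gunmap_kmem(cgs_device, handle);
}
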
 120
 121static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
 122				    enum cgs_gpu_mem_type type,
 123				    uint64_t size, uint64_t align,
 124				    uint64_t min_offset, uint64_t max_offset,
 125				    cgs_handle_t *handle)
 126{
 127	CGS_FUNC_ADEV;
 128	uint16_t flags = 0;
 129	int ret = 0;
 130	uint32_t domain = 0;
 131	struct amdgpu_bo *obj;
 132	struct ttm_placement placement;
 133	struct ttm_place place;
 134
 135	if (min_offset > max_offset) {
 136		BUG_ON(1);
 137		return -EINVAL;
 138	}
 139
 140	/* fail if the alignment is not a power of 2 */
 141	if (((align != 1) && (align & (align - 1)))
 142	    || size == 0 || align == 0)
 143		return -EINVAL;
 144
 145
 146	switch(type) {
 147	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
 148	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
 149		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 150		domain = AMDGPU_GEM_DOMAIN_VRAM;
 151		if (max_offset > adev->mc.real_vram_size)
 152			return -EINVAL;
 153		place.fpfn = min_offset >> PAGE_SHIFT;
 154		place.lpfn = max_offset >> PAGE_SHIFT;
 155		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 156			TTM_PL_FLAG_VRAM;
 157		break;
 158	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 159	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
 160		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 161		domain = AMDGPU_GEM_DOMAIN_VRAM;
 162		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 163			place.fpfn =
 164				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
 165			place.lpfn =
 166				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
 167			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 168				TTM_PL_FLAG_VRAM;
 169		}
 170
 171		break;
 172	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
 173		domain = AMDGPU_GEM_DOMAIN_GTT;
 174		place.fpfn = min_offset >> PAGE_SHIFT;
 175		place.lpfn = max_offset >> PAGE_SHIFT;
 176		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 177		break;
 178	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
 179		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 180		domain = AMDGPU_GEM_DOMAIN_GTT;
 181		place.fpfn = min_offset >> PAGE_SHIFT;
 182		place.lpfn = max_offset >> PAGE_SHIFT;
 183		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
 184			TTM_PL_FLAG_UNCACHED;
 185		break;
 186	default:
 187		return -EINVAL;
 188	}
 189
 190
 191	*handle = 0;
 192
 193	placement.placement = &place;
 194	placement.num_placement = 1;
 195	placement.busy_placement = &place;
 196	placement.num_busy_placement = 1;
 197
 198	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
 199					  true, domain, flags,
 200					  NULL, &placement, NULL,
 201					  &obj);
 202	if (ret) {
 203		DRM_ERROR("(%d) bo create failed\n", ret);
 204		return ret;
 205	}
 206	*handle = (cgs_handle_t)obj;
 207
 208	return ret;
 209}
 210
 211static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 212{
 213	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 214
 215	if (obj) {
 216		int r = amdgpu_bo_reserve(obj, false);
 217		if (likely(r == 0)) {
 218			amdgpu_bo_kunmap(obj);
 219			amdgpu_bo_unpin(obj);
 220			amdgpu_bo_unreserve(obj);
 221		}
 222		amdgpu_bo_unref(&obj);
 223
 224	}
 225	return 0;
 226}
 227
 228static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
 229				   uint64_t *mcaddr)
 230{
 231	int r;
 232	u64 min_offset, max_offset;
 233	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 234
 235	WARN_ON_ONCE(obj->placement.num_placement > 1);
 236
 237	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
 238	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
 239
 240	r = amdgpu_bo_reserve(obj, false);
 241	if (unlikely(r != 0))
 242		return r;
 243	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
 244				     min_offset, max_offset, mcaddr);
 245	amdgpu_bo_unreserve(obj);
 246	return r;
 247}
 248
 249static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
 250{
 251	int r;
 252	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 253	r = amdgpu_bo_reserve(obj, false);
 254	if (unlikely(r != 0))
 255		return r;
 256	r = amdgpu_bo_unpin(obj);
 257	amdgpu_bo_unreserve(obj);
 258	return r;
 259}
 260
 261static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
 262				   void **map)
 263{
 264	int r;
 265	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 266	r = amdgpu_bo_reserve(obj, false);
 267	if (unlikely(r != 0))
 268		return r;
 269	r = amdgpu_bo_kmap(obj, map);
 270	amdgpu_bo_unreserve(obj);
 271	return r;
 272}
 273
 274static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
 275{
 276	int r;
 277	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 278	r = amdgpu_bo_reserve(obj, false);
 279	if (unlikely(r != 0))
 280		return r;
 281	amdgpu_bo_kunmap(obj);
 282	amdgpu_bo_unreserve(obj);
 283	return r;
 284}
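
A hypothetical end-to-end sketch tying together the buffer helpers above: allocate a page of CPU-visible VRAM, pin it to obtain an MC address, CPU-map it, then tear everything down (error handling abbreviated):

static int example_buffer_lifecycle(void *cgs_device)
{
	cgs_handle_t handle;
	uint64_t mcaddr;
	void *cpu_ptr;
	int r;

	r = amdgpu_cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__VISIBLE_FB,
				     PAGE_SIZE, PAGE_SIZE, 0, 0, &handle);
	if (r)
		return r;

	r = amdgpu_cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);
	if (r)
		goto out_free;

	r = amdgpu_cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);
	if (r)
		goto out_unmap;

	memset(cpu_ptr, 0, PAGE_SIZE);		/* touch the mapping */

	amdgpu_cgs_kunmap_gpu_mem(cgs_device, handle);
out_unmap:
	amdgpu_cgs_gunmap_gpu_mem(cgs_device, handle);
out_free:
	amdgpu_cgs_free_gpu_mem(cgs_device, handle);
	return r;
}
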
 285
 286static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
 287{
 288	CGS_FUNC_ADEV;
 289	return RREG32(offset);
 290}
 291
 292static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
 293				      uint32_t value)
 294{
 295	CGS_FUNC_ADEV;
 296	WREG32(offset, value);
 297}
 298
 299static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
 300					     enum cgs_ind_reg space,
 301					     unsigned index)
 302{
 303	CGS_FUNC_ADEV;
 304	switch (space) {
 305	case CGS_IND_REG__MMIO:
 306		return RREG32_IDX(index);
 307	case CGS_IND_REG__PCIE:
 308		return RREG32_PCIE(index);
 309	case CGS_IND_REG__SMC:
 310		return RREG32_SMC(index);
 311	case CGS_IND_REG__UVD_CTX:
 312		return RREG32_UVD_CTX(index);
 313	case CGS_IND_REG__DIDT:
 314		return RREG32_DIDT(index);
 315	case CGS_IND_REG__AUDIO_ENDPT:
 316		DRM_ERROR("audio endpt register access not implemented.\n");
 317		return 0;
 318	}
 319	WARN(1, "Invalid indirect register space");
 320	return 0;
 321}
 322
 323static void amdgpu_cgs_write_ind_register(void *cgs_device,
 324					  enum cgs_ind_reg space,
 325					  unsigned index, uint32_t value)
 326{
 327	CGS_FUNC_ADEV;
 328	switch (space) {
 329	case CGS_IND_REG__MMIO:
 330		return WREG32_IDX(index, value);
 331	case CGS_IND_REG__PCIE:
 332		return WREG32_PCIE(index, value);
 333	case CGS_IND_REG__SMC:
 334		return WREG32_SMC(index, value);
 335	case CGS_IND_REG__UVD_CTX:
 336		return WREG32_UVD_CTX(index, value);
 337	case CGS_IND_REG__DIDT:
 338		return WREG32_DIDT(index, value);
 339	case CGS_IND_REG__AUDIO_ENDPT:
 340		DRM_ERROR("audio endpt register access not implemented.\n");
 341		return;
 342	}
 343	WARN(1, "Invalid indirect register space");
 344}
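
A hypothetical read-modify-write sketch built on the two indirect accessors above ("reg" and "bits" are illustrative):

static void example_smc_rmw(void *cgs_device, unsigned reg, uint32_t bits)
{
	uint32_t v;

	v = amdgpu_cgs_read_ind_register(cgs_device, CGS_IND_REG__SMC, reg);
	amdgpu_cgs_write_ind_register(cgs_device, CGS_IND_REG__SMC,
				      reg, v | bits);
}
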
 345
 346static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
 347{
 348	CGS_FUNC_ADEV;
 349	uint8_t val;
 350	int ret = pci_read_config_byte(adev->pdev, addr, &val);
 351	if (WARN(ret, "pci_read_config_byte error"))
 352		return 0;
 353	return val;
 354}
 355
 356static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
 357{
 358	CGS_FUNC_ADEV;
 359	uint16_t val;
 360	int ret = pci_read_config_word(adev->pdev, addr, &val);
 361	if (WARN(ret, "pci_read_config_word error"))
 362		return 0;
 363	return val;
 364}
 365
 366static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
 367						 unsigned addr)
 368{
 369	CGS_FUNC_ADEV;
 370	uint32_t val;
 371	int ret = pci_read_config_dword(adev->pdev, addr, &val);
 372	if (WARN(ret, "pci_read_config_dword error"))
 373		return 0;
 374	return val;
 375}
 376
 377static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
 378					     uint8_t value)
 379{
 380	CGS_FUNC_ADEV;
 381	int ret = pci_write_config_byte(adev->pdev, addr, value);
 382	WARN(ret, "pci_write_config_byte error");
 383}
 384
 385static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
 386					     uint16_t value)
 387{
 388	CGS_FUNC_ADEV;
 389	int ret = pci_write_config_word(adev->pdev, addr, value);
 390	WARN(ret, "pci_write_config_word error");
 391}
 392
 393static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
 394					      uint32_t value)
 395{
 396	CGS_FUNC_ADEV;
 397	int ret = pci_write_config_dword(adev->pdev, addr, value);
 398	WARN(ret, "pci_write_config_dword error");
 399}
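
A hypothetical sketch of the config-space accessors above: read the 16-bit PCI device ID, which lives at offset 0x02 of the standard configuration header:

static uint16_t example_pci_device_id(void *cgs_device)
{
	return amdgpu_cgs_read_pci_config_word(cgs_device, 0x02);
}
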
 400
 401
 402static int amdgpu_cgs_get_pci_resource(void *cgs_device,
 403				       enum cgs_resource_type resource_type,
 404				       uint64_t size,
 405				       uint64_t offset,
 406				       uint64_t *resource_base)
 407{
 408	CGS_FUNC_ADEV;
 409
 410	if (resource_base == NULL)
 411		return -EINVAL;
 412
 413	switch (resource_type) {
 414	case CGS_RESOURCE_TYPE_MMIO:
 415		if (adev->rmmio_size == 0)
 416			return -ENOENT;
 417		if ((offset + size) > adev->rmmio_size)
 418			return -EINVAL;
 419		*resource_base = adev->rmmio_base;
 420		return 0;
 421	case CGS_RESOURCE_TYPE_DOORBELL:
 422		if (adev->doorbell.size == 0)
 423			return -ENOENT;
 424		if ((offset + size) > adev->doorbell.size)
 425			return -EINVAL;
 426		*resource_base = adev->doorbell.base;
 427		return 0;
 428	case CGS_RESOURCE_TYPE_FB:
 429	case CGS_RESOURCE_TYPE_IO:
 430	case CGS_RESOURCE_TYPE_ROM:
 431	default:
 432		return -EINVAL;
 433	}
 434}
 435
 436static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
 437						  unsigned table, uint16_t *size,
 438						  uint8_t *frev, uint8_t *crev)
 439{
 440	CGS_FUNC_ADEV;
 441	uint16_t data_start;
 442
 443	if (amdgpu_atom_parse_data_header(
 444		    adev->mode_info.atom_context, table, size,
 445		    frev, crev, &data_start))
 446		return (uint8_t*)adev->mode_info.atom_context->bios +
 447			data_start;
 448
 449	return NULL;
 450}
 451
 452static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
 453					      uint8_t *frev, uint8_t *crev)
 454{
 455	CGS_FUNC_ADEV;
 456
 457	if (amdgpu_atom_parse_cmd_header(
 458		    adev->mode_info.atom_context, table,
 459		    frev, crev))
 460		return 0;
 461
 462	return -EINVAL;
 463}
 464
 465static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
 466					  void *args)
 467{
 468	CGS_FUNC_ADEV;
 469
 470	return amdgpu_atom_execute_table(
 471		adev->mode_info.atom_context, table, args);
 472}
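
A hypothetical sketch combining the three ATOM BIOS helpers above: fetch a data table, check a command table's revision, then execute it (the table indices are illustrative):

static int example_atom_usage(void *cgs_device, unsigned data_tbl,
			      unsigned cmd_tbl, void *args)
{
	uint16_t size;
	uint8_t frev, crev;
	const void *data;

	data = amdgpu_cgs_atom_get_data_table(cgs_device, data_tbl,
					      &size, &frev, &crev);
	if (!data)
		return -EINVAL;

	if (amdgpu_cgs_atom_get_cmd_table_revs(cgs_device, cmd_tbl,
					       &frev, &crev))
		return -EINVAL;

	return amdgpu_cgs_atom_exec_cmd_table(cgs_device, cmd_tbl, args);
}
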
 473
 474static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
 475{
 476	/* TODO */
 477	return 0;
 478}
 479
 480static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
 481{
 482	/* TODO */
 483	return 0;
 484}
 485
 486static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
 487				     int active)
 488{
 489	/* TODO */
 490	return 0;
 491}
 492
 493static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
 494				       enum cgs_clock clock, unsigned freq)
 495{
 496	/* TODO */
 497	return 0;
 498}
 499
 500static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
 501					enum cgs_engine engine, int powered)
 502{
 503	/* TODO */
 504	return 0;
 505}
 506
 507
 508
 509static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
 510					    enum cgs_clock clock,
 511					    struct cgs_clock_limits *limits)
 512{
 513	/* TODO */
 514	return 0;
 515}
 516
 517static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
 518					  const uint32_t *voltages)
 519{
 520	DRM_ERROR("not implemented\n");
 521	return -EPERM;
 522}
 523
 524struct cgs_irq_params {
 525	unsigned src_id;
 526	cgs_irq_source_set_func_t set;
 527	cgs_irq_handler_func_t handler;
 528	void *private_data;
 529};
 530
 531static int cgs_set_irq_state(struct amdgpu_device *adev,
 532			     struct amdgpu_irq_src *src,
 533			     unsigned type,
 534			     enum amdgpu_interrupt_state state)
 535{
 536	struct cgs_irq_params *irq_params =
 537		(struct cgs_irq_params *)src->data;
 538	if (!irq_params)
 539		return -EINVAL;
 540	if (!irq_params->set)
 541		return -EINVAL;
 542	return irq_params->set(irq_params->private_data,
 543			       irq_params->src_id,
 544			       type,
 545			       (int)state);
 546}
 547
 548static int cgs_process_irq(struct amdgpu_device *adev,
 549			   struct amdgpu_irq_src *source,
 550			   struct amdgpu_iv_entry *entry)
 551{
 552	struct cgs_irq_params *irq_params =
 553		(struct cgs_irq_params *)source->data;
 554	if (!irq_params)
 555		return -EINVAL;
 556	if (!irq_params->handler)
 557		return -EINVAL;
 558	return irq_params->handler(irq_params->private_data,
 559				   irq_params->src_id,
 560				   entry->iv_entry);
 561}
 562
 563static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
 564	.set = cgs_set_irq_state,
 565	.process = cgs_process_irq,
 566};
 567
 568static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
 569				     unsigned num_types,
 570				     cgs_irq_source_set_func_t set,
 571				     cgs_irq_handler_func_t handler,
 572				     void *private_data)
 573{
 574	CGS_FUNC_ADEV;
 575	int ret = 0;
 576	struct cgs_irq_params *irq_params;
 577	struct amdgpu_irq_src *source =
 578		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
 579	if (!source)
 580		return -ENOMEM;
 581	irq_params =
 582		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
 583	if (!irq_params) {
 584		kfree(source);
 585		return -ENOMEM;
 586	}
 587	source->num_types = num_types;
 588	source->funcs = &cgs_irq_funcs;
 589	irq_params->src_id = src_id;
 590	irq_params->set = set;
 591	irq_params->handler = handler;
 592	irq_params->private_data = private_data;
 593	source->data = (void *)irq_params;
 594	ret = amdgpu_irq_add_id(adev, src_id, source);
 595	if (ret) {
 596		kfree(irq_params);
 597		kfree(source);
 598	}
 599
 600	return ret;
 601}
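
A hypothetical registration sketch for the IRQ plumbing above; the callback signatures mirror the calls made by cgs_set_irq_state() and cgs_process_irq(), and src_id 42 is purely illustrative:

static int example_irq_set(void *private_data, unsigned src_id,
			   unsigned type, int enabled)
{
	return 0;	/* program the interrupt enable here */
}

static int example_irq_handler(void *private_data, unsigned src_id,
			       const uint32_t *iv_entry)
{
	return 0;	/* decode the IV ring entry here */
}

static int example_register_irq(void *cgs_device, void *private_data)
{
	return amdgpu_cgs_add_irq_source(cgs_device, 42, 1,
					 example_irq_set,
					 example_irq_handler,
					 private_data);
}
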
 602
 603static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
 604{
 605	CGS_FUNC_ADEV;
 606	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
 607}
 608
 609static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
 610{
 611	CGS_FUNC_ADEV;
 612	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 613}
 614
 615int amdgpu_cgs_set_clockgating_state(void *cgs_device,
 616				  enum amd_ip_block_type block_type,
 617				  enum amd_clockgating_state state)
 618{
 619	CGS_FUNC_ADEV;
 620	int i, r = -1;
 621
 622	for (i = 0; i < adev->num_ip_blocks; i++) {
 623		if (!adev->ip_block_status[i].valid)
 624			continue;
 625
 626		if (adev->ip_blocks[i].type == block_type) {
 627			r = adev->ip_blocks[i].funcs->set_clockgating_state(
 628								(void *)adev,
 629									state);
 630			break;
 631		}
 632	}
 633	return r;
 634}
 635
 636int amdgpu_cgs_set_powergating_state(void *cgs_device,
 637				  enum amd_ip_block_type block_type,
 638				  enum amd_powergating_state state)
 639{
 640	CGS_FUNC_ADEV;
 641	int i, r = -1;
 642
 643	for (i = 0; i < adev->num_ip_blocks; i++) {
 644		if (!adev->ip_block_status[i].valid)
 645			continue;
 646
 647		if (adev->ip_blocks[i].type == block_type) {
 648			r = adev->ip_blocks[i].funcs->set_powergating_state(
 649								(void *)adev,
 650									state);
 651			break;
 652		}
 653	}
 654	return r;
 655}
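
A hypothetical sketch of the two IP-block state setters above: ask the UVD block to enter clock gating (enum values from amd_shared.h):

static int example_gate_uvd_clocks(void *cgs_device)
{
	return amdgpu_cgs_set_clockgating_state(cgs_device,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_GATE);
}
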
 656
 657
 658static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
 659{
 660	CGS_FUNC_ADEV;
 661	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
 662
 663	switch (fw_type) {
 664	case CGS_UCODE_ID_SDMA0:
 665		result = AMDGPU_UCODE_ID_SDMA0;
 666		break;
 667	case CGS_UCODE_ID_SDMA1:
 668		result = AMDGPU_UCODE_ID_SDMA1;
 669		break;
 670	case CGS_UCODE_ID_CP_CE:
 671		result = AMDGPU_UCODE_ID_CP_CE;
 672		break;
 673	case CGS_UCODE_ID_CP_PFP:
 674		result = AMDGPU_UCODE_ID_CP_PFP;
 675		break;
 676	case CGS_UCODE_ID_CP_ME:
 677		result = AMDGPU_UCODE_ID_CP_ME;
 678		break;
 679	case CGS_UCODE_ID_CP_MEC:
 680	case CGS_UCODE_ID_CP_MEC_JT1:
 681		result = AMDGPU_UCODE_ID_CP_MEC1;
 682		break;
 683	case CGS_UCODE_ID_CP_MEC_JT2:
 684		if (adev->asic_type == CHIP_TONGA)
 685			result = AMDGPU_UCODE_ID_CP_MEC2;
 686		else if (adev->asic_type == CHIP_CARRIZO)
 687			result = AMDGPU_UCODE_ID_CP_MEC1;
 688		break;
 689	case CGS_UCODE_ID_RLC_G:
 690		result = AMDGPU_UCODE_ID_RLC_G;
 691		break;
 692	default:
 693		DRM_ERROR("Firmware type not supported\n");
 694	}
 695	return result;
 696}
 697
 698static int amdgpu_cgs_get_firmware_info(void *cgs_device,
 699					enum cgs_ucode_id type,
 700					struct cgs_firmware_info *info)
 701{
 702	CGS_FUNC_ADEV;
 703
 704	if (CGS_UCODE_ID_SMU != type) {
 705		uint64_t gpu_addr;
 706		uint32_t data_size;
 707		const struct gfx_firmware_header_v1_0 *header;
 708		enum AMDGPU_UCODE_ID id;
 709		struct amdgpu_firmware_info *ucode;
 710
 711		id = fw_type_convert(cgs_device, type);
 712		ucode = &adev->firmware.ucode[id];
 713		if (ucode->fw == NULL)
 714			return -EINVAL;
 715
 716		gpu_addr  = ucode->mc_addr;
 717		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
 718		data_size = le32_to_cpu(header->header.ucode_size_bytes);
 719
 720		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
 721		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
 722			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
 723			data_size = le32_to_cpu(header->jt_size) << 2;
 724		}
 725		info->mc_addr = gpu_addr;
 726		info->image_size = data_size;
 727		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
 728		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 729	} else {
 730		char fw_name[30] = {0};
 731		int err = 0;
 732		uint32_t ucode_size;
 733		uint32_t ucode_start_address;
 734		const uint8_t *src;
 735		const struct smc_firmware_header_v1_0 *hdr;
 736
 737		switch (adev->asic_type) {
 738		case CHIP_TONGA:
 739			strcpy(fw_name, "amdgpu/tonga_smc.bin");
 740			break;
 741		case CHIP_FIJI:
 742			strcpy(fw_name, "amdgpu/fiji_smc.bin");
 743			break;
 744		default:
 745			DRM_ERROR("SMC firmware not supported\n");
 746			return -EINVAL;
 747		}
 748
 749		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
 750		if (err) {
 751			DRM_ERROR("Failed to request firmware\n");
 752			return err;
 753		}
 754
 755		err = amdgpu_ucode_validate(adev->pm.fw);
 756		if (err) {
 757			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
 758			release_firmware(adev->pm.fw);
 759			adev->pm.fw = NULL;
 760			return err;
 761		}
 762
 763		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
 764		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
 765		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
 766		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
 767		src = (const uint8_t *)(adev->pm.fw->data +
 768		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 769
 770		info->version = adev->pm.fw_version;
 771		info->image_size = ucode_size;
 772		info->kptr = (void *)src;
 773	}
 774	return 0;
 775}
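
A hypothetical sketch for the lookup above: fetch the already-loaded SDMA0 microcode and report the fields that amdgpu_cgs_get_firmware_info() fills in:

static int example_fw_info(void *cgs_device)
{
	struct cgs_firmware_info info = {0};
	int r;

	r = amdgpu_cgs_get_firmware_info(cgs_device, CGS_UCODE_ID_SDMA0,
					 &info);
	if (!r)
		DRM_INFO("SDMA0 fw: mc 0x%llx size %u version %u\n",
			 (unsigned long long)info.mc_addr,
			 info.image_size, info.version);
	return r;
}
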
 776
 777static int amdgpu_cgs_query_system_info(void *cgs_device,
 778				struct cgs_system_info *sys_info)
 779{
 780	CGS_FUNC_ADEV;
 781
 782	if (NULL == sys_info)
 783		return -ENODEV;
 784
 785	if (sizeof(struct cgs_system_info) != sys_info->size)
 786		return -ENODEV;
 787
 788	switch (sys_info->info_id) {
 789	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
 790		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
 791		break;
 792	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
 793		sys_info->value = adev->pm.pcie_gen_mask;
 794		break;
 795	case CGS_SYSTEM_INFO_PCIE_MLW:
 796		sys_info->value = adev->pm.pcie_mlw_mask;
 797		break;
 798	case CGS_SYSTEM_INFO_CG_FLAGS:
 799		sys_info->value = adev->cg_flags;
 800		break;
 801	case CGS_SYSTEM_INFO_PG_FLAGS:
 802		sys_info->value = adev->pg_flags;
 803		break;
 804	default:
 805		return -ENODEV;
 806	}
 807
 808	return 0;
 809}
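
A hypothetical query sketch: the sys_info.size handshake below mirrors the validation done at the top of the function above:

static int example_query_cg_flags(void *cgs_device, uint64_t *flags)
{
	struct cgs_system_info sys_info = {0};
	int r;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_CG_FLAGS;
	r = amdgpu_cgs_query_system_info(cgs_device, &sys_info);
	if (!r)
		*flags = sys_info.value;
	return r;
}
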
 810
 811static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
 812					  struct cgs_display_info *info)
 813{
 814	CGS_FUNC_ADEV;
 815	struct amdgpu_crtc *amdgpu_crtc;
 816	struct drm_device *ddev = adev->ddev;
 817	struct drm_crtc *crtc;
 818	uint32_t line_time_us, vblank_lines;
 819	struct cgs_mode_info *mode_info;
 820
 821	if (info == NULL)
 822		return -EINVAL;
 823
 824	mode_info = info->mode_info;
 825
 826	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 827		list_for_each_entry(crtc,
 828				&ddev->mode_config.crtc_list, head) {
 829			amdgpu_crtc = to_amdgpu_crtc(crtc);
 830			if (crtc->enabled) {
 831				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
 832				info->display_count++;
 833			}
 834			if (mode_info != NULL &&
 835				crtc->enabled && amdgpu_crtc->enabled &&
 836				amdgpu_crtc->hw_mode.clock) {
 837				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
 838							amdgpu_crtc->hw_mode.clock;
 839				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
 840							amdgpu_crtc->hw_mode.crtc_vdisplay +
 841							(amdgpu_crtc->v_border * 2);
 842				mode_info->vblank_time_us = vblank_lines * line_time_us;
 843				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 844				mode_info->ref_clock = adev->clock.spll.reference_freq;
 845				mode_info = NULL;
 846			}
 847		}
 848	}
 849
 850	return 0;
 851}
 852
 853
 854static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
 855{
 856	CGS_FUNC_ADEV;
 857
 858	adev->pm.dpm_enabled = enabled;
 859
 860	return 0;
 861}
 862
 863/** \brief Evaluate an ACPI namespace object; the handle or pathname must be valid.
 864 *  \param cgs_device CGS device handle
 865 *  \param info input/output arguments for the control method
 866 *  \return status
 867 */
 868
 869#if defined(CONFIG_ACPI)
 870static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
 871				    struct cgs_acpi_method_info *info)
 872{
 873	CGS_FUNC_ADEV;
 874	acpi_handle handle;
 875	struct acpi_object_list input;
 876	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
 877	union acpi_object *params = NULL;
 878	union acpi_object *obj = NULL;
 879	uint8_t name[5] = {'\0'};
 880	struct cgs_acpi_method_argument *argument = NULL;
 881	uint32_t i, count;
 882	acpi_status status;
 883	int result = 0;	/* stays 0 unless an error path below overrides it */
 884	uint32_t func_no = 0xFFFFFFFF;
 885
 886	handle = ACPI_HANDLE(&adev->pdev->dev);
 887	if (!handle)
 888		return -ENODEV;
 889
 890	memset(&input, 0, sizeof(struct acpi_object_list));
 891
 892	/* validate input info */
 893	if (info->size != sizeof(struct cgs_acpi_method_info))
 894		return -EINVAL;
 895
 896	input.count = info->input_count;
 897	if (info->input_count > 0) {
 898		if (info->pinput_argument == NULL)
 899			return -EINVAL;
 900		argument = info->pinput_argument;
 901		func_no = argument->value;
 902		for (i = 0; i < info->input_count; i++) {
 903			if (((argument->type == ACPI_TYPE_STRING) ||
 904			     (argument->type == ACPI_TYPE_BUFFER)) &&
 905			    (argument->pointer == NULL))
 906				return -EINVAL;
 907			argument++;
 908		}
 909	}
 910
 911	if (info->output_count > 0) {
 912		if (info->poutput_argument == NULL)
 913			return -EINVAL;
 914		argument = info->poutput_argument;
 915		for (i = 0; i < info->output_count; i++) {
 916			if (((argument->type == ACPI_TYPE_STRING) ||
 917				(argument->type == ACPI_TYPE_BUFFER))
 918				&& (argument->pointer == NULL))
 919				return -EINVAL;
 920			argument++;
 921		}
 922	}
 923
 924	/* The path name passed to acpi_evaluate_object should be null terminated */
 925	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
 926		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
 927		name[4] = '\0';
 928	}
 929
 930	/* parse input parameters */
 931	if (input.count > 0) {
 932		input.pointer = params =
 933				kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
 934		if (params == NULL)
 935			return -EINVAL;
 936
 937		argument = info->pinput_argument;
 938
 939		for (i = 0; i < input.count; i++) {
 940			params->type = argument->type;
 941			switch (params->type) {
 942			case ACPI_TYPE_INTEGER:
 943				params->integer.value = argument->value;
 944				break;
 945			case ACPI_TYPE_STRING:
 946				params->string.length = argument->method_length;
 947				params->string.pointer = argument->pointer;
 948				break;
 949			case ACPI_TYPE_BUFFER:
 950				params->buffer.length = argument->method_length;
 951				params->buffer.pointer = argument->pointer;
 952				break;
 953			default:
 954				break;
 955			}
 956			params++;
 957			argument++;
 958		}
 959	}
 960
 961	/* parse output info */
 962	count = info->output_count;
 963	argument = info->poutput_argument;
 964
 965	/* evaluate the acpi method */
 966	status = acpi_evaluate_object(handle, name, &input, &output);
 967
 968	if (ACPI_FAILURE(status)) {
 969		result = -EIO;
 970		goto error;
 971	}
 972
 973	/* return the output info */
 974	obj = output.pointer;
 975
 976	if (count > 1) {
 977		if ((obj->type != ACPI_TYPE_PACKAGE) ||
 978			(obj->package.count != count)) {
 979			result = -EIO;
 980			goto error;
 981		}
 982		params = obj->package.elements;
 983	} else
 984		params = obj;
 985
 986	if (params == NULL) {
 987		result = -EIO;
 988		goto error;
 989	}
 990
 991	for (i = 0; i < count; i++) {
 992		if (argument->type != params->type) {
 993			result = -EIO;
 994			goto error;
 995		}
 996		switch (params->type) {
 997		case ACPI_TYPE_INTEGER:
 998			argument->value = params->integer.value;
 999			break;
1000		case ACPI_TYPE_STRING:
1001			if ((params->string.length != argument->data_length) ||
1002				(params->string.pointer == NULL)) {
1003				result = -EIO;
1004				goto error;
1005			}
1006			strncpy(argument->pointer,
1007				params->string.pointer,
1008				params->string.length);
1009			break;
1010		case ACPI_TYPE_BUFFER:
1011			if (params->buffer.pointer == NULL) {
1012				result = -EIO;
1013				goto error;
1014			}
1015			memcpy(argument->pointer,
1016				params->buffer.pointer,
1017				argument->data_length);
1018			break;
1019		default:
1020			break;
1021		}
1022		argument++;
1023		params++;
1024	}
1025
1026error:
1027	if (obj != NULL)
1028		kfree(obj);
1029	kfree((void *)input.pointer);
1030	return result;
1031}
1032#else
1033static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
1034				struct cgs_acpi_method_info *info)
1035{
1036	return -EIO;
1037}
1038#endif
1039
1040int amdgpu_cgs_call_acpi_method(void *cgs_device,
1041					uint32_t acpi_method,
1042					uint32_t acpi_function,
1043					void *pinput, void *poutput,
1044					uint32_t output_count,
1045					uint32_t input_size,
1046					uint32_t output_size)
1047{
1048	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
1049	struct cgs_acpi_method_argument acpi_output = {0};
1050	struct cgs_acpi_method_info info = {0};
1051
1052	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
1053	acpi_input[0].method_length = sizeof(uint32_t);
1054	acpi_input[0].data_length = sizeof(uint32_t);
1055	acpi_input[0].value = acpi_function;
1056
1057	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
1058	acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
1059	acpi_input[1].data_length = input_size;
1060	acpi_input[1].pointer = pinput;
1061
1062	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
1063	acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
1064	acpi_output.data_length = output_size;
1065	acpi_output.pointer = poutput;
1066
1067	info.size = sizeof(struct cgs_acpi_method_info);
1068	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
1069	info.input_count = 2;
1070	info.name = acpi_method;
1071	info.pinput_argument = acpi_input;
1072	info.output_count = output_count;
1073	info.poutput_argument = &acpi_output;
1074
1075	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
1076}
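
A hypothetical invocation sketch for the wrapper above. The 4-character method name is packed into a uint32_t, matching the strncpy() unpacking in amdgpu_cgs_acpi_eval_object(); "ATCS" and the buffer sizes are illustrative and assume a little-endian host:

static int example_acpi_call(void *cgs_device, void *in, void *out)
{
	uint32_t name = 'A' | ('T' << 8) | ('C' << 16) |
			((uint32_t)'S' << 24);

	return amdgpu_cgs_call_acpi_method(cgs_device, name, 0,
					   in, out, 1, 4, 4);
}
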
1077
1078static const struct cgs_ops amdgpu_cgs_ops = {
1079	amdgpu_cgs_gpu_mem_info,
1080	amdgpu_cgs_gmap_kmem,
1081	amdgpu_cgs_gunmap_kmem,
1082	amdgpu_cgs_alloc_gpu_mem,
1083	amdgpu_cgs_free_gpu_mem,
1084	amdgpu_cgs_gmap_gpu_mem,
1085	amdgpu_cgs_gunmap_gpu_mem,
1086	amdgpu_cgs_kmap_gpu_mem,
1087	amdgpu_cgs_kunmap_gpu_mem,
1088	amdgpu_cgs_read_register,
1089	amdgpu_cgs_write_register,
1090	amdgpu_cgs_read_ind_register,
1091	amdgpu_cgs_write_ind_register,
1092	amdgpu_cgs_read_pci_config_byte,
1093	amdgpu_cgs_read_pci_config_word,
1094	amdgpu_cgs_read_pci_config_dword,
1095	amdgpu_cgs_write_pci_config_byte,
1096	amdgpu_cgs_write_pci_config_word,
1097	amdgpu_cgs_write_pci_config_dword,
1098	amdgpu_cgs_get_pci_resource,
1099	amdgpu_cgs_atom_get_data_table,
1100	amdgpu_cgs_atom_get_cmd_table_revs,
1101	amdgpu_cgs_atom_exec_cmd_table,
1102	amdgpu_cgs_create_pm_request,
1103	amdgpu_cgs_destroy_pm_request,
1104	amdgpu_cgs_set_pm_request,
1105	amdgpu_cgs_pm_request_clock,
1106	amdgpu_cgs_pm_request_engine,
1107	amdgpu_cgs_pm_query_clock_limits,
1108	amdgpu_cgs_set_camera_voltages,
1109	amdgpu_cgs_get_firmware_info,
1110	amdgpu_cgs_set_powergating_state,
1111	amdgpu_cgs_set_clockgating_state,
1112	amdgpu_cgs_get_active_displays_info,
1113	amdgpu_cgs_notify_dpm_enabled,
1114	amdgpu_cgs_call_acpi_method,
1115	amdgpu_cgs_query_system_info,
1116};
1117
1118static const struct cgs_os_ops amdgpu_cgs_os_ops = {
1119	amdgpu_cgs_add_irq_source,
1120	amdgpu_cgs_irq_get,
1121	amdgpu_cgs_irq_put
1122};
1123
1124void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
1125{
1126	struct amdgpu_cgs_device *cgs_device =
1127		kmalloc(sizeof(*cgs_device), GFP_KERNEL);
1128
1129	if (!cgs_device) {
1130		DRM_ERROR("Couldn't allocate CGS device structure\n");
1131		return NULL;
1132	}
1133
1134	cgs_device->base.ops = &amdgpu_cgs_ops;
1135	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
1136	cgs_device->adev = adev;
1137
1138	return cgs_device;
1139}
1140
1141void amdgpu_cgs_destroy_device(void *cgs_device)
1142{
1143	kfree(cgs_device);
1144}
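
A hypothetical lifecycle sketch for the two entry points above: the driver wraps its amdgpu_device in a CGS device at init time, hands the opaque pointer to IP-independent code, and frees it on teardown:

static void *example_cgs_init(struct amdgpu_device *adev)
{
	void *cgs_dev = amdgpu_cgs_create_device(adev);

	if (!cgs_dev)
		return NULL;
	/* ... pass cgs_dev to powerplay / SMU code ... */
	return cgs_dev;
}

static void example_cgs_fini(void *cgs_dev)
{
	amdgpu_cgs_destroy_device(cgs_dev);
}
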
v4.10.11
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 *
  23 */
  24#include <linux/list.h>
  25#include <linux/slab.h>
  26#include <linux/pci.h>
  27#include <linux/acpi.h>
  28#include <drm/drmP.h>
  29#include <linux/firmware.h>
  30#include <drm/amdgpu_drm.h>
  31#include "amdgpu.h"
  32#include "cgs_linux.h"
  33#include "atom.h"
  34#include "amdgpu_ucode.h"
  35
  36struct amdgpu_cgs_device {
  37	struct cgs_device base;
  38	struct amdgpu_device *adev;
  39};
  40
  41#define CGS_FUNC_ADEV							\
  42	struct amdgpu_device *adev =					\
  43		((struct amdgpu_cgs_device *)cgs_device)->adev
  44
  45static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
  46				   uint64_t *mc_start, uint64_t *mc_size,
  47				   uint64_t *mem_size)
  48{
  49	CGS_FUNC_ADEV;
  50	switch(type) {
  51	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
  52	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
  53		*mc_start = 0;
  54		*mc_size = adev->mc.visible_vram_size;
  55		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
  56		break;
  57	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
  58	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
  59		*mc_start = adev->mc.visible_vram_size;
  60		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
  61		*mem_size = *mc_size;
  62		break;
  63	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
  64	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
  65		*mc_start = adev->mc.gtt_start;
  66		*mc_size = adev->mc.gtt_size;
  67		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
  68		break;
  69	default:
  70		return -EINVAL;
  71	}
  72
  73	return 0;
  74}
  75
  76static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
  77				uint64_t size,
  78				uint64_t min_offset, uint64_t max_offset,
  79				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
  80{
  81	CGS_FUNC_ADEV;
  82	int ret;
  83	struct amdgpu_bo *bo;
  84	struct page *kmem_page = vmalloc_to_page(kmem);
  85	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
  86
  87	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
  88	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
  89			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
  90	if (ret)
  91		return ret;
  92	ret = amdgpu_bo_reserve(bo, false);
  93	if (unlikely(ret != 0))
  94		return ret;
  95
  96	/* pin buffer into GTT */
  97	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
  98				       min_offset, max_offset, mcaddr);
  99	amdgpu_bo_unreserve(bo);
 100
 101	*kmem_handle = (cgs_handle_t)bo;
 102	return ret;
 103}
 104
 105static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
 106{
 107	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
 108
 109	if (obj) {
 110		int r = amdgpu_bo_reserve(obj, false);
 111		if (likely(r == 0)) {
 112			amdgpu_bo_unpin(obj);
 113			amdgpu_bo_unreserve(obj);
 114		}
 115		amdgpu_bo_unref(&obj);
 116
 117	}
 118	return 0;
 119}
 120
 121static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 122				    enum cgs_gpu_mem_type type,
 123				    uint64_t size, uint64_t align,
 124				    uint64_t min_offset, uint64_t max_offset,
 125				    cgs_handle_t *handle)
 126{
 127	CGS_FUNC_ADEV;
 128	uint16_t flags = 0;
 129	int ret = 0;
 130	uint32_t domain = 0;
 131	struct amdgpu_bo *obj;
 132	struct ttm_placement placement;
 133	struct ttm_place place;
 134
 135	if (min_offset > max_offset) {
 136		BUG_ON(1);
 137		return -EINVAL;
 138	}
 139
 140	/* fail if the alignment is not a power of 2 */
 141	if (((align != 1) && (align & (align - 1)))
 142	    || size == 0 || align == 0)
 143		return -EINVAL;
 144
 145
 146	switch(type) {
 147	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
 148	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
 149		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 150			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 151		domain = AMDGPU_GEM_DOMAIN_VRAM;
 152		if (max_offset > adev->mc.real_vram_size)
 153			return -EINVAL;
 154		place.fpfn = min_offset >> PAGE_SHIFT;
 155		place.lpfn = max_offset >> PAGE_SHIFT;
 156		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 157			TTM_PL_FLAG_VRAM;
 158		break;
 159	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 160	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
 161		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 162			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 163		domain = AMDGPU_GEM_DOMAIN_VRAM;
 164		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 165			place.fpfn =
 166				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
 167			place.lpfn =
 168				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
 169			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 170				TTM_PL_FLAG_VRAM;
 171		}
 172
 173		break;
 174	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
 175		domain = AMDGPU_GEM_DOMAIN_GTT;
 176		place.fpfn = min_offset >> PAGE_SHIFT;
 177		place.lpfn = max_offset >> PAGE_SHIFT;
 178		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 179		break;
 180	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
 181		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 182		domain = AMDGPU_GEM_DOMAIN_GTT;
 183		place.fpfn = min_offset >> PAGE_SHIFT;
 184		place.lpfn = max_offset >> PAGE_SHIFT;
 185		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
 186			TTM_PL_FLAG_UNCACHED;
 187		break;
 188	default:
 189		return -EINVAL;
 190	}
 191
 192
 193	*handle = 0;
 194
 195	placement.placement = &place;
 196	placement.num_placement = 1;
 197	placement.busy_placement = &place;
 198	placement.num_busy_placement = 1;
 199
 200	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
 201					  true, domain, flags,
 202					  NULL, &placement, NULL,
 203					  &obj);
 204	if (ret) {
 205		DRM_ERROR("(%d) bo create failed\n", ret);
 206		return ret;
 207	}
 208	*handle = (cgs_handle_t)obj;
 209
 210	return ret;
 211}
 212
 213static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 214{
 215	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 216
 217	if (obj) {
 218		int r = amdgpu_bo_reserve(obj, false);
 219		if (likely(r == 0)) {
 220			amdgpu_bo_kunmap(obj);
 221			amdgpu_bo_unpin(obj);
 222			amdgpu_bo_unreserve(obj);
 223		}
 224		amdgpu_bo_unref(&obj);
 225
 226	}
 227	return 0;
 228}
 229
 230static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
 231				   uint64_t *mcaddr)
 232{
 233	int r;
 234	u64 min_offset, max_offset;
 235	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 236
 237	WARN_ON_ONCE(obj->placement.num_placement > 1);
 238
 239	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
 240	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
 241
 242	r = amdgpu_bo_reserve(obj, false);
 243	if (unlikely(r != 0))
 244		return r;
 245	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
 246				     min_offset, max_offset, mcaddr);
 247	amdgpu_bo_unreserve(obj);
 248	return r;
 249}
 250
 251static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 252{
 253	int r;
 254	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 255	r = amdgpu_bo_reserve(obj, false);
 256	if (unlikely(r != 0))
 257		return r;
 258	r = amdgpu_bo_unpin(obj);
 259	amdgpu_bo_unreserve(obj);
 260	return r;
 261}
 262
 263static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
 264				   void **map)
 265{
 266	int r;
 267	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 268	r = amdgpu_bo_reserve(obj, false);
 269	if (unlikely(r != 0))
 270		return r;
 271	r = amdgpu_bo_kmap(obj, map);
 272	amdgpu_bo_unreserve(obj);
 273	return r;
 274}
 275
 276static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 277{
 278	int r;
 279	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 280	r = amdgpu_bo_reserve(obj, false);
 281	if (unlikely(r != 0))
 282		return r;
 283	amdgpu_bo_kunmap(obj);
 284	amdgpu_bo_unreserve(obj);
 285	return r;
 286}
 287
 288static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
 289{
 290	CGS_FUNC_ADEV;
 291	return RREG32(offset);
 292}
 293
 294static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
 295				      uint32_t value)
 296{
 297	CGS_FUNC_ADEV;
 298	WREG32(offset, value);
 299}
 300
 301static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
 302					     enum cgs_ind_reg space,
 303					     unsigned index)
 304{
 305	CGS_FUNC_ADEV;
 306	switch (space) {
 307	case CGS_IND_REG__MMIO:
 308		return RREG32_IDX(index);
 309	case CGS_IND_REG__PCIE:
 310		return RREG32_PCIE(index);
 311	case CGS_IND_REG__SMC:
 312		return RREG32_SMC(index);
 313	case CGS_IND_REG__UVD_CTX:
 314		return RREG32_UVD_CTX(index);
 315	case CGS_IND_REG__DIDT:
 316		return RREG32_DIDT(index);
 317	case CGS_IND_REG_GC_CAC:
 318		return RREG32_GC_CAC(index);
 319	case CGS_IND_REG__AUDIO_ENDPT:
 320		DRM_ERROR("audio endpt register access not implemented.\n");
 321		return 0;
 322	}
 323	WARN(1, "Invalid indirect register space");
 324	return 0;
 325}
 326
 327static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 328					  enum cgs_ind_reg space,
 329					  unsigned index, uint32_t value)
 330{
 331	CGS_FUNC_ADEV;
 332	switch (space) {
 333	case CGS_IND_REG__MMIO:
 334		return WREG32_IDX(index, value);
 335	case CGS_IND_REG__PCIE:
 336		return WREG32_PCIE(index, value);
 337	case CGS_IND_REG__SMC:
 338		return WREG32_SMC(index, value);
 339	case CGS_IND_REG__UVD_CTX:
 340		return WREG32_UVD_CTX(index, value);
 341	case CGS_IND_REG__DIDT:
 342		return WREG32_DIDT(index, value);
 343	case CGS_IND_REG_GC_CAC:
 344		return WREG32_GC_CAC(index, value);
 345	case CGS_IND_REG__AUDIO_ENDPT:
 346		DRM_ERROR("audio endpt register access not implemented.\n");
 347		return;
 348	}
 349	WARN(1, "Invalid indirect register space");
 350}
 351
 352static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
 353{
 354	CGS_FUNC_ADEV;
 355	uint8_t val;
 356	int ret = pci_read_config_byte(adev->pdev, addr, &val);
 357	if (WARN(ret, "pci_read_config_byte error"))
 358		return 0;
 359	return val;
 360}
 361
 362static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
 363{
 364	CGS_FUNC_ADEV;
 365	uint16_t val;
 366	int ret = pci_read_config_word(adev->pdev, addr, &val);
 367	if (WARN(ret, "pci_read_config_word error"))
 368		return 0;
 369	return val;
 370}
 371
 372static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
 373						 unsigned addr)
 374{
 375	CGS_FUNC_ADEV;
 376	uint32_t val;
 377	int ret = pci_read_config_dword(adev->pdev, addr, &val);
 378	if (WARN(ret, "pci_read_config_dword error"))
 379		return 0;
 380	return val;
 381}
 382
 383static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
 384					     uint8_t value)
 385{
 386	CGS_FUNC_ADEV;
 387	int ret = pci_write_config_byte(adev->pdev, addr, value);
 388	WARN(ret, "pci_write_config_byte error");
 389}
 390
 391static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
 392					     uint16_t value)
 393{
 394	CGS_FUNC_ADEV;
 395	int ret = pci_write_config_word(adev->pdev, addr, value);
 396	WARN(ret, "pci_write_config_word error");
 397}
 398
 399static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
 400					      uint32_t value)
 401{
 402	CGS_FUNC_ADEV;
 403	int ret = pci_write_config_dword(adev->pdev, addr, value);
 404	WARN(ret, "pci_write_config_dword error");
 405}
 406
 407
 408static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
 409				       enum cgs_resource_type resource_type,
 410				       uint64_t size,
 411				       uint64_t offset,
 412				       uint64_t *resource_base)
 413{
 414	CGS_FUNC_ADEV;
 415
 416	if (resource_base == NULL)
 417		return -EINVAL;
 418
 419	switch (resource_type) {
 420	case CGS_RESOURCE_TYPE_MMIO:
 421		if (adev->rmmio_size == 0)
 422			return -ENOENT;
 423		if ((offset + size) > adev->rmmio_size)
 424			return -EINVAL;
 425		*resource_base = adev->rmmio_base;
 426		return 0;
 427	case CGS_RESOURCE_TYPE_DOORBELL:
 428		if (adev->doorbell.size == 0)
 429			return -ENOENT;
 430		if ((offset + size) > adev->doorbell.size)
 431			return -EINVAL;
 432		*resource_base = adev->doorbell.base;
 433		return 0;
 434	case CGS_RESOURCE_TYPE_FB:
 435	case CGS_RESOURCE_TYPE_IO:
 436	case CGS_RESOURCE_TYPE_ROM:
 437	default:
 438		return -EINVAL;
 439	}
 440}
 441
 442static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
 443						  unsigned table, uint16_t *size,
 444						  uint8_t *frev, uint8_t *crev)
 445{
 446	CGS_FUNC_ADEV;
 447	uint16_t data_start;
 448
 449	if (amdgpu_atom_parse_data_header(
 450		    adev->mode_info.atom_context, table, size,
 451		    frev, crev, &data_start))
 452		return (uint8_t*)adev->mode_info.atom_context->bios +
 453			data_start;
 454
 455	return NULL;
 456}
 457
 458static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
 459					      uint8_t *frev, uint8_t *crev)
 460{
 461	CGS_FUNC_ADEV;
 462
 463	if (amdgpu_atom_parse_cmd_header(
 464		    adev->mode_info.atom_context, table,
 465		    frev, crev))
 466		return 0;
 467
 468	return -EINVAL;
 469}
 470
 471static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
 472					  void *args)
 473{
 474	CGS_FUNC_ADEV;
 475
 476	return amdgpu_atom_execute_table(
 477		adev->mode_info.atom_context, table, args);
 478}
 479
 480static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
 481{
 482	/* TODO */
 483	return 0;
 484}
 485
 486static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
 487{
 488	/* TODO */
 489	return 0;
 490}
 491
 492static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
 493				     int active)
 494{
 495	/* TODO */
 496	return 0;
 497}
 498
 499static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
 500				       enum cgs_clock clock, unsigned freq)
 501{
 502	/* TODO */
 503	return 0;
 504}
 505
 506static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
 507					enum cgs_engine engine, int powered)
 508{
 509	/* TODO */
 510	return 0;
 511}
 512
 513
 514
 515static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
 516					    enum cgs_clock clock,
 517					    struct cgs_clock_limits *limits)
 518{
 519	/* TODO */
 520	return 0;
 521}
 522
 523static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
 524					  const uint32_t *voltages)
 525{
 526	DRM_ERROR("not implemented\n");
 527	return -EPERM;
 528}
 529
 530struct cgs_irq_params {
 531	unsigned src_id;
 532	cgs_irq_source_set_func_t set;
 533	cgs_irq_handler_func_t handler;
 534	void *private_data;
 535};
 536
 537static int cgs_set_irq_state(struct amdgpu_device *adev,
 538			     struct amdgpu_irq_src *src,
 539			     unsigned type,
 540			     enum amdgpu_interrupt_state state)
 541{
 542	struct cgs_irq_params *irq_params =
 543		(struct cgs_irq_params *)src->data;
 544	if (!irq_params)
 545		return -EINVAL;
 546	if (!irq_params->set)
 547		return -EINVAL;
 548	return irq_params->set(irq_params->private_data,
 549			       irq_params->src_id,
 550			       type,
 551			       (int)state);
 552}
 553
 554static int cgs_process_irq(struct amdgpu_device *adev,
 555			   struct amdgpu_irq_src *source,
 556			   struct amdgpu_iv_entry *entry)
 557{
 558	struct cgs_irq_params *irq_params =
 559		(struct cgs_irq_params *)source->data;
 560	if (!irq_params)
 561		return -EINVAL;
 562	if (!irq_params->handler)
 563		return -EINVAL;
 564	return irq_params->handler(irq_params->private_data,
 565				   irq_params->src_id,
 566				   entry->iv_entry);
 567}
 568
 569static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
 570	.set = cgs_set_irq_state,
 571	.process = cgs_process_irq,
 572};
 573
 574static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
 575				     unsigned num_types,
 576				     cgs_irq_source_set_func_t set,
 577				     cgs_irq_handler_func_t handler,
 578				     void *private_data)
 579{
 580	CGS_FUNC_ADEV;
 581	int ret = 0;
 582	struct cgs_irq_params *irq_params;
 583	struct amdgpu_irq_src *source =
 584		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
 585	if (!source)
 586		return -ENOMEM;
 587	irq_params =
 588		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
 589	if (!irq_params) {
 590		kfree(source);
 591		return -ENOMEM;
 592	}
 593	source->num_types = num_types;
 594	source->funcs = &cgs_irq_funcs;
 595	irq_params->src_id = src_id;
 596	irq_params->set = set;
 597	irq_params->handler = handler;
 598	irq_params->private_data = private_data;
 599	source->data = (void *)irq_params;
 600	ret = amdgpu_irq_add_id(adev, src_id, source);
 601	if (ret) {
 602		kfree(irq_params);
 603		kfree(source);
 604	}
 605
 606	return ret;
 607}
 608
 609static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
 610{
 611	CGS_FUNC_ADEV;
 612	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
 613}
 614
 615static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
 616{
 617	CGS_FUNC_ADEV;
 618	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 619}
 620
 621static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 622				  enum amd_ip_block_type block_type,
 623				  enum amd_clockgating_state state)
 624{
 625	CGS_FUNC_ADEV;
 626	int i, r = -1;
 627
 628	for (i = 0; i < adev->num_ip_blocks; i++) {
 629		if (!adev->ip_blocks[i].status.valid)
 630			continue;
 631
 632		if (adev->ip_blocks[i].version->type == block_type) {
 633			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 634								(void *)adev,
 635									state);
 636			break;
 637		}
 638	}
 639	return r;
 640}
 641
 642static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 643				  enum amd_ip_block_type block_type,
 644				  enum amd_powergating_state state)
 645{
 646	CGS_FUNC_ADEV;
 647	int i, r = -1;
 648
 649	for (i = 0; i < adev->num_ip_blocks; i++) {
 650		if (!adev->ip_blocks[i].status.valid)
 651			continue;
 652
 653		if (adev->ip_blocks[i].version->type == block_type) {
 654			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 655								(void *)adev,
 656									state);
 657			break;
 658		}
 659	}
 660	return r;
 661}
 662
 663
 664static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 665{
 666	CGS_FUNC_ADEV;
 667	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
 668
 669	switch (fw_type) {
 670	case CGS_UCODE_ID_SDMA0:
 671		result = AMDGPU_UCODE_ID_SDMA0;
 672		break;
 673	case CGS_UCODE_ID_SDMA1:
 674		result = AMDGPU_UCODE_ID_SDMA1;
 675		break;
 676	case CGS_UCODE_ID_CP_CE:
 677		result = AMDGPU_UCODE_ID_CP_CE;
 678		break;
 679	case CGS_UCODE_ID_CP_PFP:
 680		result = AMDGPU_UCODE_ID_CP_PFP;
 681		break;
 682	case CGS_UCODE_ID_CP_ME:
 683		result = AMDGPU_UCODE_ID_CP_ME;
 684		break;
 685	case CGS_UCODE_ID_CP_MEC:
 686	case CGS_UCODE_ID_CP_MEC_JT1:
 687		result = AMDGPU_UCODE_ID_CP_MEC1;
 688		break;
 689	case CGS_UCODE_ID_CP_MEC_JT2:
 690		/* For VI, JT2 should be the same as JT1 because:
 691		 * 1. MEC2 and MEC1 use exactly the same FW.
 692		 * 2. JT2 is not patched, but JT1 is.
 693		 */
 694		if (adev->asic_type >= CHIP_TOPAZ)
 695			result = AMDGPU_UCODE_ID_CP_MEC1;
 696		else
 697			result = AMDGPU_UCODE_ID_CP_MEC2;
 698		break;
 699	case CGS_UCODE_ID_RLC_G:
 700		result = AMDGPU_UCODE_ID_RLC_G;
 701		break;
 702	case CGS_UCODE_ID_STORAGE:
 703		result = AMDGPU_UCODE_ID_STORAGE;
 704		break;
 705	default:
 706		DRM_ERROR("Firmware type not supported\n");
 707	}
 708	return result;
 709}
 710
 711static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
 712{
 713	CGS_FUNC_ADEV;
 714	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
 715		release_firmware(adev->pm.fw);
 716		return 0;
 717	}
 718	/* cannot release other firmware types because they are not created by cgs */
 719	return -EINVAL;
 720}
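
A hypothetical sketch for the release helper above: only the SMU firmware that CGS requested itself may be released through this entry point:

static void example_release_smu_fw(struct cgs_device *cgs_dev)
{
	if (amdgpu_cgs_rel_firmware(cgs_dev, CGS_UCODE_ID_SMU))
		DRM_ERROR("unexpected SMU firmware release failure\n");
}
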
 721
 722static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 723					enum cgs_ucode_id type)
 724{
 725	CGS_FUNC_ADEV;
 726	uint16_t fw_version = 0;
 727
 728	switch (type) {
 729		case CGS_UCODE_ID_SDMA0:
 730			fw_version = adev->sdma.instance[0].fw_version;
 731			break;
 732		case CGS_UCODE_ID_SDMA1:
 733			fw_version = adev->sdma.instance[1].fw_version;
 734			break;
 735		case CGS_UCODE_ID_CP_CE:
 736			fw_version = adev->gfx.ce_fw_version;
 737			break;
 738		case CGS_UCODE_ID_CP_PFP:
 739			fw_version = adev->gfx.pfp_fw_version;
 740			break;
 741		case CGS_UCODE_ID_CP_ME:
 742			fw_version = adev->gfx.me_fw_version;
 743			break;
 744		case CGS_UCODE_ID_CP_MEC:
 745			fw_version = adev->gfx.mec_fw_version;
 746			break;
 747		case CGS_UCODE_ID_CP_MEC_JT1:
 748			fw_version = adev->gfx.mec_fw_version;
 749			break;
 750		case CGS_UCODE_ID_CP_MEC_JT2:
 751			fw_version = adev->gfx.mec_fw_version;
 752			break;
 753		case CGS_UCODE_ID_RLC_G:
 754			fw_version = adev->gfx.rlc_fw_version;
 755			break;
 756		case CGS_UCODE_ID_STORAGE:
 757			break;
 758		default:
 759			DRM_ERROR("firmware type %d does not have a version\n", type);
 760			break;
 761	}
 762	return fw_version;
 763}
 764
 765static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 766					enum cgs_ucode_id type,
 767					struct cgs_firmware_info *info)
 768{
 769	CGS_FUNC_ADEV;
 770
 771	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
 772		uint64_t gpu_addr;
 773		uint32_t data_size;
 774		const struct gfx_firmware_header_v1_0 *header;
 775		enum AMDGPU_UCODE_ID id;
 776		struct amdgpu_firmware_info *ucode;
 777
 778		id = fw_type_convert(cgs_device, type);
 779		ucode = &adev->firmware.ucode[id];
 780		if (ucode->fw == NULL)
 781			return -EINVAL;
 782
 783		gpu_addr  = ucode->mc_addr;
 784		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
 785		data_size = le32_to_cpu(header->header.ucode_size_bytes);
 786
 787		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
 788		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
 789			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
 790			data_size = le32_to_cpu(header->jt_size) << 2;
 791		}
 792
 793		info->kptr = ucode->kaddr;
 794		info->image_size = data_size;
 795		info->mc_addr = gpu_addr;
 796		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
 797
  798		if (type == CGS_UCODE_ID_CP_MEC)
  799			info->image_size = le32_to_cpu(header->jt_offset) << 2;
 800
 801		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 802		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 803	} else {
 804		char fw_name[30] = {0};
 805		int err = 0;
 806		uint32_t ucode_size;
 807		uint32_t ucode_start_address;
 808		const uint8_t *src;
 809		const struct smc_firmware_header_v1_0 *hdr;
 810
 811		if (!adev->pm.fw) {
 812			switch (adev->asic_type) {
 813			case CHIP_TOPAZ:
 814				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 815				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
 816				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
 817					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
 818				else
 819					strcpy(fw_name, "amdgpu/topaz_smc.bin");
 820				break;
 821			case CHIP_TONGA:
 822				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
 823				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
 824					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
 825				else
 826					strcpy(fw_name, "amdgpu/tonga_smc.bin");
 827				break;
 828			case CHIP_FIJI:
 829				strcpy(fw_name, "amdgpu/fiji_smc.bin");
 830				break;
 831			case CHIP_POLARIS11:
 832				if (type == CGS_UCODE_ID_SMU)
 833					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
 834				else if (type == CGS_UCODE_ID_SMU_SK)
 835					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
 836				break;
 837			case CHIP_POLARIS10:
 838				if (type == CGS_UCODE_ID_SMU)
 839					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
 840				else if (type == CGS_UCODE_ID_SMU_SK)
 841					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 842				break;
 843			case CHIP_POLARIS12:
 844				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
 845				break;
 846			default:
 847				DRM_ERROR("SMC firmware not supported\n");
 848				return -EINVAL;
 849			}
 850
 851			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
 852			if (err) {
  853				DRM_ERROR("Failed to request firmware \"%s\"\n", fw_name);
 854				return err;
 855			}
 856
 857			err = amdgpu_ucode_validate(adev->pm.fw);
 858			if (err) {
  859				DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name);
 860				release_firmware(adev->pm.fw);
 861				adev->pm.fw = NULL;
 862				return err;
 863			}
 864		}
 865
  866		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
 867		amdgpu_ucode_print_smc_hdr(&hdr->header);
 868		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
 869		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
 870		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
 871		src = (const uint8_t *)(adev->pm.fw->data +
 872		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 873
 874		info->version = adev->pm.fw_version;
 875		info->image_size = ucode_size;
 876		info->ucode_start_address = ucode_start_address;
 877		info->kptr = (void *)src;
 878	}
 879	return 0;
 880}
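
/*
 * Illustrative sketch (not in the original file): how the MEC jump-table (JT)
 * window is derived from the gfx firmware header, mirroring the JT1/JT2
 * branch above. The helper name is hypothetical.
 */
static void example_mec_jt_window(const struct amdgpu_firmware_info *ucode,
				  uint64_t *jt_addr, uint32_t *jt_bytes)
{
	const struct gfx_firmware_header_v1_0 *hdr =
		(const struct gfx_firmware_header_v1_0 *)ucode->fw->data;

	/* the JT sits in the page-aligned slot right after the main ucode */
	*jt_addr = ucode->mc_addr +
		   ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
	/* jt_size is in dwords; shift left by 2 to convert to bytes */
	*jt_bytes = le32_to_cpu(hdr->jt_size) << 2;
}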
 881
 882static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
 883{
 884	CGS_FUNC_ADEV;
 885	return amdgpu_sriov_vf(adev);
 886}
 887
 888static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 889					struct cgs_system_info *sys_info)
 890{
 891	CGS_FUNC_ADEV;
 892
  893	if (!sys_info)
  894		return -ENODEV;
  895
  896	if (sys_info->size != sizeof(struct cgs_system_info))
  897		return -ENODEV;
 898
 899	switch (sys_info->info_id) {
 900	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
 901		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
 902		break;
 903	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
 904		sys_info->value = adev->pm.pcie_gen_mask;
 905		break;
 906	case CGS_SYSTEM_INFO_PCIE_MLW:
 907		sys_info->value = adev->pm.pcie_mlw_mask;
 908		break;
 909	case CGS_SYSTEM_INFO_PCIE_DEV:
 910		sys_info->value = adev->pdev->device;
 911		break;
 912	case CGS_SYSTEM_INFO_PCIE_REV:
 913		sys_info->value = adev->pdev->revision;
 914		break;
 915	case CGS_SYSTEM_INFO_CG_FLAGS:
 916		sys_info->value = adev->cg_flags;
 917		break;
 918	case CGS_SYSTEM_INFO_PG_FLAGS:
 919		sys_info->value = adev->pg_flags;
 920		break;
 921	case CGS_SYSTEM_INFO_GFX_CU_INFO:
 922		sys_info->value = adev->gfx.cu_info.number;
 923		break;
 924	case CGS_SYSTEM_INFO_GFX_SE_INFO:
 925		sys_info->value = adev->gfx.config.max_shader_engines;
 926		break;
 927	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
 928		sys_info->value = adev->pdev->subsystem_device;
 929		break;
 930	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
 931		sys_info->value = adev->pdev->subsystem_vendor;
 932		break;
 933	default:
 934		return -ENODEV;
 935	}
 936
 937	return 0;
 938}
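
/*
 * Illustrative sketch (not in the original file): querying the adapter's
 * bus/devfn id through the op above. The caller must fill in info.size or the
 * size check rejects the request with -ENODEV. The helper name is
 * hypothetical.
 */
static int example_query_bdf(struct cgs_device *cgs_device, uint32_t *bdf)
{
	struct cgs_system_info info = {0};
	int ret;

	info.size = sizeof(struct cgs_system_info);
	info.info_id = CGS_SYSTEM_INFO_ADAPTER_BDF_ID;
	ret = amdgpu_cgs_query_system_info(cgs_device, &info);
	if (!ret)
		*bdf = (uint32_t)info.value;
	return ret;
}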
 939
 940static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
 941					  struct cgs_display_info *info)
 942{
 943	CGS_FUNC_ADEV;
 944	struct amdgpu_crtc *amdgpu_crtc;
 945	struct drm_device *ddev = adev->ddev;
 946	struct drm_crtc *crtc;
 947	uint32_t line_time_us, vblank_lines;
 948	struct cgs_mode_info *mode_info;
 949
 950	if (info == NULL)
 951		return -EINVAL;
 952
 953	mode_info = info->mode_info;
 954
 955	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 956		list_for_each_entry(crtc,
 957				&ddev->mode_config.crtc_list, head) {
 958			amdgpu_crtc = to_amdgpu_crtc(crtc);
 959			if (crtc->enabled) {
 960				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
 961				info->display_count++;
 962			}
 963			if (mode_info != NULL &&
 964				crtc->enabled && amdgpu_crtc->enabled &&
 965				amdgpu_crtc->hw_mode.clock) {
 966				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
 967							amdgpu_crtc->hw_mode.clock;
 968				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
 969							amdgpu_crtc->hw_mode.crtc_vdisplay +
 970							(amdgpu_crtc->v_border * 2);
 971				mode_info->vblank_time_us = vblank_lines * line_time_us;
 972				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 973				mode_info->ref_clock = adev->clock.spll.reference_freq;
 974				mode_info = NULL;
 975			}
 976		}
 977	}
 978
 979	return 0;
 980}
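
/*
 * Worked example for the arithmetic above (illustrative figures from a
 * standard 1080p60 mode): hw_mode.clock is in kHz, so with a 148500 kHz pixel
 * clock and crtc_htotal = 2200, line_time_us = 2200 * 1000 / 148500 = 14 us
 * (integer division truncates 14.8). With crtc_vblank_end = 1125,
 * crtc_vdisplay = 1080 and no border, vblank_lines = 45 and
 * vblank_time_us = 45 * 14 = 630 us, a floor estimate of the ~666 us actually
 * available in the blanking period.
 */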
 981
 982
 983static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
 984{
 985	CGS_FUNC_ADEV;
 986
 987	adev->pm.dpm_enabled = enabled;
 988
 989	return 0;
 990}
 991
  992/**
  993 * amdgpu_cgs_acpi_eval_object - evaluate an ACPI namespace object
  994 * @cgs_device: CGS device handle
  995 * @info: input/output arguments for the control method
  996 * Return: status; the object handle or pathname must be valid
  997 */
 998#if defined(CONFIG_ACPI)
 999static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1000				    struct cgs_acpi_method_info *info)
1001{
1002	CGS_FUNC_ADEV;
1003	acpi_handle handle;
1004	struct acpi_object_list input;
1005	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
 1006	union acpi_object *params, *obj;
 1007	uint8_t name[5] = {'\0'};
1008	struct cgs_acpi_method_argument *argument;
1009	uint32_t i, count;
1010	acpi_status status;
 1011	int result;
 1012
1013	handle = ACPI_HANDLE(&adev->pdev->dev);
1014	if (!handle)
1015		return -ENODEV;
1016
1017	memset(&input, 0, sizeof(struct acpi_object_list));
1018
1019	/* validate input info */
1020	if (info->size != sizeof(struct cgs_acpi_method_info))
1021		return -EINVAL;
1022
1023	input.count = info->input_count;
1024	if (info->input_count > 0) {
1025		if (info->pinput_argument == NULL)
1026			return -EINVAL;
 1027		argument = info->pinput_argument;
1028		for (i = 0; i < info->input_count; i++) {
1029			if (((argument->type == ACPI_TYPE_STRING) ||
1030			     (argument->type == ACPI_TYPE_BUFFER)) &&
1031			    (argument->pointer == NULL))
1032				return -EINVAL;
1033			argument++;
1034		}
1035	}
1036
1037	if (info->output_count > 0) {
1038		if (info->poutput_argument == NULL)
1039			return -EINVAL;
1040		argument = info->poutput_argument;
1041		for (i = 0; i < info->output_count; i++) {
1042			if (((argument->type == ACPI_TYPE_STRING) ||
1043				(argument->type == ACPI_TYPE_BUFFER))
1044				&& (argument->pointer == NULL))
1045				return -EINVAL;
1046			argument++;
1047		}
1048	}
1049
1050	/* The path name passed to acpi_evaluate_object should be null terminated */
1051	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
1052		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
1053		name[4] = '\0';
1054	}
1055
1056	/* parse input parameters */
1057	if (input.count > 0) {
 1058		input.pointer = params =
 1059				kcalloc(input.count, sizeof(union acpi_object), GFP_KERNEL);
 1060		if (params == NULL)
 1061			return -ENOMEM;
1062
1063		argument = info->pinput_argument;
1064
1065		for (i = 0; i < input.count; i++) {
1066			params->type = argument->type;
1067			switch (params->type) {
1068			case ACPI_TYPE_INTEGER:
1069				params->integer.value = argument->value;
1070				break;
1071			case ACPI_TYPE_STRING:
1072				params->string.length = argument->data_length;
1073				params->string.pointer = argument->pointer;
1074				break;
1075			case ACPI_TYPE_BUFFER:
1076				params->buffer.length = argument->data_length;
1077				params->buffer.pointer = argument->pointer;
1078				break;
1079			default:
1080				break;
1081			}
1082			params++;
1083			argument++;
1084		}
1085	}
1086
1087	/* parse output info */
1088	count = info->output_count;
1089	argument = info->poutput_argument;
1090
1091	/* evaluate the acpi method */
1092	status = acpi_evaluate_object(handle, name, &input, &output);
1093
1094	if (ACPI_FAILURE(status)) {
1095		result = -EIO;
1096		goto free_input;
1097	}
1098
1099	/* return the output info */
1100	obj = output.pointer;
1101
1102	if (count > 1) {
1103		if ((obj->type != ACPI_TYPE_PACKAGE) ||
1104			(obj->package.count != count)) {
1105			result = -EIO;
1106			goto free_obj;
1107		}
1108		params = obj->package.elements;
1109	} else
1110		params = obj;
1111
1112	if (params == NULL) {
1113		result = -EIO;
1114		goto free_obj;
1115	}
1116
1117	for (i = 0; i < count; i++) {
1118		if (argument->type != params->type) {
1119			result = -EIO;
1120			goto free_obj;
1121		}
1122		switch (params->type) {
1123		case ACPI_TYPE_INTEGER:
1124			argument->value = params->integer.value;
1125			break;
1126		case ACPI_TYPE_STRING:
1127			if ((params->string.length != argument->data_length) ||
1128				(params->string.pointer == NULL)) {
1129				result = -EIO;
1130				goto free_obj;
1131			}
1132			strncpy(argument->pointer,
1133				params->string.pointer,
1134				params->string.length);
1135			break;
1136		case ACPI_TYPE_BUFFER:
1137			if (params->buffer.pointer == NULL) {
1138				result = -EIO;
1139				goto free_obj;
1140			}
1141			memcpy(argument->pointer,
1142				params->buffer.pointer,
1143				argument->data_length);
1144			break;
1145		default:
1146			break;
1147		}
1148		argument++;
1149		params++;
1150	}
1151
1152	result = 0;
1153free_obj:
1154	kfree(obj);
1155free_input:
1156	kfree((void *)input.pointer);
1157	return result;
1158}
1159#else
1160static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1161				struct cgs_acpi_method_info *info)
1162{
1163	return -EIO;
1164}
1165#endif
1166
1167static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
1168					uint32_t acpi_method,
1169					uint32_t acpi_function,
1170					void *pinput, void *poutput,
1171					uint32_t output_count,
1172					uint32_t input_size,
1173					uint32_t output_size)
1174{
1175	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
1176	struct cgs_acpi_method_argument acpi_output = {0};
1177	struct cgs_acpi_method_info info = {0};
1178
 1179	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
 1180	acpi_input[0].data_length = sizeof(uint32_t);
1181	acpi_input[0].value = acpi_function;
1182
 1183	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
 1184	acpi_input[1].data_length = input_size;
1185	acpi_input[1].pointer = pinput;
1186
 1187	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
 1188	acpi_output.data_length = output_size;
1189	acpi_output.pointer = poutput;
1190
1191	info.size = sizeof(struct cgs_acpi_method_info);
1192	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
1193	info.input_count = 2;
1194	info.name = acpi_method;
1195	info.pinput_argument = acpi_input;
1196	info.output_count = output_count;
1197	info.poutput_argument = &acpi_output;
1198
1199	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
1200}
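
/*
 * Illustrative sketch (not in the original file): invoking a four-character
 * ACPI method through the wrapper above. The name is passed packed into a
 * uint32_t and unpacked bytewise by strncpy() in the evaluator, so on a
 * little-endian machine 0x53435441 yields "ATCS". Method, buffers and sizes
 * here are hypothetical.
 */
static int example_call_atcs(struct cgs_device *cgs_device)
{
	uint32_t input = 0;
	uint32_t output[4] = {0};

	return amdgpu_cgs_call_acpi_method(cgs_device,
					   0x53435441,	/* "ATCS" */
					   0,		/* function id */
					   &input, output,
					   1,		/* output_count */
					   sizeof(input),
					   sizeof(output));
}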
1201
1202static const struct cgs_ops amdgpu_cgs_ops = {
1203	amdgpu_cgs_gpu_mem_info,
1204	amdgpu_cgs_gmap_kmem,
1205	amdgpu_cgs_gunmap_kmem,
1206	amdgpu_cgs_alloc_gpu_mem,
1207	amdgpu_cgs_free_gpu_mem,
1208	amdgpu_cgs_gmap_gpu_mem,
1209	amdgpu_cgs_gunmap_gpu_mem,
1210	amdgpu_cgs_kmap_gpu_mem,
1211	amdgpu_cgs_kunmap_gpu_mem,
1212	amdgpu_cgs_read_register,
1213	amdgpu_cgs_write_register,
1214	amdgpu_cgs_read_ind_register,
1215	amdgpu_cgs_write_ind_register,
1216	amdgpu_cgs_read_pci_config_byte,
1217	amdgpu_cgs_read_pci_config_word,
1218	amdgpu_cgs_read_pci_config_dword,
1219	amdgpu_cgs_write_pci_config_byte,
1220	amdgpu_cgs_write_pci_config_word,
1221	amdgpu_cgs_write_pci_config_dword,
1222	amdgpu_cgs_get_pci_resource,
1223	amdgpu_cgs_atom_get_data_table,
1224	amdgpu_cgs_atom_get_cmd_table_revs,
1225	amdgpu_cgs_atom_exec_cmd_table,
1226	amdgpu_cgs_create_pm_request,
1227	amdgpu_cgs_destroy_pm_request,
1228	amdgpu_cgs_set_pm_request,
1229	amdgpu_cgs_pm_request_clock,
1230	amdgpu_cgs_pm_request_engine,
1231	amdgpu_cgs_pm_query_clock_limits,
1232	amdgpu_cgs_set_camera_voltages,
1233	amdgpu_cgs_get_firmware_info,
1234	amdgpu_cgs_rel_firmware,
1235	amdgpu_cgs_set_powergating_state,
1236	amdgpu_cgs_set_clockgating_state,
1237	amdgpu_cgs_get_active_displays_info,
1238	amdgpu_cgs_notify_dpm_enabled,
1239	amdgpu_cgs_call_acpi_method,
1240	amdgpu_cgs_query_system_info,
1241	amdgpu_cgs_is_virtualization_enabled
1242};
1243
1244static const struct cgs_os_ops amdgpu_cgs_os_ops = {
1245	amdgpu_cgs_add_irq_source,
1246	amdgpu_cgs_irq_get,
1247	amdgpu_cgs_irq_put
1248};
1249
1250struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
1251{
1252	struct amdgpu_cgs_device *cgs_device =
1253		kmalloc(sizeof(*cgs_device), GFP_KERNEL);
1254
1255	if (!cgs_device) {
1256		DRM_ERROR("Couldn't allocate CGS device structure\n");
1257		return NULL;
1258	}
1259
1260	cgs_device->base.ops = &amdgpu_cgs_ops;
1261	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
1262	cgs_device->adev = adev;
1263
1264	return (struct cgs_device *)cgs_device;
1265}
1266
1267void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
1268{
1269	kfree(cgs_device);
1270}
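
/*
 * Illustrative sketch (not in the original file): typical lifetime pairing of
 * the two exported helpers above. In the driver the powerplay component holds
 * the returned handle for the device's lifetime; the helper name here is
 * hypothetical.
 */
static int example_cgs_lifetime(struct amdgpu_device *adev)
{
	struct cgs_device *cgs_device = amdgpu_cgs_create_device(adev);

	if (!cgs_device)
		return -ENOMEM;

	/* ... hand cgs_device to the PP/SMU component; its ops and os_ops
	 * tables dispatch back into the amdgpu implementations above ... */

	amdgpu_cgs_destroy_device(cgs_device);
	return 0;
}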