   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
   3
   4#include <linux/clk.h>
   5#include <linux/interconnect.h>
   6#include <linux/pm_domain.h>
   7#include <linux/pm_opp.h>
   8#include <soc/qcom/cmd-db.h>
   9#include <drm/drm_gem.h>
  10
  11#include "a6xx_gpu.h"
  12#include "a6xx_gmu.xml.h"
  13#include "msm_gem.h"
  14#include "msm_gpu_trace.h"
  15#include "msm_mmu.h"
  16
  17static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
  18{
  19	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
  20	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
  21	struct msm_gpu *gpu = &adreno_gpu->base;
  22
  23	/* FIXME: add a banner here */
  24	gmu->hung = true;
  25
  26	/* Turn off the hangcheck timer while we are resetting */
  27	del_timer(&gpu->hangcheck_timer);
  28
  29	/* Queue the GPU handler because we need to treat this as a recovery */
  30	kthread_queue_work(gpu->worker, &gpu->recover_work);
  31}
  32
  33static irqreturn_t a6xx_gmu_irq(int irq, void *data)
  34{
  35	struct a6xx_gmu *gmu = data;
  36	u32 status;
  37
  38	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
  39	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
  40
  41	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
  42		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
  43
  44		a6xx_gmu_fault(gmu);
  45	}
  46
  47	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
  48		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
  49
  50	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
  51		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
  52			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
  53
  54	return IRQ_HANDLED;
  55}
  56
  57static irqreturn_t a6xx_hfi_irq(int irq, void *data)
  58{
  59	struct a6xx_gmu *gmu = data;
  60	u32 status;
  61
  62	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
  63	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
  64
  65	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
  66		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
  67
  68		a6xx_gmu_fault(gmu);
  69	}
  70
  71	return IRQ_HANDLED;
  72}
  73
  74bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
  75{
  76	u32 val;
  77
  78	/* This can be called from gpu state code so make sure GMU is valid */
  79	if (!gmu->initialized)
  80		return false;
  81
  82	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
  83
  84	return !(val &
  85		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
  86		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
  87}
  88
  89/* Check to see if the GX rail is still powered */
  90bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
  91{
  92	u32 val;
  93
  94	/* This can be called from gpu state code so make sure GMU is valid */
  95	if (!gmu->initialized)
  96		return false;
  97
  98	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
  99
 100	return !(val &
 101		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
 102		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
 103}
 104
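/*
 * Ask the GMU to move the GPU to a new frequency. Newer firmware takes the
 * perf index over HFI; legacy firmware uses the DCVS out-of-band handshake
 * below.
 */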
 105void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
 106		       bool suspended)
 107{
 108	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 109	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 110	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 111	u32 perf_index;
 112	unsigned long gpu_freq;
 113	int ret = 0;
 114
 115	gpu_freq = dev_pm_opp_get_freq(opp);
 116
 117	if (gpu_freq == gmu->freq)
 118		return;
 119
 120	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
 121		if (gpu_freq == gmu->gpu_freqs[perf_index])
 122			break;
 123
 124	gmu->current_perf_index = perf_index;
 125	gmu->freq = gmu->gpu_freqs[perf_index];
 126
 127	trace_msm_gmu_freq_change(gmu->freq, perf_index);
 128
 129	/*
 130	 * This can get called from devfreq while the hardware is idle. Don't
 131	 * bring up the power if it isn't already active. All we're doing here
 132	 * is updating the frequency so that when we come back online we're at
 133	 * the right rate.
 134	 */
 135	if (suspended)
 136		return;
 137
 138	if (!gmu->legacy) {
 139		a6xx_hfi_set_freq(gmu, perf_index);
 140		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
 141		return;
 142	}
 143
 144	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
 145
 146	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
 147			((3 & 0xf) << 28) | perf_index);
 148
 149	/*
 150	 * Send an invalid index as a vote for the bus bandwidth and let the
 151	 * firmware decide on the right vote
 152	 */
 153	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
 154
 155	/* Set and clear the OOB for DCVS to trigger the GMU */
 156	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
 157	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
 158
 159	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
 160	if (ret)
 161		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
 162
 163	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
 164}
 165
 166unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
 167{
 168	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 169	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 170	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 171
 172	return gmu->freq;
 173}
 174
 175static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
 176{
 177	u32 val;
 178	int local = gmu->idle_level;
 179
 180	/* SPTP and IFPC both report as IFPC */
 181	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
 182		local = GMU_IDLE_STATE_IFPC;
 183
 184	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
 185
 186	if (val == local) {
 187		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
 188			!a6xx_gmu_gx_is_on(gmu))
 189			return true;
 190	}
 191
 192	return false;
 193}
 194
 195/* Wait for the GMU to get to its most idle state */
 196int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
 197{
 198	return spin_until(a6xx_gmu_check_idle_level(gmu));
 199}
 200
 201static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 202{
 203	int ret;
 204	u32 val;
 205	u32 mask, reset_val;
 206
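	/*
	 * The last word of DTCM appears to hold the GMU firmware version:
	 * newer firmware reports a different FW_INIT_RESULT pattern, so pick
	 * the poll mask/value accordingly.
	 */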
 207	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
 208	if (val <= 0x20010004) {
 209		mask = 0xffffffff;
 210		reset_val = 0xbabeface;
 211	} else {
 212		mask = 0x1ff;
 213		reset_val = 0x100;
 214	}
 215
 216	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 217
 218	/* Set the log wptr index
 219	 * note: downstream saves the value in poweroff and restores it here
 220	 */
 221	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
 222
 223	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
 224
 225	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
 226		(val & mask) == reset_val, 100, 10000);
 227
 228	if (ret)
 229		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
 230
 231	return ret;
 232}
 233
 234static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
 235{
 236	u32 val;
 237	int ret;
 238
 239	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
 240
 241	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
 242		val & 1, 100, 10000);
 243	if (ret)
 244		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
 245
 246	return ret;
 247}
 248
 249struct a6xx_gmu_oob_bits {
 250	int set, ack, set_new, ack_new, clear, clear_new;
 251	const char *name;
 252};
 253
 254/* These are the interrupt / ack bits for each OOB request that are set
 255 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 256 */
 257static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
 258	[GMU_OOB_GPU_SET] = {
 259		.name = "GPU_SET",
 260		.set = 16,
 261		.ack = 24,
 262		.set_new = 30,
 263		.ack_new = 31,
 264		.clear = 24,
 265		.clear_new = 31,
 266	},
 267
 268	[GMU_OOB_PERFCOUNTER_SET] = {
 269		.name = "PERFCOUNTER",
 270		.set = 17,
 271		.ack = 25,
 272		.set_new = 28,
 273		.ack_new = 30,
 274		.clear = 25,
 275		.clear_new = 29,
 276	},
 277
 278	[GMU_OOB_BOOT_SLUMBER] = {
 279		.name = "BOOT_SLUMBER",
 280		.set = 22,
 281		.ack = 30,
 282		.clear = 30,
 283	},
 284
 285	[GMU_OOB_DCVS_SET] = {
 286		.name = "GPU_DCVS",
 287		.set = 23,
 288		.ack = 31,
 289		.clear = 31,
 290	},
 291};
 292
 293/* Trigger an OOB (out of band) request to the GMU */
 294int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 295{
 296	int ret;
 297	u32 val;
 298	int request, ack;
 299
 300	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
 301
 302	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 303		return -EINVAL;
 304
 305	if (gmu->legacy) {
 306		request = a6xx_gmu_oob_bits[state].set;
 307		ack = a6xx_gmu_oob_bits[state].ack;
 308	} else {
 309		request = a6xx_gmu_oob_bits[state].set_new;
 310		ack = a6xx_gmu_oob_bits[state].ack_new;
 311		if (!request || !ack) {
 312			DRM_DEV_ERROR(gmu->dev,
 313				      "Invalid non-legacy GMU request %s\n",
 314				      a6xx_gmu_oob_bits[state].name);
 315			return -EINVAL;
 316		}
 317	}
 318
 319	/* Trigger the requested OOB operation */
 320	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
 321
 322	/* Wait for the acknowledge interrupt */
 323	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
 324		val & (1 << ack), 100, 10000);
 325
 326	if (ret)
 327		DRM_DEV_ERROR(gmu->dev,
 328			"Timeout waiting for GMU OOB set %s: 0x%x\n",
 329				a6xx_gmu_oob_bits[state].name,
 330				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
 331
 332	/* Clear the acknowledge interrupt */
 333	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
 334
 335	return ret;
 336}
 337
 338/* Clear a pending OOB state in the GMU */
 339void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 340{
 341	int bit;
 342
 343	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
 344
 345	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 346		return;
 347
 348	if (gmu->legacy)
 349		bit = a6xx_gmu_oob_bits[state].clear;
 350	else
 351		bit = a6xx_gmu_oob_bits[state].clear_new;
 352
 353	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
 354}
 355
 356/* Enable CPU control of SPTP power collapse */
 357static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 358{
 359	int ret;
 360	u32 val;
 361
 362	if (!gmu->legacy)
 363		return 0;
 364
 365	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
 366
 367	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
 368		(val & 0x38) == 0x28, 1, 100);
 369
 370	if (ret) {
 371		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
 372			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 373	}
 374
 375	return ret;
 376}
 377
 378/* Disable CPU control of SPTP power collapse */
 379static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
 380{
 381	u32 val;
 382	int ret;
 383
 384	if (!gmu->legacy)
 385		return;
 386
 387	/* Make sure retention is on */
 388	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
 389
 390	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
 391
 392	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
 393		(val & 0x04), 100, 10000);
 394
 395	if (ret)
 396		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
 397			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 398}
 399
 400/* Let the GMU know we are starting a boot sequence */
 401static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
 402{
 403	u32 vote;
 404
 405	/* Let the GMU know we are getting ready for boot */
 406	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
 407
 408	/* Choose the "default" power level as the highest available */
 409	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
 410
 411	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
 412	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
 413
 414	/* Let the GMU know the boot sequence has started */
 415	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 416}
 417
 418/* Let the GMU know that we are about to go into slumber */
 419static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 420{
 421	int ret;
 422
 423	/* Disable the power counter so the GMU isn't busy */
 424	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
 425
 426	/* Disable SPTP_PC if the CPU is responsible for it */
 427	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
 428		a6xx_sptprac_disable(gmu);
 429
 430	if (!gmu->legacy) {
 431		ret = a6xx_hfi_send_prep_slumber(gmu);
 432		goto out;
 433	}
 434
 435	/* Tell the GMU to get ready to slumber */
 436	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
 437
 438	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 439	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 440
 441	if (!ret) {
 442		/* Check to see if the GMU really did slumber */
 443		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
 444			!= 0x0f) {
 445			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
 446			ret = -ETIMEDOUT;
 447		}
 448	}
 449
 450out:
 451	/* Put fence into allow mode */
 452	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
 453	return ret;
 454}
 455
 456static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 457{
 458	int ret;
 459	u32 val;
 460
 461	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
 462	/* Wait for the register to finish posting */
 463	wmb();
 464
 465	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
 466		val & (1 << 1), 100, 10000);
 467	if (ret) {
 468		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
 469		return ret;
 470	}
 471
 472	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
 473		!val, 100, 10000);
 474
 475	if (ret) {
 476		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
 477		return ret;
 478	}
 479
 480	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 481
 482	/* Set up CX GMU counter 0 to count busy ticks */
 483	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
 484	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
 485
 486	/* Enable the power counter */
 487	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
 488	return 0;
 489}
 490
 491static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 492{
 493	int ret;
 494	u32 val;
 495
 496	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
 497
 498	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 499		val, val & (1 << 16), 100, 10000);
 500	if (ret)
 501		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
 502
 503	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 504}
 505
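/* PDC registers are indexed by dword, so convert the offset to bytes */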
 506static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
 507{
 508	msm_writel(value, ptr + (offset << 2));
 509}
 510
 511static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
 512		const char *name);
 513
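/*
 * One-time programming of the RSC and PDC sequencers that drive the RPMh
 * sleep/wakeup handshakes for the GPU. On a650/a660 family parts the PDC
 * sequencer and TCS commands are already set up by the AOP firmware, so only
 * the RSC side (and the final PDC enable) is programmed here.
 */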
 514static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 515{
 516	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 517	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 518	struct platform_device *pdev = to_platform_device(gmu->dev);
 519	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
 520	void __iomem *seqptr = NULL;
 521	uint32_t pdc_address_offset;
 522	bool pdc_in_aop = false;
 523
 524	if (IS_ERR(pdcptr))
 525		goto err;
 526
 527	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
 528		pdc_in_aop = true;
 529	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
 530		pdc_address_offset = 0x30090;
 531	else if (adreno_is_a619(adreno_gpu))
 532		pdc_address_offset = 0x300a0;
 533	else
 534		pdc_address_offset = 0x30080;
 535
 536	if (!pdc_in_aop) {
 537		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
 538		if (IS_ERR(seqptr))
 539			goto err;
 540	}
 541
 542	/* Disable SDE clock gating */
 543	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
 544
 545	/* Setup RSC PDC handshake for sleep and wakeup */
 546	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
 547	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
 548	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
 549	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
 550	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
 551	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
 552	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
 553	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
 554	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
 555	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
 556	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 557
 558	/* Load RSC sequencer uCode for sleep and wakeup */
 559	if (adreno_is_a650_family(adreno_gpu)) {
 560		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
 561		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
 562		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
 563		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
 564		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
 565	} else {
 566		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
 567		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
 568		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
 569		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
 570		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
 571	}
 572
 573	if (pdc_in_aop)
 574		goto setup_pdc;
 575
 576	/* Load PDC sequencer uCode for power up and power down sequence */
 577	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
 578	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
 579	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
 580	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
 581	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
 582
 583	/* Set TCS commands used by PDC sequence for low power modes */
 584	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
 585	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
 586	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
 587	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
 588	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
 589	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
 590	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
 591	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
 592	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
 593
 594	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
 595	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
 596	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
 597
 598	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
 599	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
 600	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
 601	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
 602	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
 603	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
 604
 605	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
 606	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
 607	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
 608			adreno_is_a650_family(adreno_gpu))
 609		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
 610	else
 611		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
 612	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
 613	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
 614	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
 615
 616	/* Setup GPU PDC */
 617setup_pdc:
 618	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
 619	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
 620
 621	/* ensure no writes happen before the uCode is fully written */
 622	wmb();
 623
 624err:
 625	if (!IS_ERR_OR_NULL(pdcptr))
 626		iounmap(pdcptr);
 627	if (!IS_ERR_OR_NULL(seqptr))
 628		iounmap(seqptr);
 629}
 630
 631/*
 632 * The lowest 16 bits of this value are the number of XO clock cycles for main
 633 * hysteresis which is set at 0x1680 cycles (300 us).  The higher 16 bits are
 634 * for the shorter hysteresis that happens after main - this is 0xa (0.5 us)
 635 */
 636
 637#define GMU_PWR_COL_HYST 0x000a1680
 638
 639/* Set up the idle state for the GMU */
 640static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
 641{
 642	/* Disable GMU WB/RB buffer */
 643	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
 644	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
 645	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
 646
 647	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
 648
 649	switch (gmu->idle_level) {
 650	case GMU_IDLE_STATE_IFPC:
 651		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
 652			GMU_PWR_COL_HYST);
 653		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 654			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
 655			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
 656		fallthrough;
 657	case GMU_IDLE_STATE_SPTP:
 658		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
 659			GMU_PWR_COL_HYST);
 660		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 661			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
 662			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
 663	}
 664
 665	/* Enable RPMh GPU client */
 666	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
 667		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
 668		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
 669		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
 670		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
 671		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
 672		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
 673}
 674
 675struct block_header {
 676	u32 addr;
 677	u32 size;
 678	u32 type;
 679	u32 value;
 680	u32 data[];
 681};
 682
 683/* this should be a general kernel helper */
 684static int in_range(u32 addr, u32 start, u32 size)
 685{
 686	return addr >= start && addr < start + size;
 687}
 688
 689static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
 690{
 691	if (!in_range(blk->addr, bo->iova, bo->size))
 692		return false;
 693
 694	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
 695	return true;
 696}
 697
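/*
 * Load the GMU firmware. Legacy firmware is a flat image copied straight
 * into ITCM; newer firmware is a list of block_header records targeting
 * ITCM, DTCM or one of the preallocated GMU buffers (icache/dcache/dummy).
 */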
 698static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 699{
 700	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 701	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 702	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
 703	const struct block_header *blk;
 704	u32 reg_offset;
 705
 706	u32 itcm_base = 0x00000000;
 707	u32 dtcm_base = 0x00040000;
 708
 709	if (adreno_is_a650_family(adreno_gpu))
 710		dtcm_base = 0x10004000;
 711
 712	if (gmu->legacy) {
 713		/* Sanity check the size of the firmware that was loaded */
 714		if (fw_image->size > 0x8000) {
 715			DRM_DEV_ERROR(gmu->dev,
 716				"GMU firmware is bigger than the available region\n");
 717			return -EINVAL;
 718		}
 719
 720		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
 721			       (u32*) fw_image->data, fw_image->size);
 722		return 0;
 723	}
 724
 725
 726	for (blk = (const struct block_header *) fw_image->data;
 727	     (const u8*) blk < fw_image->data + fw_image->size;
 728	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
 729		if (blk->size == 0)
 730			continue;
 731
 732		if (in_range(blk->addr, itcm_base, SZ_16K)) {
 733			reg_offset = (blk->addr - itcm_base) >> 2;
 734			gmu_write_bulk(gmu,
 735				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
 736				blk->data, blk->size);
 737		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
 738			reg_offset = (blk->addr - dtcm_base) >> 2;
 739			gmu_write_bulk(gmu,
 740				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
 741				blk->data, blk->size);
 742		} else if (!fw_block_mem(&gmu->icache, blk) &&
 743			   !fw_block_mem(&gmu->dcache, blk) &&
 744			   !fw_block_mem(&gmu->dummy, blk)) {
 745			DRM_DEV_ERROR(gmu->dev,
 746				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
 747				blk->addr, blk->size, blk->data[0]);
 748		}
 749	}
 750
 751	return 0;
 752}
 753
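/*
 * Bring the GMU up: (re)start the RPMh sequence, load firmware on a cold
 * boot, point the GMU at the HFI queue table and log buffer, apply the
 * power/idle configuration, then release the CM3 core and start HFI.
 */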
 754static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 755{
 756	static bool rpmh_init;
 757	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 758	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 759	int ret;
 760	u32 chipid;
 761
 762	if (adreno_is_a650_family(adreno_gpu)) {
 763		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
 764		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
 765	}
 766
 767	if (state == GMU_WARM_BOOT) {
 768		ret = a6xx_rpmh_start(gmu);
 769		if (ret)
 770			return ret;
 771	} else {
 772		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
 773			"GMU firmware is not loaded\n"))
 774			return -ENOENT;
 775
 776		/* Turn on register retention */
 777		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
 778
 779		/* We only need to load the RPMh microcode once */
 780		if (!rpmh_init) {
 781			a6xx_gmu_rpmh_init(gmu);
 782			rpmh_init = true;
 783		} else {
 784			ret = a6xx_rpmh_start(gmu);
 785			if (ret)
 786				return ret;
 787		}
 788
 789		ret = a6xx_gmu_fw_load(gmu);
 790		if (ret)
 791			return ret;
 792	}
 793
 794	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
 795	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
 796
 797	/* Write the iova of the HFI table */
 798	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
 799	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
 800
 801	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
 802		(1 << 31) | (0xa << 18) | (0xa0));
 803
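	/* Pack the chip id (core.major.minor.patchid) for the GMU */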
 804	chipid = adreno_gpu->rev.core << 24;
 805	chipid |= adreno_gpu->rev.major << 16;
 806	chipid |= adreno_gpu->rev.minor << 12;
 807	chipid |= adreno_gpu->rev.patchid << 8;
 808
 809	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
 810
 811	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
 812		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
 813
 814	/* Set up the lowest idle level on the GMU */
 815	a6xx_gmu_power_config(gmu);
 816
 817	ret = a6xx_gmu_start(gmu);
 818	if (ret)
 819		return ret;
 820
 821	if (gmu->legacy) {
 822		ret = a6xx_gmu_gfx_rail_on(gmu);
 823		if (ret)
 824			return ret;
 825	}
 826
 827	/* Enable SPTP_PC if the CPU is responsible for it */
 828	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
 829		ret = a6xx_sptprac_enable(gmu);
 830		if (ret)
 831			return ret;
 832	}
 833
 834	ret = a6xx_gmu_hfi_start(gmu);
 835	if (ret)
 836		return ret;
 837
 838	/* FIXME: Do we need this wmb() here? */
 839	wmb();
 840
 841	return 0;
 842}
 843
 844#define A6XX_HFI_IRQ_MASK \
 845	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
 846
 847#define A6XX_GMU_IRQ_MASK \
 848	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
 849	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
 850	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
 851
 852static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
 853{
 854	disable_irq(gmu->gmu_irq);
 855	disable_irq(gmu->hfi_irq);
 856
 857	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
 858	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
 859}
 860
 861static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 862{
 863	u32 val;
 864
 865	/* Make sure there are no outstanding RPMh votes */
 866	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
 867		(val & 1), 100, 10000);
 868	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
 869		(val & 1), 100, 10000);
 870	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
 871		(val & 1), 100, 10000);
 872	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
 873		(val & 1), 100, 1000);
 874}
 875
 876#define GBIF_CLIENT_HALT_MASK             BIT(0)
 877#define GBIF_ARB_HALT_MASK                BIT(1)
 878
 879static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
 880		bool gx_off)
 881{
 882	struct msm_gpu *gpu = &adreno_gpu->base;
 883
 884	if (!a6xx_has_gbif(adreno_gpu)) {
 885		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
 886		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
 887								0xf) == 0xf);
 888		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
 889
 890		return;
 891	}
 892
 893	if (gx_off) {
 894		/* Halt the gx side of GBIF */
 895		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
 896		spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
 897	}
 898
 899	/* Halt new client requests on GBIF */
 900	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
 901	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
 902			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
 903
 904	/* Halt all AXI requests on GBIF */
 905	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
 906	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
 907			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
 908
 909	/* The GBIF halt needs to be explicitly cleared */
 910	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
 911}
 912
 913/* Force the GMU off in case it isn't responsive */
 914static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 915{
 916	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 917	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 918	struct msm_gpu *gpu = &adreno_gpu->base;
 919
 920	/* Flush all the queues */
 921	a6xx_hfi_stop(gmu);
 922
 923	/* Stop the interrupts */
 924	a6xx_gmu_irq_disable(gmu);
 925
 926	/* Force off SPTP in case the GMU is managing it */
 927	a6xx_sptprac_disable(gmu);
 928
 929	/* Make sure there are no outstanding RPMh votes */
 930	a6xx_gmu_rpmh_off(gmu);
 931
 932	/* Halt the gmu cm3 core */
 933	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 934
 935	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 936
 937	/* Reset GPU core blocks */
 938	gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
 939	udelay(100);
 940}
 941
 942static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 943{
 944	struct dev_pm_opp *gpu_opp;
 945	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
 946
 947	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
 948	if (IS_ERR(gpu_opp))
 949		return;
 950
 951	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
 952	a6xx_gmu_set_freq(gpu, gpu_opp, false);
 953	dev_pm_opp_put(gpu_opp);
 954}
 955
 956static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 957{
 958	struct dev_pm_opp *gpu_opp;
 959	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
 960
 961	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
 962	if (IS_ERR(gpu_opp))
 963		return;
 964
 965	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
 966	dev_pm_opp_put(gpu_opp);
 967}
 968
 969int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 970{
 971	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 972	struct msm_gpu *gpu = &adreno_gpu->base;
 973	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 974	int status, ret;
 975
 976	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
 977		return 0;
 978
 979	gmu->hung = false;
 980
 981	/* Turn on the resources */
 982	pm_runtime_get_sync(gmu->dev);
 983
 984	/*
 985	 * "enable" the GX power domain which won't actually do anything but it
 986	 * will make sure that the refcounting is correct in case we need to
 987	 * bring down the GX after a GMU failure
 988	 */
 989	if (!IS_ERR_OR_NULL(gmu->gxpd))
 990		pm_runtime_get_sync(gmu->gxpd);
 991
 992	/* Use a known rate to bring up the GMU */
 993	clk_set_rate(gmu->core_clk, 200000000);
 994	clk_set_rate(gmu->hub_clk, 150000000);
 995	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
 996	if (ret) {
 997		pm_runtime_put(gmu->gxpd);
 998		pm_runtime_put(gmu->dev);
 999		return ret;
1000	}
1001
1002	/* Set the bus quota to a reasonable value for boot */
1003	a6xx_gmu_set_initial_bw(gpu, gmu);
1004
1005	/* Enable the GMU interrupt */
1006	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
1007	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
1008	enable_irq(gmu->gmu_irq);
1009
1010	/* Check to see if we are doing a cold or warm boot */
1011	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
1012		GMU_WARM_BOOT : GMU_COLD_BOOT;
1013
1014	/*
1015	 * Warm boot path does not work on newer GPUs
1016	 * Presumably this is because icache/dcache regions must be restored
1017	 */
1018	if (!gmu->legacy)
1019		status = GMU_COLD_BOOT;
1020
1021	ret = a6xx_gmu_fw_start(gmu, status);
1022	if (ret)
1023		goto out;
1024
1025	ret = a6xx_hfi_start(gmu, status);
1026	if (ret)
1027		goto out;
1028
1029	/*
1030	 * Turn on the GMU firmware fault interrupt after we know the boot
1031	 * sequence is successful
1032	 */
1033	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
1034	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
1035	enable_irq(gmu->hfi_irq);
1036
1037	/* Set the GPU to the current freq */
1038	a6xx_gmu_set_initial_freq(gpu, gmu);
1039
1040out:
1041	/* On failure, shut down the GMU to leave it in a good state */
1042	if (ret) {
1043		disable_irq(gmu->gmu_irq);
1044		a6xx_rpmh_stop(gmu);
1045		pm_runtime_put(gmu->gxpd);
1046		pm_runtime_put(gmu->dev);
1047	}
1048
1049	return ret;
1050}
1051
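/* Check the CX busy status to decide whether the GMU considers the GPU idle */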
1052bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
1053{
1054	u32 reg;
1055
1056	if (!gmu->initialized)
1057		return true;
1058
1059	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
1060
1061	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
1062		return false;
1063
1064	return true;
1065}
1066
1067/* Gracefully try to shut down the GMU and by extension the GPU */
1068static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
1069{
1070	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1071	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1072	u32 val;
1073
1074	/*
1075	 * The GMU may still be in slumber if the GPU never started, so check
1076	 * and skip putting it back into slumber in that case
1077	 */
1078	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
1079
1080	if (val != 0xf) {
1081		int ret = a6xx_gmu_wait_for_idle(gmu);
1082
1083		/* If the GMU isn't responding assume it is hung */
1084		if (ret) {
1085			a6xx_gmu_force_off(gmu);
1086			return;
1087		}
1088
1089		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
1090
1091		/* tell the GMU we want to slumber */
1092		ret = a6xx_gmu_notify_slumber(gmu);
1093		if (ret) {
1094			a6xx_gmu_force_off(gmu);
1095			return;
1096		}
1097
1098		ret = gmu_poll_timeout(gmu,
1099			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
1100			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
1101			100, 10000);
1102
1103		/*
1104		 * Let the user know we failed to slumber but don't worry too
1105		 * much because we are powering down anyway
1106		 */
1107
1108		if (ret)
1109			DRM_DEV_ERROR(gmu->dev,
1110				"Unable to slumber GMU: status = 0%x/0%x\n",
1111				gmu_read(gmu,
1112					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
1113				gmu_read(gmu,
1114					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
1115	}
1116
1117	/* Turn off HFI */
1118	a6xx_hfi_stop(gmu);
1119
1120	/* Stop the interrupts and mask the hardware */
1121	a6xx_gmu_irq_disable(gmu);
1122
1123	/* Tell RPMh to power off the GPU */
1124	a6xx_rpmh_stop(gmu);
1125}
1126
1127
1128int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
1129{
1130	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1131	struct msm_gpu *gpu = &a6xx_gpu->base.base;
1132
1133	if (!pm_runtime_active(gmu->dev))
1134		return 0;
1135
1136	/*
1137	 * Force the GMU off if we detected a hang, otherwise try to shut it
1138	 * down gracefully
1139	 */
1140	if (gmu->hung)
1141		a6xx_gmu_force_off(gmu);
1142	else
1143		a6xx_gmu_shutdown(gmu);
1144
1145	/* Remove the bus vote */
1146	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
1147
1148	/*
1149	 * Make sure the GX domain is off before turning off the GMU (CX)
1150	 * domain. Usually the GMU does this but only if the shutdown sequence
1151	 * was successful
1152	 */
1153	if (!IS_ERR_OR_NULL(gmu->gxpd))
1154		pm_runtime_put_sync(gmu->gxpd);
1155
1156	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
1157
1158	pm_runtime_put_sync(gmu->dev);
1159
1160	return 0;
1161}
1162
1163static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
1164{
1165	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
1166	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
1167	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
1168	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
1169	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
1170	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
1171
1172	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
1173	msm_gem_address_space_put(gmu->aspace);
1174}
1175
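/*
 * Allocate and pin a buffer for the GMU, either at a fixed GMU iova or
 * anywhere in the GMU's uncached range above the dummy page.
 */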
1176static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
1177		size_t size, u64 iova, const char *name)
1178{
1179	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1180	struct drm_device *dev = a6xx_gpu->base.base.dev;
1181	uint32_t flags = MSM_BO_WC;
1182	u64 range_start, range_end;
1183	int ret;
1184
1185	size = PAGE_ALIGN(size);
1186	if (!iova) {
1187		/* no fixed address - use GMU's uncached range */
1188		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
1189		range_end = 0x80000000;
1190	} else {
1191		/* range for fixed address */
1192		range_start = iova;
1193		range_end = iova + size;
1194		/* use IOMMU_PRIV for icache/dcache */
1195		flags |= MSM_BO_MAP_PRIV;
1196	}
1197
1198	bo->obj = msm_gem_new(dev, size, flags);
1199	if (IS_ERR(bo->obj))
1200		return PTR_ERR(bo->obj);
1201
1202	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
1203					     range_start, range_end);
1204	if (ret) {
1205		drm_gem_object_put(bo->obj);
1206		return ret;
1207	}
1208
1209	bo->virt = msm_gem_get_vaddr(bo->obj);
1210	bo->size = size;
1211
1212	msm_gem_object_set_name(bo->obj, name);
1213
1214	return 0;
1215}
1216
1217static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
1218{
1219	struct msm_mmu *mmu;
1220
1221	mmu = msm_iommu_new(gmu->dev, 0);
1222	if (!mmu)
1223		return -ENODEV;
1224	if (IS_ERR(mmu))
1225		return PTR_ERR(mmu);
1226
1227	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
1228	if (IS_ERR(gmu->aspace))
1229		return PTR_ERR(gmu->aspace);
1230
1231	return 0;
1232}
1233
1234/* Return the 'arc-level' for the given frequency */
1235static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
1236					   unsigned long freq)
1237{
1238	struct dev_pm_opp *opp;
1239	unsigned int val;
1240
1241	if (!freq)
1242		return 0;
1243
1244	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
1245	if (IS_ERR(opp))
1246		return 0;
1247
1248	val = dev_pm_opp_get_level(opp);
1249
1250	dev_pm_opp_put(opp);
1251
1252	return val;
1253}
1254
1255static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
1256		unsigned long *freqs, int freqs_count, const char *id)
1257{
1258	int i, j;
1259	const u16 *pri, *sec;
1260	size_t pri_count, sec_count;
1261
1262	pri = cmd_db_read_aux_data(id, &pri_count);
1263	if (IS_ERR(pri))
1264		return PTR_ERR(pri);
1265	/*
1266	 * The data comes back as an array of unsigned shorts so adjust the
1267	 * count accordingly
1268	 */
1269	pri_count >>= 1;
1270	if (!pri_count)
1271		return -EINVAL;
1272
1273	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
1274	if (IS_ERR(sec))
1275		return PTR_ERR(sec);
1276
1277	sec_count >>= 1;
1278	if (!sec_count)
1279		return -EINVAL;
1280
1281	/* Construct a vote for each frequency */
1282	for (i = 0; i < freqs_count; i++) {
1283		u8 pindex = 0, sindex = 0;
1284		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
1285
1286		/* Get the primary index that matches the arc level */
1287		for (j = 0; j < pri_count; j++) {
1288			if (pri[j] >= level) {
1289				pindex = j;
1290				break;
1291			}
1292		}
1293
1294		if (j == pri_count) {
1295			DRM_DEV_ERROR(dev,
1296				      "Level %u not found in the RPMh list\n",
1297				      level);
1298			DRM_DEV_ERROR(dev, "Available levels:\n");
1299			for (j = 0; j < pri_count; j++)
1300				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
1301
1302			return -EINVAL;
1303		}
1304
1305		/*
1306		 * Look for a level in the secondary list that matches. If
1307		 * nothing fits, use the maximum non-zero vote
1308		 */
1309
1310		for (j = 0; j < sec_count; j++) {
1311			if (sec[j] >= level) {
1312				sindex = j;
1313				break;
1314			} else if (sec[j]) {
1315				sindex = j;
1316			}
1317		}
1318
1319		/* Construct the vote */
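		/*
		 * bits [31:16]: primary arc level value (gfx.lvl or cx.lvl),
		 * bits [15:8]:  index into the secondary (mx.lvl) table,
		 * bits [7:0]:   index into the primary table
		 */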
1320		votes[i] = ((pri[pindex] & 0xffff) << 16) |
1321			(sindex << 8) | pindex;
1322	}
1323
1324	return 0;
1325}
1326
1327/*
1328 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1329 * to construct the list of votes on the CPU and send it over. Query the RPMh
1330 * voltage levels and build the votes
1331 */
1332
1333static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1334{
1335	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1336	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1337	struct msm_gpu *gpu = &adreno_gpu->base;
1338	int ret;
1339
1340	/* Build the GX votes */
1341	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1342		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1343
1344	/* Build the CX votes */
1345	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1346		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1347
1348	return ret;
1349}
1350
1351static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1352		u32 size)
1353{
1354	int count = dev_pm_opp_get_opp_count(dev);
1355	struct dev_pm_opp *opp;
1356	int i, index = 0;
1357	unsigned long freq = 1;
1358
1359	/*
1360	 * The OPP table doesn't contain the "off" frequency level so we need to
1361	 * add 1 to the table size to account for it
1362	 */
1363
1364	if (WARN(count + 1 > size,
1365		"The GMU frequency table is being truncated\n"))
1366		count = size - 1;
1367
1368	/* Set the "off" frequency */
1369	freqs[index++] = 0;
1370
1371	for (i = 0; i < count; i++) {
1372		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1373		if (IS_ERR(opp))
1374			break;
1375
1376		dev_pm_opp_put(opp);
1377		freqs[index++] = freq++;
1378	}
1379
1380	return index;
1381}
1382
1383static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1384{
1385	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1386	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1387	struct msm_gpu *gpu = &adreno_gpu->base;
1388
1389	int ret = 0;
1390
1391	/*
1392	 * The GMU handles its own frequency switching so build a list of
1393	 * available frequencies to send during initialization
1394	 */
1395	ret = devm_pm_opp_of_add_table(gmu->dev);
1396	if (ret) {
1397		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1398		return ret;
1399	}
1400
1401	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1402		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1403
1404	/*
1405	 * The GMU also handles GPU frequency switching so build a list
1406	 * from the GPU OPP table
1407	 */
1408	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1409		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1410
1411	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
1412
1413	/* Build the list of RPMh votes that we'll send to the GMU */
1414	return a6xx_gmu_rpmh_votes_init(gmu);
1415}
1416
1417static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1418{
1419	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1420
1421	if (ret < 1)
1422		return ret;
1423
1424	gmu->nr_clocks = ret;
1425
1426	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1427		gmu->nr_clocks, "gmu");
1428
1429	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
1430		gmu->nr_clocks, "hub");
1431
1432	return 0;
1433}
1434
1435static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1436		const char *name)
1437{
1438	void __iomem *ret;
1439	struct resource *res = platform_get_resource_byname(pdev,
1440			IORESOURCE_MEM, name);
1441
1442	if (!res) {
1443		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
1444		return ERR_PTR(-EINVAL);
1445	}
1446
1447	ret = ioremap(res->start, resource_size(res));
1448	if (!ret) {
1449		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
1450		return ERR_PTR(-EINVAL);
1451	}
1452
1453	return ret;
1454}
1455
1456static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1457		const char *name, irq_handler_t handler)
1458{
1459	int irq, ret;
1460
1461	irq = platform_get_irq_byname(pdev, name);
1462
1463	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
1464	if (ret) {
1465		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
1466			      name, ret);
1467		return ret;
1468	}
1469
1470	disable_irq(irq);
1471
1472	return irq;
1473}
1474
1475void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1476{
1477	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1478	struct platform_device *pdev = to_platform_device(gmu->dev);
1479
1480	if (!gmu->initialized)
1481		return;
1482
1483	pm_runtime_force_suspend(gmu->dev);
1484
1485	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1486		pm_runtime_disable(gmu->gxpd);
1487		dev_pm_domain_detach(gmu->gxpd, false);
1488	}
1489
1490	iounmap(gmu->mmio);
1491	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1492		iounmap(gmu->rscc);
1493	gmu->mmio = NULL;
1494	gmu->rscc = NULL;
1495
1496	a6xx_gmu_memory_free(gmu);
1497
1498	free_irq(gmu->gmu_irq, gmu);
1499	free_irq(gmu->hfi_irq, gmu);
1500
1501	/* Drop reference taken in of_find_device_by_node */
1502	put_device(gmu->dev);
1503
1504	gmu->initialized = false;
1505}
1506
1507int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1508{
1509	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1510	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1511	struct platform_device *pdev = of_find_device_by_node(node);
1512	int ret;
1513
1514	if (!pdev)
1515		return -ENODEV;
1516
1517	mutex_init(&gmu->lock);
1518
1519	gmu->dev = &pdev->dev;
1520
1521	of_dma_configure(gmu->dev, node, true);
1522
1523	/* For now, don't do anything fancy until we get our feet under us */
1524	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1525
1526	pm_runtime_enable(gmu->dev);
1527
1528	/* Get the list of clocks */
1529	ret = a6xx_gmu_clocks_probe(gmu);
1530	if (ret)
1531		goto err_put_device;
1532
1533	ret = a6xx_gmu_memory_probe(gmu);
1534	if (ret)
1535		goto err_put_device;
1536
1537
1538	/* A660 now requires handling "prealloc requests" in GMU firmware
1539	 * For now just hardcode allocations based on the known firmware.
1540	 * note: there is no indication that these correspond to "dummy" or
1541	 * "debug" regions, but this "guess" allows reusing these BOs which
1542	 * are otherwise unused by a660.
1543	 */
1544	gmu->dummy.size = SZ_4K;
1545	if (adreno_is_a660_family(adreno_gpu)) {
1546		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
1547					    0x60400000, "debug");
1548		if (ret)
1549			goto err_memory;
1550
1551		gmu->dummy.size = SZ_8K;
1552	}
1553
1554	/* Allocate memory for the GMU dummy page */
1555	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
1556				    0x60000000, "dummy");
1557	if (ret)
1558		goto err_memory;
1559
1560	/* Note that a650 family also includes a660 family: */
1561	if (adreno_is_a650_family(adreno_gpu)) {
1562		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1563			SZ_16M - SZ_16K, 0x04000, "icache");
1564		if (ret)
1565			goto err_memory;
1566	/*
1567	 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
1568	 * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
1569	 * necessary. If you omit this step and you don't get random pagefaults, you are likely
1570	 * good to go without this!
1571	 */
1572	} else if (adreno_is_a640_family(adreno_gpu)) {
1573		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1574			SZ_256K - SZ_16K, 0x04000, "icache");
1575		if (ret)
1576			goto err_memory;
1577
1578		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
1579			SZ_256K - SZ_16K, 0x44000, "dcache");
1580		if (ret)
1581			goto err_memory;
1582	} else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) {
1583		/* HFI v1, has sptprac */
1584		gmu->legacy = true;
1585
1586		/* Allocate memory for the GMU debug region */
1587		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
1588		if (ret)
1589			goto err_memory;
1590	}
1591
1592	/* Allocate memory for the HFI queues */
1593	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
1594	if (ret)
1595		goto err_memory;
1596
1597	/* Allocate memory for the GMU log region */
1598	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
1599	if (ret)
1600		goto err_memory;
1601
1602	/* Map the GMU registers */
1603	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1604	if (IS_ERR(gmu->mmio)) {
1605		ret = PTR_ERR(gmu->mmio);
1606		goto err_memory;
1607	}
1608
1609	if (adreno_is_a650_family(adreno_gpu)) {
1610		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
1611		if (IS_ERR(gmu->rscc))
1612			goto err_mmio;
1613	} else {
1614		gmu->rscc = gmu->mmio + 0x23000;
1615	}
1616
1617	/* Get the HFI and GMU interrupts */
1618	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1619	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1620
1621	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
1622		goto err_mmio;
1623
1624	/*
1625	 * Get a link to the GX power domain to reset the GPU in case of GMU
1626	 * crash
1627	 */
1628	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1629
1630	/* Get the power levels for the GMU and GPU */
1631	a6xx_gmu_pwrlevels_probe(gmu);
1632
1633	/* Set up the HFI queues */
1634	a6xx_hfi_init(gmu);
1635
1636	gmu->initialized = true;
1637
1638	return 0;
1639
1640err_mmio:
1641	iounmap(gmu->mmio);
1642	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1643		iounmap(gmu->rscc);
1644	free_irq(gmu->gmu_irq, gmu);
1645	free_irq(gmu->hfi_irq, gmu);
1646
1647	ret = -ENODEV;
1648
1649err_memory:
1650	a6xx_gmu_memory_free(gmu);
1651err_put_device:
1652	/* Drop reference taken in of_find_device_by_node */
1653	put_device(gmu->dev);
1654
1655	return ret;
1656}