// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

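/*
 * The software evaluation interval starts at 1ms and is doubled after each
 * uneventful sample, capped at BUSY_MAX_EI, so a busy GPU is re-evaluated
 * quickly while an idle one is only polled occasionally (see rps_timer()).
 */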
#define BUSY_MAX_EI	20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

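/* Drop the bits that must be zero ("mbz") when written to GEN6_PMINTRMSK */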
static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, rps_to_gt(rps), id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
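		/*
		 * Insertion-sort this engine's busy time into max_busy[],
		 * which is kept in descending order, so that after the loop
		 * over all engines it holds the three busiest of them.
		 */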
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}
	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multiple engines being active does not necessarily
		 * imply a single continuous workload across all engines, we
		 * hedge our bets by only contributing a factor of the
		 * distributed load into our busyness calculation.
		 */
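		/* i.e. busy = max + max2/2 + max3/4, a geometric weighting */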
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			if (!max_busy[i])
				break;

			busy += div_u64(max_busy[i], 1 << i);
		}
		GT_TRACE(rps_to_gt(rps),
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else {
			rps->last_adj = 0;
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}

static void rps_start_timer(struct intel_rps *rps)
{
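	/*
	 * pm_timestamp was converted to (park time - last sample time) in
	 * rps_stop_timer(); undoing that against the current time advances
	 * the stored timestamp by the parked duration, so the parked period
	 * is excluded from the next busyness evaluation.
	 */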
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}

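/*
 * Compute the GEN6_PMINTRMSK value for a given frequency. Bits set in the
 * returned mask are *masked* (disabled) interrupts, hence the final negation
 * of the events we want to keep enabled.
 */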
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}

static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (GRAPHICS_VER(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

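	/*
	 * The ilk bins appear inverted relative to frequency: a larger bin
	 * value means a lower clock, so fmax maps to the minimum frequency
	 * here and fmin to the maximum (see gen5_invert_freq()).
	 */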
	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}

static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}

static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}

static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}

static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return -EBUSY; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return 0;
}

static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	spin_lock_irq(&mchdev_lock);
	err = __gen5_rps_set(rps, val);
	spin_unlock_irq(&mchdev_lock);

	return err;
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}

static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] <<  8 |
				   pxw[i * 4 + 3] <<  0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	__gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_lock(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	spin_lock(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	__gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	threshold_up = 95;
	threshold_down = 85;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		ei_up = 16000;
		ei_down = 32000;
		break;

	case BETWEEN:
		ei_up = 13000;
		ei_down = 32000;
		break;

	case HIGH_POWER:
		ei_up = 10000;
		ei_down = 32000;
		break;
	}

	/*
	 * When byt can survive without a system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);

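	/*
	 * The threshold count is EI (us -> ns) * threshold% / 100, i.e. the
	 * "* 10" below folds together the us->ns conversion (* 1000) and the
	 * percentage (/ 100).
	 */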
	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
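	/*
	 * Note the hysteresis below: the boundary for stepping up to the next
	 * power mode differs from the boundary for stepping back down, so
	 * hovering around a single boundary does not cause mode thrashing.
	 */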
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	if (GRAPHICS_VER(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		err = gen6_rps_set(rps, val);
	else
		err = gen5_rps_set(rps, val);
	if (err)
		return err;

	if (update && GRAPHICS_VER(i915) >= 6)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
		gen5_rps_update(rps);
}

void intel_rps_park(struct intel_rps *rps)
{
	int adj;

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	adj = rps->last_adj;
	if (adj < 0)
		adj *= 2;
	else /* CHV needs even encode values */
		adj = -2;
	rps->last_adj = adj;
	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
	if (rps->cur_freq < rps->efficient_freq) {
		rps->cur_freq = rps->efficient_freq;
		rps->last_adj = 0;
	}

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

void intel_rps_boost(struct i915_request *rq)
{
	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
		return;

	/* Serializes with i915_request_retire() */
	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

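		/* Only the first of concurrent waiters need kick the worker */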
		if (atomic_fetch_inc(&rps->num_waiters))
			return;

		if (!intel_rps_is_active(rps))
			return;

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
	}
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}

static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
		rps->min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(i915,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					(ddcc_status >> 8) & 0xff,
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
		/*
		 * Store the frequency values in 16.66 MHz units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}

static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (GRAPHICS_VER(gt->i915) == 9)
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (gt->info.sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int t, state1, state2;
	u32 pxvid, ext_v;
	u64 corr, corr2;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = div_u64(corr * 150142 * state1, 10000) - 78642;
	corr2 = div_u64(corr, 100000) * ips->corr;

	state2 = div_u64(corr2 * state1, 10000);
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (!HAS_RPS(i915))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(GRAPHICS_VER(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq));

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

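/*
 * Convert a platform-specific frequency encoding into MHz: on gen6+ the
 * opcode is in units of 50MHz (GT_FREQUENCY_MULTIPLIER), further divided
 * by GEN9_FREQ_SCALER on gen9+ (16.66MHz steps), while VLV/CHV derive it
 * from the GPLL reference clock.
 */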
int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

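/*
 * On VLV the up/down events are derived in software: on each EI-expired
 * interrupt we sample the render/media C0 residency counters and compare
 * the busier of the two against the current thresholds
 * (WaGsvRC0ResidencyMethod), rather than relying on the hw threshold
 * interrupts.
 */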
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/*
		 * Workload can be split between render + media, e.g.
		 * SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, yesno(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

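	/* Re-enable the events that the irq handler masked before queuing us */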
out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}

void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(&gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (GRAPHICS_VER(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}

void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}

void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang (and VLV, CHV may do so) on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (GRAPHICS_VER(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

void intel_rps_sanitize(struct intel_rps *rps)
{
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}

u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (GRAPHICS_VER(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else if (GRAPHICS_VER(i915) >= 6)
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	else
		cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
					MEMSTAT_PSTATE_SHIFT);

	return cagf;
}

static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else if (GRAPHICS_VER(i915) >= 6) {
		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
	} else {
		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
	}

	return intel_rps_get_cagf(rps, freq);
}

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}

/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/**
 * ips_ping_for_i915_load - tell the intel_ips driver that i915 is now
 * loaded, if IPS got loaded first
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (GRAPHICS_VER(gt->i915) == 5) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}

static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &i915->gt.rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#endif