// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/units.h>

#include "i915_drv.h"
#include "i915_hwmon.h"
#include "i915_reg.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 * - voltage  - millivolts
 * - power  - microwatts
 * - curr   - milliamperes
 * - energy - microjoules
 * - time   - milliseconds
 */
#define SF_VOLTAGE	1000
#define SF_POWER	1000000
#define SF_CURR		1000
#define SF_ENERGY	1000000
#define SF_TIME		1000

struct hwm_reg {
	i915_reg_t gt_perf_status;
	i915_reg_t pkg_temp;
	i915_reg_t pkg_power_sku_unit;
	i915_reg_t pkg_power_sku;
	i915_reg_t pkg_rapl_limit;
	i915_reg_t energy_status_all;
	i915_reg_t energy_status_tile;
	i915_reg_t fan_speed;
};

struct hwm_energy_info {
	u32 reg_val_prev;
	long accum_energy;			/* Accumulated energy for energy1_input */
};

struct hwm_fan_info {
	u32 reg_val_prev;
	u64 time_prev;
};

struct hwm_drvdata {
	struct i915_hwmon *hwmon;
	struct intel_uncore *uncore;
	struct device *hwmon_dev;
	struct hwm_energy_info ei;		/*  Energy info for energy1_input */
	struct hwm_fan_info fi;			/*  Fan info for fan1_input */
	char name[12];
	int gt_n;
	bool reset_in_progress;
	wait_queue_head_t waitq;
};

struct i915_hwmon {
	struct hwm_drvdata ddat;
	struct hwm_drvdata ddat_gt[I915_MAX_GT];
	struct mutex hwmon_lock;		/* counter overflow logic and rmw */
	struct hwm_reg rg;
	int scl_shift_power;
	int scl_shift_energy;
	int scl_shift_time;
};

static void
hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
				    i915_reg_t reg, u32 clear, u32 set)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	struct intel_uncore *uncore = ddat->uncore;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		mutex_lock(&hwmon->hwmon_lock);

		intel_uncore_rmw(uncore, reg, clear, set);

		mutex_unlock(&hwmon->hwmon_lock);
	}
}

/*
 * This function's return type of u64 allows for the case where the scaling
 * of the field taken from the 32-bit register value might cause a result to
 * exceed 32 bits.
 */
static u64
hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
			 u32 field_msk, int nshift, u32 scale_factor)
{
	struct intel_uncore *uncore = ddat->uncore;
	intel_wakeref_t wakeref;
	u32 reg_value;

	with_intel_runtime_pm(uncore->rpm, wakeref)
		reg_value = intel_uncore_read(uncore, rgadr);

	reg_value = REG_FIELD_GET(field_msk, reg_value);

	return mul_u64_u32_shr(reg_value, scale_factor, nshift);
}

/*
 * hwm_energy - Obtain energy value
 *
 * The underlying energy hardware register is 32-bits and is subject to
 * overflow. How long before overflow? For example, with an example
 * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
 * a power draw of 1000 watts, the 32-bit counter will overflow in
 * approximately 4.36 minutes.
 *
 * Examples:
 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
 *
 * The function significantly increases overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
hwm_energy(struct hwm_drvdata *ddat, long *energy)
{
	struct intel_uncore *uncore = ddat->uncore;
	struct i915_hwmon *hwmon = ddat->hwmon;
	struct hwm_energy_info *ei = &ddat->ei;
	intel_wakeref_t wakeref;
	i915_reg_t rgaddr;
	u32 reg_val;

	if (ddat->gt_n >= 0)
		rgaddr = hwmon->rg.energy_status_tile;
	else
		rgaddr = hwmon->rg.energy_status_all;

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		mutex_lock(&hwmon->hwmon_lock);

		reg_val = intel_uncore_read(uncore, rgaddr);

		if (reg_val >= ei->reg_val_prev)
			ei->accum_energy += reg_val - ei->reg_val_prev;
		else
			ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
		ei->reg_val_prev = reg_val;

		*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
					  hwmon->scl_shift_energy);
		mutex_unlock(&hwmon->hwmon_lock);
	}
}
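
/*
 * Worked example (illustrative, assuming scl_shift_energy == 14, i.e. a
 * hardware unit of 1/16384 J as in the comment above): if the previous read
 * was 0xfffffe00 and the current read is 0x00000200, the counter has
 * wrapped, so accum_energy grows by UINT_MAX - 0xfffffe00 + 0x200 = 0x3ff
 * ticks, roughly 62 mJ. The reported energy1_input is then
 * accum_energy * SF_ENERGY >> 14 microjoules.
 */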

static ssize_t
hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	u32 r, x, y, x_w = 2; /* 2 bits */
	u64 tau4, out;

	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);

	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
	/*
	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 * where the (y - 2) shift accounts for (4 | x) being the 1.x value in
	 * fixed point with two fractional bits. However, because y can be < 2,
	 * we compute
	 *     tau4 = (4 | x) << y
	 * but add 2 when doing the final right shift to account for units
	 */
	tau4 = (u64)((1 << x_w) | x) << y;
	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}
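
/*
 * Worked example (illustrative): with x = 1, y = 10 and a typical
 * hwmon->scl_shift_time of 0xa, the hardware time unit is 1/1024 s and
 * tau = 1.25 * 2^10 units. The code above computes
 *     tau4 = (4 | 1) << 10 = 5120
 *     out  = 5120 * SF_TIME >> (10 + 2) = 1250 ms
 * which matches 1.25 * 1024 units * (1/1024) s/unit = 1.25 s.
 */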

static ssize_t
hwm_power1_max_interval_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
	struct i915_hwmon *hwmon = ddat->hwmon;
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in hwm_power1_max_interval_show()
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
	/* Convert to 1.x * power(2,y) */
	if (!val) {
		/* Avoid ilog2(0) */
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		/* x = (val - (1 << y)) >> (y - 2); */
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);

	hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
					    PKG_PWR_LIM_1_TIME, rxy);
	return count;
}
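
/*
 * Worked example (illustrative): writing 1250 ms with scl_shift_time = 0xa
 * first converts to hardware units, 1250 << 10 / SF_TIME = 1280, then
 * y = ilog2(1280) = 10 and x = (1280 - 1024) << 2 >> 10 = 1, i.e. the same
 * 1.25 * 2^10 encoding that the show() path above decodes back to 1250 ms.
 */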

static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  hwm_power1_max_interval_show,
			  hwm_power1_max_interval_store, 0);

static struct attribute *hwm_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	NULL
};

static umode_t hwm_attributes_visible(struct kobject *kobj,
				      struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
	struct i915_hwmon *hwmon = ddat->hwmon;

	if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? attr->mode : 0;

	return 0;
}

static const struct attribute_group hwm_attrgroup = {
	.attrs = hwm_attributes,
	.is_visible = hwm_attributes_visible,
};

static const struct attribute_group *hwm_groups[] = {
	&hwm_attrgroup,
	NULL
};

static const struct hwmon_channel_info * const hwm_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
	HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
	NULL
};

static const struct hwmon_channel_info * const hwm_gt_info[] = {
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int hwm_pcode_read_i1(struct drm_i915_private *i915, u32 *uval)
{
	/* Avoid ILLEGAL_SUBCOMMAND "mailbox access failed" warning in snb_pcode_read */
	if (IS_DG1(i915) || IS_DG2(i915))
		return -ENXIO;

	return snb_pcode_read_p(&i915->uncore, PCODE_POWER_SETUP,
				POWER_SETUP_SUBCOMMAND_READ_I1, 0, uval);
}

static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval)
{
	return  snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
				  POWER_SETUP_SUBCOMMAND_WRITE_I1, 0, uval);
}
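
/*
 * Worked example (illustrative, assuming the I1 data is in 10.6 fixed point,
 * i.e. POWER_SETUP_I1_SHIFT == 6): a pcode value of 0x80000280 has bit 31
 * set, so I1 is in watts and is exposed as power1_crit; the data field is
 * 0x280 = 640, giving 640 * SF_POWER >> 6 = 10000000 uW = 10 W. With bit 31
 * clear the same data field would instead be exposed as curr1_crit:
 * 640 * SF_CURR >> 6 = 10000 mA = 10 A.
 */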

static umode_t
hwm_temp_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
	struct i915_hwmon *hwmon = ddat->hwmon;

	if (attr == hwmon_temp_input && i915_mmio_reg_valid(hwmon->rg.pkg_temp))
		return 0444;

	return 0;
}

static int
hwm_temp_read(struct hwm_drvdata *ddat, u32 attr, long *val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	u32 reg_val;

	switch (attr) {
	case hwmon_temp_input:
		with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
			reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_temp);

		/* HW register value is in degrees Celsius, convert to millidegrees. */
		*val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
	struct drm_i915_private *i915 = ddat->uncore->i915;

	switch (attr) {
	case hwmon_in_input:
		return IS_DG1(i915) || IS_DG2(i915) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
hwm_in_read(struct hwm_drvdata *ddat, u32 attr, long *val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	u32 reg_value;

	switch (attr) {
	case hwmon_in_input:
		with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
			reg_value = intel_uncore_read(ddat->uncore, hwmon->rg.gt_perf_status);
		/* HW register value in units of 2.5 millivolt */
		*val = DIV_ROUND_CLOSEST(REG_FIELD_GET(GEN12_VOLTAGE_MASK, reg_value) * 25, 10);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
hwm_power_is_visible(const struct hwm_drvdata *ddat, u32 attr, int chan)
{
	struct drm_i915_private *i915 = ddat->uncore->i915;
	struct i915_hwmon *hwmon = ddat->hwmon;
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? 0664 : 0;
	case hwmon_power_rated_max:
		return i915_mmio_reg_valid(hwmon->rg.pkg_power_sku) ? 0444 : 0;
	case hwmon_power_crit:
		return (hwm_pcode_read_i1(i915, &uval) ||
			!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	default:
		return 0;
	}
}

#define PL1_DISABLE 0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
 * clamped values when read. Write/read I1 also follows the same pattern.
 */
static int
hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	u64 r, min, max;

	/* Check if PL1 limit is disabled */
	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
	if (!(r & PKG_PWR_LIM_1_EN)) {
		*val = PL1_DISABLE;
		return 0;
	}

	*val = hwm_field_read_and_scale(ddat,
					hwmon->rg.pkg_rapl_limit,
					PKG_PWR_LIM_1,
					hwmon->scl_shift_power,
					SF_POWER);

	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
		r = intel_uncore_read64(ddat->uncore, hwmon->rg.pkg_power_sku);
	min = REG_FIELD_GET(PKG_MIN_PWR, r);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, r);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*val = clamp_t(u64, *val, min, max);

	return 0;
}
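
/*
 * Worked example (illustrative, assuming a typical scl_shift_power of 3,
 * i.e. PL1 in units of 1/8 W): a raw PKG_PWR_LIM_1 field of 2000 reads back
 * as 2000 * SF_POWER >> 3 = 250000000 uW = 250 W, before being clamped to
 * the [PKG_MIN_PWR, PKG_MAX_PWR] range scaled the same way.
 */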

static int
hwm_power_max_write(struct hwm_drvdata *ddat, long val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	DEFINE_WAIT(wait);
	int ret = 0;
	u32 nval;

	/* Block waiting for GuC reset to complete when needed */
	for (;;) {
		wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
		mutex_lock(&hwmon->hwmon_lock);

		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);

		if (!hwmon->ddat.reset_in_progress)
			break;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_unlock(&hwmon->hwmon_lock);
		intel_runtime_pm_put(ddat->uncore->rpm, wakeref);

		schedule();
	}
	finish_wait(&ddat->waitq, &wait);
	if (ret)
		goto exit;

	/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
	if (val == PL1_DISABLE) {
		intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
				 PKG_PWR_LIM_1_EN, 0);
		nval = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);

		if (nval & PKG_PWR_LIM_1_EN)
			ret = -ENODEV;
		goto exit;
	}

	/* Computation in 64-bits to avoid overflow. Round to nearest. */
	nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);

	intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
			 PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
	mutex_unlock(&hwmon->hwmon_lock);
	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
	return ret;
}

static int
hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	int ret;
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return hwm_power_max_read(ddat, val);
	case hwmon_power_rated_max:
		*val = hwm_field_read_and_scale(ddat,
						hwmon->rg.pkg_power_sku,
						PKG_PKG_TDP,
						hwmon->scl_shift_power,
						SF_POWER);
		return 0;
	case hwmon_power_crit:
		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
		if (ret)
			return ret;
		if (!(uval & POWER_SETUP_I1_WATTS))
			return -ENODEV;
		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				       SF_POWER, POWER_SETUP_I1_SHIFT);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
{
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return hwm_power_max_write(ddat, val);
	case hwmon_power_crit:
		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
	default:
		return -EOPNOTSUPP;
	}
}

void i915_hwmon_power_max_disable(struct drm_i915_private *i915, bool *old)
{
	struct i915_hwmon *hwmon = i915->hwmon;
	u32 r;

	if (!hwmon || !i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
		return;

	mutex_lock(&hwmon->hwmon_lock);

	hwmon->ddat.reset_in_progress = true;
	r = intel_uncore_rmw(hwmon->ddat.uncore, hwmon->rg.pkg_rapl_limit,
			     PKG_PWR_LIM_1_EN, 0);
	*old = !!(r & PKG_PWR_LIM_1_EN);

	mutex_unlock(&hwmon->hwmon_lock);
}

void i915_hwmon_power_max_restore(struct drm_i915_private *i915, bool old)
{
	struct i915_hwmon *hwmon = i915->hwmon;

	if (!hwmon || !i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
		return;

	mutex_lock(&hwmon->hwmon_lock);

	intel_uncore_rmw(hwmon->ddat.uncore, hwmon->rg.pkg_rapl_limit,
			 PKG_PWR_LIM_1_EN, old ? PKG_PWR_LIM_1_EN : 0);
	hwmon->ddat.reset_in_progress = false;
	wake_up_all(&hwmon->ddat.waitq);

	mutex_unlock(&hwmon->hwmon_lock);
}

static umode_t
hwm_energy_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	i915_reg_t rgaddr;

	switch (attr) {
	case hwmon_energy_input:
		if (ddat->gt_n >= 0)
			rgaddr = hwmon->rg.energy_status_tile;
		else
			rgaddr = hwmon->rg.energy_status_all;
		return i915_mmio_reg_valid(rgaddr) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
hwm_energy_read(struct hwm_drvdata *ddat, u32 attr, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		hwm_energy(ddat, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
hwm_curr_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
	struct drm_i915_private *i915 = ddat->uncore->i915;
	u32 uval;

	switch (attr) {
	case hwmon_curr_crit:
		return (hwm_pcode_read_i1(i915, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	default:
		return 0;
	}
}

static int
hwm_curr_read(struct hwm_drvdata *ddat, u32 attr, long *val)
{
	int ret;
	u32 uval;

	switch (attr) {
	case hwmon_curr_crit:
		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
		if (ret)
			return ret;
		if (uval & POWER_SETUP_I1_WATTS)
			return -ENODEV;
		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				       SF_CURR, POWER_SETUP_I1_SHIFT);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
{
	u32 uval;

	switch (attr) {
	case hwmon_curr_crit:
		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
hwm_fan_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
	struct i915_hwmon *hwmon = ddat->hwmon;

	if (attr == hwmon_fan_input && i915_mmio_reg_valid(hwmon->rg.fan_speed))
		return 0444;

	return 0;
}

static int
hwm_fan_input_read(struct hwm_drvdata *ddat, long *val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	struct hwm_fan_info *fi = &ddat->fi;
	u64 rotations, time_now, time;
	intel_wakeref_t wakeref;
	u32 reg_val;
	int ret = 0;

	wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
	mutex_lock(&hwmon->hwmon_lock);

	reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.fan_speed);
	time_now = get_jiffies_64();

	/*
	 * HW register value is accumulated count of pulses from
	 * PWM fan with the scale of 2 pulses per rotation.
	 */
	rotations = (reg_val - fi->reg_val_prev) / 2;

	time = jiffies_delta_to_msecs(time_now - fi->time_prev);
	if (unlikely(!time)) {
		ret = -EAGAIN;
		goto exit;
	}

	/*
	 * Calculate fan speed in RPM by time averaging two subsequent
	 * readings in minutes.
	 * RPM = number of rotations * msecs per minute / time in msecs
	 */
	*val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);

	fi->reg_val_prev = reg_val;
	fi->time_prev = time_now;
exit:
	mutex_unlock(&hwmon->hwmon_lock);
	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
	return ret;
}
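
/*
 * Worked example (illustrative): if the pulse counter advanced by 100
 * between two reads taken 1000 ms apart, the code above computes
 * 100 / 2 = 50 rotations and 50 * 60000 / 1000 = 3000 RPM.
 */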

static int
hwm_fan_read(struct hwm_drvdata *ddat, u32 attr, long *val)
{
	if (attr == hwmon_fan_input)
		return hwm_fan_input_read(ddat, val);

	return -EOPNOTSUPP;
}

static umode_t
hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
	       u32 attr, int channel)
{
	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;

	switch (type) {
	case hwmon_temp:
		return hwm_temp_is_visible(ddat, attr);
	case hwmon_in:
		return hwm_in_is_visible(ddat, attr);
	case hwmon_power:
		return hwm_power_is_visible(ddat, attr, channel);
	case hwmon_energy:
		return hwm_energy_is_visible(ddat, attr);
	case hwmon_curr:
		return hwm_curr_is_visible(ddat, attr);
	case hwmon_fan:
		return hwm_fan_is_visible(ddat, attr);
	default:
		return 0;
	}
}

static int
hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	 int channel, long *val)
{
	struct hwm_drvdata *ddat = dev_get_drvdata(dev);

	switch (type) {
	case hwmon_temp:
		return hwm_temp_read(ddat, attr, val);
	case hwmon_in:
		return hwm_in_read(ddat, attr, val);
	case hwmon_power:
		return hwm_power_read(ddat, attr, channel, val);
	case hwmon_energy:
		return hwm_energy_read(ddat, attr, val);
	case hwmon_curr:
		return hwm_curr_read(ddat, attr, val);
	case hwmon_fan:
		return hwm_fan_read(ddat, attr, val);
	default:
		return -EOPNOTSUPP;
	}
}

static int
hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	  int channel, long val)
{
	struct hwm_drvdata *ddat = dev_get_drvdata(dev);

	switch (type) {
	case hwmon_power:
		return hwm_power_write(ddat, attr, channel, val);
	case hwmon_curr:
		return hwm_curr_write(ddat, attr, val);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwm_ops = {
	.is_visible = hwm_is_visible,
	.read = hwm_read,
	.write = hwm_write,
};

static const struct hwmon_chip_info hwm_chip_info = {
	.ops = &hwm_ops,
	.info = hwm_info,
};

static umode_t
hwm_gt_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		  u32 attr, int channel)
{
	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;

	switch (type) {
	case hwmon_energy:
		return hwm_energy_is_visible(ddat, attr);
	default:
		return 0;
	}
}

static int
hwm_gt_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	    int channel, long *val)
{
	struct hwm_drvdata *ddat = dev_get_drvdata(dev);

	switch (type) {
	case hwmon_energy:
		return hwm_energy_read(ddat, attr, val);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwm_gt_ops = {
	.is_visible = hwm_gt_is_visible,
	.read = hwm_gt_read,
};

static const struct hwmon_chip_info hwm_gt_chip_info = {
	.ops = &hwm_gt_ops,
	.info = hwm_gt_info,
};

static void
hwm_get_preregistration_info(struct drm_i915_private *i915)
{
	struct i915_hwmon *hwmon = i915->hwmon;
	struct intel_uncore *uncore = &i915->uncore;
	struct hwm_drvdata *ddat = &hwmon->ddat;
	intel_wakeref_t wakeref;
	u32 val_sku_unit = 0;
	struct intel_gt *gt;
	long energy;
	int i;

	/* Available for all Gen12+/dGfx */
	hwmon->rg.gt_perf_status = GEN12_RPSTAT1;

	if (IS_DG1(i915) || IS_DG2(i915)) {
		hwmon->rg.pkg_temp = PCU_PACKAGE_TEMPERATURE;
		hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT;
		hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU;
		hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
		hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
		hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED;
	} else {
		hwmon->rg.pkg_temp = INVALID_MMIO_REG;
		hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
		hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
		hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
		hwmon->rg.energy_status_all = INVALID_MMIO_REG;
		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
		hwmon->rg.fan_speed = INVALID_MMIO_REG;
	}

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		/*
		 * The contents of register hwmon->rg.pkg_power_sku_unit do not change,
		 * so read it once and store the shift values.
		 */
		if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
			val_sku_unit = intel_uncore_read(uncore,
							 hwmon->rg.pkg_power_sku_unit);

		/*
		 * Store the initial fan register value, so that we can use it for
		 * initial fan speed calculation.
		 */
		if (i915_mmio_reg_valid(hwmon->rg.fan_speed)) {
			ddat->fi.reg_val_prev = intel_uncore_read(uncore,
								  hwmon->rg.fan_speed);
			ddat->fi.time_prev = get_jiffies_64();
		}
	}

	hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
	hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
	hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);

	/*
	 * Initialize 'struct hwm_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	if (i915_mmio_reg_valid(hwmon->rg.energy_status_all))
		hwm_energy(ddat, &energy);
	if (i915_mmio_reg_valid(hwmon->rg.energy_status_tile)) {
		for_each_gt(gt, i915, i)
			hwm_energy(&hwmon->ddat_gt[i], &energy);
	}
}
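
/*
 * Illustrative PKG_POWER_SKU_UNIT decoding: a value whose PKG_PWR_UNIT,
 * PKG_ENERGY_UNIT and PKG_TIME_UNIT fields are 3, 14 and 10 gives hardware
 * units of 1/8 W, 1/16384 J and 1/1024 s respectively, the unit shifts
 * assumed in the worked examples above.
 */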

void i915_hwmon_register(struct drm_i915_private *i915)
{
	struct device *dev = i915->drm.dev;
	struct i915_hwmon *hwmon;
	struct device *hwmon_dev;
	struct hwm_drvdata *ddat;
	struct hwm_drvdata *ddat_gt;
	struct intel_gt *gt;
	int i;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(i915))
		return;

	hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return;

	i915->hwmon = hwmon;
	mutex_init(&hwmon->hwmon_lock);
	ddat = &hwmon->ddat;

	ddat->hwmon = hwmon;
	ddat->uncore = &i915->uncore;
	snprintf(ddat->name, sizeof(ddat->name), "i915");
	ddat->gt_n = -1;
	init_waitqueue_head(&ddat->waitq);

	for_each_gt(gt, i915, i) {
		ddat_gt = hwmon->ddat_gt + i;

		ddat_gt->hwmon = hwmon;
		ddat_gt->uncore = gt->uncore;
		snprintf(ddat_gt->name, sizeof(ddat_gt->name), "i915_gt%u", i);
		ddat_gt->gt_n = i;
	}

	hwm_get_preregistration_info(i915);

	/*  hwmon_dev points to device hwmon<i> */
	hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
						    ddat,
						    &hwm_chip_info,
						    hwm_groups);
	if (IS_ERR(hwmon_dev))
		goto err;

	ddat->hwmon_dev = hwmon_dev;

	for_each_gt(gt, i915, i) {
		ddat_gt = hwmon->ddat_gt + i;
		/*
		 * Create per-gt directories only if a per-gt attribute is
		 * visible. Currently this is only energy
		 */
		if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
			continue;

		hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
							    ddat_gt,
							    &hwm_gt_chip_info,
							    NULL);
		if (!IS_ERR(hwmon_dev))
			ddat_gt->hwmon_dev = hwmon_dev;
	}
	return;
err:
	i915_hwmon_unregister(i915);
}

void i915_hwmon_unregister(struct drm_i915_private *i915)
{
	struct i915_hwmon *hwmon = i915->hwmon;
	struct intel_gt *gt;
	int i;

	if (!hwmon)
		return;

	for_each_gt(gt, i915, i)
		if (hwmon->ddat_gt[i].hwmon_dev)
			hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);

	if (hwmon->ddat.hwmon_dev)
		hwmon_device_unregister(hwmon->ddat.hwmon_dev);

	mutex_destroy(&hwmon->hwmon_lock);

	kfree(i915->hwmon);
	i915->hwmon = NULL;
}