drivers/gpu/drm/i915/i915_hwmon.c, v6.13.7
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2022 Intel Corporation
  4 */
  5
  6#include <linux/hwmon.h>
  7#include <linux/hwmon-sysfs.h>
  8#include <linux/jiffies.h>
  9#include <linux/types.h>
 10#include <linux/units.h>
 11
 12#include "i915_drv.h"
 13#include "i915_hwmon.h"
 14#include "i915_reg.h"
 15#include "intel_mchbar_regs.h"
 16#include "intel_pcode.h"
 17#include "gt/intel_gt.h"
 18#include "gt/intel_gt_regs.h"
 19
 20/*
 21 * SF_* - scale factors for particular quantities according to hwmon spec.
 22 * - voltage  - millivolts
 23 * - power  - microwatts
 24 * - curr   - milliamperes
 25 * - energy - microjoules
 26 * - time   - milliseconds
 27 */
 28#define SF_VOLTAGE	1000
 29#define SF_POWER	1000000
 30#define SF_CURR		1000
 31#define SF_ENERGY	1000000
 32#define SF_TIME		1000
 33
 34struct hwm_reg {
 35	i915_reg_t gt_perf_status;
 36	i915_reg_t pkg_temp;
 37	i915_reg_t pkg_power_sku_unit;
 38	i915_reg_t pkg_power_sku;
 39	i915_reg_t pkg_rapl_limit;
 40	i915_reg_t energy_status_all;
 41	i915_reg_t energy_status_tile;
 42	i915_reg_t fan_speed;
 43};
 44
 45struct hwm_energy_info {
 46	u32 reg_val_prev;
 47	long accum_energy;			/* Accumulated energy for energy1_input */
 48};
 49
 50struct hwm_fan_info {
 51	u32 reg_val_prev;
 52	u64 time_prev;
 53};
 54
 55struct hwm_drvdata {
 56	struct i915_hwmon *hwmon;
 57	struct intel_uncore *uncore;
 58	struct device *hwmon_dev;
 59	struct hwm_energy_info ei;		/*  Energy info for energy1_input */
 60	struct hwm_fan_info fi;			/*  Fan info for fan1_input */
 61	char name[12];
 62	int gt_n;
 63	bool reset_in_progress;
 64	wait_queue_head_t waitq;
 65};
 66
 67struct i915_hwmon {
 68	struct hwm_drvdata ddat;
 69	struct hwm_drvdata ddat_gt[I915_MAX_GT];
 70	struct mutex hwmon_lock;		/* counter overflow logic and rmw */
 71	struct hwm_reg rg;
 72	int scl_shift_power;
 73	int scl_shift_energy;
 74	int scl_shift_time;
 75};
 76
 77static void
 78hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
 79				    i915_reg_t reg, u32 clear, u32 set)
 80{
 81	struct i915_hwmon *hwmon = ddat->hwmon;
 82	struct intel_uncore *uncore = ddat->uncore;
 83	intel_wakeref_t wakeref;
 84
 85	with_intel_runtime_pm(uncore->rpm, wakeref) {
 86		mutex_lock(&hwmon->hwmon_lock);
 87
 88		intel_uncore_rmw(uncore, reg, clear, set);
 89
 90		mutex_unlock(&hwmon->hwmon_lock);
 91	}
 92}
 93
 94/*
 95 * This function's return type of u64 allows for the case where the scaling
 96 * of the field taken from the 32-bit register value might cause a result to
 97 * exceed 32 bits.
 98 */
 99static u64
100hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
101			 u32 field_msk, int nshift, u32 scale_factor)
102{
103	struct intel_uncore *uncore = ddat->uncore;
104	intel_wakeref_t wakeref;
105	u32 reg_value;
106
107	with_intel_runtime_pm(uncore->rpm, wakeref)
108		reg_value = intel_uncore_read(uncore, rgadr);
109
110	reg_value = REG_FIELD_GET(field_msk, reg_value);
111
112	return mul_u64_u32_shr(reg_value, scale_factor, nshift);
113}
114
115/*
116 * hwm_energy - Obtain energy value
117 *
118 * The underlying energy hardware register is 32 bits and is subject to
119 * overflow. How long before overflow? For example, with a scaling bit
120 * shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
121 * a power draw of 1000 watts, the 32-bit counter will overflow in
122 * approximately 4.36 minutes.
123 *
124 * Examples:
125 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
126 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
127 *
128 * The function significantly increases overflow duration (from 4.36
129 * minutes) by accumulating the energy register into a 'long' as allowed by
130 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
131 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
132 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
133 * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
134 */
135static void
136hwm_energy(struct hwm_drvdata *ddat, long *energy)
137{
138	struct intel_uncore *uncore = ddat->uncore;
139	struct i915_hwmon *hwmon = ddat->hwmon;
140	struct hwm_energy_info *ei = &ddat->ei;
141	intel_wakeref_t wakeref;
142	i915_reg_t rgaddr;
143	u32 reg_val;
144
145	if (ddat->gt_n >= 0)
146		rgaddr = hwmon->rg.energy_status_tile;
147	else
148		rgaddr = hwmon->rg.energy_status_all;
149
150	with_intel_runtime_pm(uncore->rpm, wakeref) {
151		mutex_lock(&hwmon->hwmon_lock);
152
153		reg_val = intel_uncore_read(uncore, rgaddr);
154
155		if (reg_val >= ei->reg_val_prev)
156			ei->accum_energy += reg_val - ei->reg_val_prev;
157		else
158			ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
159		ei->reg_val_prev = reg_val;
160
161		*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
162					  hwmon->scl_shift_energy);
163		mutex_unlock(&hwmon->hwmon_lock);
164	}
165}
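As a sanity check of the overflow and scaling arithmetic described above, here is a small stand-alone sketch (plain user-space C, not driver code; the scl_shift_energy value of 14 is an assumption matching the example in the comment, while in the driver it is read from PKG_ENERGY_UNIT) that mirrors hwm_energy()'s accumulate-then-scale steps across a counter wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int scl_shift_energy = 14;	/* assumed; the driver reads PKG_ENERGY_UNIT */
	const uint64_t sf_energy = 1000000;		/* SF_ENERGY: report in microjoules */
	uint32_t prev = 0xFFFFFF00, cur = 0x00000100;	/* counter wrapped between reads */
	uint64_t accum = 0;

	/* Same wrap handling as hwm_energy() */
	if (cur >= prev)
		accum += cur - prev;
	else
		accum += UINT32_MAX - prev + cur;

	/* energy1_input = accumulated hw units * 1e6 >> scl_shift_energy */
	printf("%llu uJ\n", (unsigned long long)((accum * sf_energy) >> scl_shift_energy));
	return 0;
}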
166
167static ssize_t
168hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
169			     char *buf)
170{
171	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
172	struct i915_hwmon *hwmon = ddat->hwmon;
173	intel_wakeref_t wakeref;
174	u32 r, x, y, x_w = 2; /* 2 bits */
175	u64 tau4, out;
176
177	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
178		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
179
180	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
181	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
182	/*
183	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
184	 *     = (4 | x) << (y - 2)
185	 * where the -2 compensates for the two fractional bits in (4 | x).
186	 * However, because y can be < 2, we compute
187	 *     tau4 = (4 | x) << y
188	 * and add 2 when doing the final right shift to account for the units
189	 */
190	tau4 = (u64)((1 << x_w) | x) << y;
191	/* val in hwmon interface units (millisec) */
192	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
193
194	return sysfs_emit(buf, "%llu\n", out);
195}
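As a worked example of the decode above (a stand-alone user-space sketch, not driver code), taking the hardware maximum x = 0, y = 0x12 together with the scl_shift_time default of 0xa quoted in hwm_power1_max_interval_store() below reproduces the 256-second maximum window:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int x_w = 2, x = 0, y = 0x12, scl_shift_time = 0xa;
	const uint64_t sf_time = 1000;	/* SF_TIME: milliseconds */

	uint64_t tau4 = (uint64_t)((1 << x_w) | x) << y;	/* (4 | x) << y */
	uint64_t out = (tau4 * sf_time) >> (scl_shift_time + x_w);

	printf("%llu ms\n", (unsigned long long)out);	/* 256000 ms = 256 s */
	return 0;
}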
196
197static ssize_t
198hwm_power1_max_interval_store(struct device *dev,
199			      struct device_attribute *attr,
200			      const char *buf, size_t count)
201{
202	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
203	struct i915_hwmon *hwmon = ddat->hwmon;
204	u32 x, y, rxy, x_w = 2; /* 2 bits */
205	u64 tau4, r, max_win;
206	unsigned long val;
207	int ret;
208
209	ret = kstrtoul(buf, 0, &val);
210	if (ret)
211		return ret;
212
213	/*
214	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
215	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds
216	 */
217#define PKG_MAX_WIN_DEFAULT 0x12ull
218
219	/*
220	 * val must be < max in hwmon interface units. The steps below are
221	 * explained in i915_power1_max_interval_show()
222	 */
223	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
224	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
225	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
226	tau4 = (u64)((1 << x_w) | x) << y;
227	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
228
229	if (val > max_win)
230		return -EINVAL;
231
232	/* val in hw units */
233	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
234	/* Convert to 1.x * power(2,y) */
235	if (!val) {
236		/* Avoid ilog2(0) */
237		y = 0;
238		x = 0;
239	} else {
240		y = ilog2(val);
241		/* x = (val - (1 << y)) >> (y - 2); */
242		x = (val - (1ul << y)) << x_w >> y;
243	}
244
245	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
246
247	hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
248					    PKG_PWR_LIM_1_TIME, rxy);
249	return count;
250}
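Going the other way, the following sketch (same assumptions: stand-alone C, scl_shift_time of 0xa) shows how a written value of 1000 ms is converted back to the 1.x * power(2,y) encoding; here it lands exactly on 1.0 * 2^10 hardware units:

#include <stdio.h>

/* Demo-only ilog2: index of the highest set bit. */
static unsigned int demo_ilog2(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int x_w = 2, scl_shift_time = 0xa;
	unsigned long val = 1000;	/* milliseconds written by userspace */
	unsigned int x, y;

	/* hwmon units -> hw units, rounded to nearest: (1000 << 10) / 1000 = 1024 */
	val = ((val << scl_shift_time) + 500) / 1000;
	y = demo_ilog2(val);			/* 10 */
	x = (val - (1ul << y)) << x_w >> y;	/* 0 */

	printf("x=%u y=%u\n", x, y);		/* 1.0 * 2^10 hw units */
	return 0;
}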
251
252static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
253			  hwm_power1_max_interval_show,
254			  hwm_power1_max_interval_store, 0);
255
256static struct attribute *hwm_attributes[] = {
257	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
258	NULL
259};
260
261static umode_t hwm_attributes_visible(struct kobject *kobj,
262				      struct attribute *attr, int index)
263{
264	struct device *dev = kobj_to_dev(kobj);
265	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
266	struct i915_hwmon *hwmon = ddat->hwmon;
267
268	if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
269		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? attr->mode : 0;
270
271	return 0;
272}
273
274static const struct attribute_group hwm_attrgroup = {
275	.attrs = hwm_attributes,
276	.is_visible = hwm_attributes_visible,
277};
278
279static const struct attribute_group *hwm_groups[] = {
280	&hwm_attrgroup,
281	NULL
282};
283
284static const struct hwmon_channel_info * const hwm_info[] = {
285	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
286	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
287	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
288	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
289	HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
290	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
291	NULL
292};
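With every register available, the channel table above makes the hwmon core create the standard per-type attribute files under the device's hwmon directory (temp1_input, in0_input, power1_max, power1_rated_max, power1_crit, energy1_input, curr1_crit and fan1_input, following the core's usual naming), alongside the driver's own power1_max_interval attribute from hwm_attrgroup; which of these actually appear on a given platform is decided by the *_is_visible() callbacks below.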
293
294static const struct hwmon_channel_info * const hwm_gt_info[] = {
295	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
296	NULL
297};
298
299/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
300static int hwm_pcode_read_i1(struct drm_i915_private *i915, u32 *uval)
301{
302	/* Avoid ILLEGAL_SUBCOMMAND "mailbox access failed" warning in snb_pcode_read */
303	if (IS_DG1(i915) || IS_DG2(i915))
304		return -ENXIO;
305
306	return snb_pcode_read_p(&i915->uncore, PCODE_POWER_SETUP,
307				POWER_SETUP_SUBCOMMAND_READ_I1, 0, uval);
308}
309
310static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval)
311{
312	return  snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
313				  POWER_SETUP_SUBCOMMAND_WRITE_I1, 0, uval);
314}
315
316static umode_t
317hwm_temp_is_visible(const struct hwm_drvdata *ddat, u32 attr)
318{
319	struct i915_hwmon *hwmon = ddat->hwmon;
320
321	if (attr == hwmon_temp_input && i915_mmio_reg_valid(hwmon->rg.pkg_temp))
322		return 0444;
323
324	return 0;
325}
326
327static int
328hwm_temp_read(struct hwm_drvdata *ddat, u32 attr, long *val)
329{
330	struct i915_hwmon *hwmon = ddat->hwmon;
331	intel_wakeref_t wakeref;
332	u32 reg_val;
333
334	switch (attr) {
335	case hwmon_temp_input:
336		with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
337			reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_temp);
338
339		/* HW register value is in degrees Celsius, convert to millidegrees. */
340		*val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
341		return 0;
342	default:
343		return -EOPNOTSUPP;
344	}
345}
346
347static umode_t
348hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr)
349{
350	struct drm_i915_private *i915 = ddat->uncore->i915;
351
352	switch (attr) {
353	case hwmon_in_input:
354		return IS_DG1(i915) || IS_DG2(i915) ? 0444 : 0;
355	default:
356		return 0;
357	}
358}
359
360static int
361hwm_in_read(struct hwm_drvdata *ddat, u32 attr, long *val)
362{
363	struct i915_hwmon *hwmon = ddat->hwmon;
364	intel_wakeref_t wakeref;
365	u32 reg_value;
366
367	switch (attr) {
368	case hwmon_in_input:
369		with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
370			reg_value = intel_uncore_read(ddat->uncore, hwmon->rg.gt_perf_status);
371		/* HW register value in units of 2.5 millivolt */
372		*val = DIV_ROUND_CLOSEST(REG_FIELD_GET(GEN12_VOLTAGE_MASK, reg_value) * 25, 10);
373		return 0;
374	default:
375		return -EOPNOTSUPP;
376	}
377}
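For example, under this 2.5 mV encoding a raw GEN12_VOLTAGE_MASK field value of 340 (a hypothetical reading) yields DIV_ROUND_CLOSEST(340 * 25, 10) = 850, i.e. 850 mV, which is the millivolt unit hwmon expects for in0_input.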
378
379static umode_t
380hwm_power_is_visible(const struct hwm_drvdata *ddat, u32 attr, int chan)
381{
382	struct drm_i915_private *i915 = ddat->uncore->i915;
383	struct i915_hwmon *hwmon = ddat->hwmon;
384	u32 uval;
385
386	switch (attr) {
387	case hwmon_power_max:
388		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? 0664 : 0;
389	case hwmon_power_rated_max:
390		return i915_mmio_reg_valid(hwmon->rg.pkg_power_sku) ? 0444 : 0;
391	case hwmon_power_crit:
392		return (hwm_pcode_read_i1(i915, &uval) ||
393			!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
394	default:
395		return 0;
396	}
397}
398
399#define PL1_DISABLE 0
400
401/*
402 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
403 * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
404 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
405 * clamped values when read. Write/read I1 also follows the same pattern.
406 */
407static int
408hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
409{
410	struct i915_hwmon *hwmon = ddat->hwmon;
411	intel_wakeref_t wakeref;
412	u64 r, min, max;
413
414	/* Check if PL1 limit is disabled */
415	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
416		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
417	if (!(r & PKG_PWR_LIM_1_EN)) {
418		*val = PL1_DISABLE;
419		return 0;
420	}
421
422	*val = hwm_field_read_and_scale(ddat,
423					hwmon->rg.pkg_rapl_limit,
424					PKG_PWR_LIM_1,
425					hwmon->scl_shift_power,
426					SF_POWER);
427
428	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
429		r = intel_uncore_read64(ddat->uncore, hwmon->rg.pkg_power_sku);
430	min = REG_FIELD_GET(PKG_MIN_PWR, r);
431	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
432	max = REG_FIELD_GET(PKG_MAX_PWR, r);
433	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
434
435	if (min && max)
436		*val = clamp_t(u64, *val, min, max);
437
438	return 0;
439}
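As a rough worked example (the shift here is an assumption; the real value comes from the PKG_PWR_UNIT field read in hwm_get_preregistration_info()): with a power unit shift of 3, i.e. 1/8 W granularity, a PKG_PWR_LIM_1 field of 1600 decodes to 1600 * SF_POWER >> 3 = 200,000,000 uW, a 200 W limit, and the same scaling is applied to the PKG_MIN_PWR/PKG_MAX_PWR values used for clamping.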
440
441static int
442hwm_power_max_write(struct hwm_drvdata *ddat, long val)
443{
444	struct i915_hwmon *hwmon = ddat->hwmon;
445	intel_wakeref_t wakeref;
446	DEFINE_WAIT(wait);
447	int ret = 0;
448	u32 nval;
449
450	/* Block waiting for GuC reset to complete when needed */
451	for (;;) {
452		wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
453		mutex_lock(&hwmon->hwmon_lock);
454
455		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
456
457		if (!hwmon->ddat.reset_in_progress)
458			break;
459
460		if (signal_pending(current)) {
461			ret = -EINTR;
462			break;
463		}
464
465		mutex_unlock(&hwmon->hwmon_lock);
466		intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
467
468		schedule();
469	}
470	finish_wait(&ddat->waitq, &wait);
471	if (ret)
472		goto exit;
473
474	/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
475	if (val == PL1_DISABLE) {
476		intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
477				 PKG_PWR_LIM_1_EN, 0);
478		nval = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
479
480		if (nval & PKG_PWR_LIM_1_EN)
481			ret = -ENODEV;
482		goto exit;
483	}
484
485	/* Computation in 64-bits to avoid overflow. Round to nearest. */
486	nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
487	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
488
489	intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
490			 PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
491exit:
492	mutex_unlock(&hwmon->hwmon_lock);
493	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
494	return ret;
495}
496
497static int
498hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
499{
500	struct i915_hwmon *hwmon = ddat->hwmon;
501	int ret;
502	u32 uval;
503
504	switch (attr) {
505	case hwmon_power_max:
506		return hwm_power_max_read(ddat, val);
507	case hwmon_power_rated_max:
508		*val = hwm_field_read_and_scale(ddat,
509						hwmon->rg.pkg_power_sku,
510						PKG_PKG_TDP,
511						hwmon->scl_shift_power,
512						SF_POWER);
513		return 0;
514	case hwmon_power_crit:
515		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
516		if (ret)
517			return ret;
518		if (!(uval & POWER_SETUP_I1_WATTS))
519			return -ENODEV;
520		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
521				       SF_POWER, POWER_SETUP_I1_SHIFT);
522		return 0;
523	default:
524		return -EOPNOTSUPP;
525	}
526}
527
528static int
529hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
530{
531	u32 uval;
532
533	switch (attr) {
534	case hwmon_power_max:
535		return hwm_power_max_write(ddat, val);
536	case hwmon_power_crit:
537		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
538		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
539	default:
540		return -EOPNOTSUPP;
541	}
542}
543
544void i915_hwmon_power_max_disable(struct drm_i915_private *i915, bool *old)
545{
546	struct i915_hwmon *hwmon = i915->hwmon;
547	u32 r;
548
549	if (!hwmon || !i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
550		return;
551
552	mutex_lock(&hwmon->hwmon_lock);
553
554	hwmon->ddat.reset_in_progress = true;
555	r = intel_uncore_rmw(hwmon->ddat.uncore, hwmon->rg.pkg_rapl_limit,
556			     PKG_PWR_LIM_1_EN, 0);
557	*old = !!(r & PKG_PWR_LIM_1_EN);
558
559	mutex_unlock(&hwmon->hwmon_lock);
560}
561
562void i915_hwmon_power_max_restore(struct drm_i915_private *i915, bool old)
563{
564	struct i915_hwmon *hwmon = i915->hwmon;
565
566	if (!hwmon || !i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
567		return;
568
569	mutex_lock(&hwmon->hwmon_lock);
570
571	intel_uncore_rmw(hwmon->ddat.uncore, hwmon->rg.pkg_rapl_limit,
572			 PKG_PWR_LIM_1_EN, old ? PKG_PWR_LIM_1_EN : 0);
573	hwmon->ddat.reset_in_progress = false;
574	wake_up_all(&hwmon->ddat.waitq);
575
576	mutex_unlock(&hwmon->hwmon_lock);
577}
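For context, a minimal sketch of the call pattern these two helpers are designed for, bracketing work that must not be PL1-throttled (do_reset_work() is a placeholder, not a real driver function):

/* Illustrative only: save the PL1 enable state, do the work, restore it. */
static void example_reset_without_pl1(struct drm_i915_private *i915)
{
	bool pl1en;

	i915_hwmon_power_max_disable(i915, &pl1en);
	do_reset_work(i915);		/* placeholder for the actual reset work */
	i915_hwmon_power_max_restore(i915, pl1en);
}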
578
579static umode_t
580hwm_energy_is_visible(const struct hwm_drvdata *ddat, u32 attr)
581{
582	struct i915_hwmon *hwmon = ddat->hwmon;
583	i915_reg_t rgaddr;
584
585	switch (attr) {
586	case hwmon_energy_input:
587		if (ddat->gt_n >= 0)
588			rgaddr = hwmon->rg.energy_status_tile;
589		else
590			rgaddr = hwmon->rg.energy_status_all;
591		return i915_mmio_reg_valid(rgaddr) ? 0444 : 0;
592	default:
593		return 0;
594	}
595}
596
597static int
598hwm_energy_read(struct hwm_drvdata *ddat, u32 attr, long *val)
599{
600	switch (attr) {
601	case hwmon_energy_input:
602		hwm_energy(ddat, val);
603		return 0;
604	default:
605		return -EOPNOTSUPP;
606	}
607}
608
609static umode_t
610hwm_curr_is_visible(const struct hwm_drvdata *ddat, u32 attr)
611{
612	struct drm_i915_private *i915 = ddat->uncore->i915;
613	u32 uval;
614
615	switch (attr) {
616	case hwmon_curr_crit:
617		return (hwm_pcode_read_i1(i915, &uval) ||
618			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
619	default:
620		return 0;
621	}
622}
623
624static int
625hwm_curr_read(struct hwm_drvdata *ddat, u32 attr, long *val)
626{
627	int ret;
628	u32 uval;
629
630	switch (attr) {
631	case hwmon_curr_crit:
632		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
633		if (ret)
634			return ret;
635		if (uval & POWER_SETUP_I1_WATTS)
636			return -ENODEV;
637		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
638				       SF_CURR, POWER_SETUP_I1_SHIFT);
639		return 0;
640	default:
641		return -EOPNOTSUPP;
642	}
643}
644
645static int
646hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
647{
648	u32 uval;
649
650	switch (attr) {
651	case hwmon_curr_crit:
652		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
653		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
654	default:
655		return -EOPNOTSUPP;
656	}
657}
658
659static umode_t
660hwm_fan_is_visible(const struct hwm_drvdata *ddat, u32 attr)
661{
662	struct i915_hwmon *hwmon = ddat->hwmon;
663
664	if (attr == hwmon_fan_input && i915_mmio_reg_valid(hwmon->rg.fan_speed))
665		return 0444;
666
667	return 0;
668}
669
670static int
671hwm_fan_input_read(struct hwm_drvdata *ddat, long *val)
672{
673	struct i915_hwmon *hwmon = ddat->hwmon;
674	struct hwm_fan_info *fi = &ddat->fi;
675	u64 rotations, time_now, time;
676	intel_wakeref_t wakeref;
677	u32 reg_val;
678	int ret = 0;
679
680	wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
681	mutex_lock(&hwmon->hwmon_lock);
682
683	reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.fan_speed);
684	time_now = get_jiffies_64();
685
686	/*
687	 * HW register value is accumulated count of pulses from
688	 * PWM fan with the scale of 2 pulses per rotation.
689	 */
690	rotations = (reg_val - fi->reg_val_prev) / 2;
691
692	time = jiffies_delta_to_msecs(time_now - fi->time_prev);
693	if (unlikely(!time)) {
694		ret = -EAGAIN;
695		goto exit;
696	}
697
698	/*
699	 * Calculate the fan speed in RPM by averaging the rotation count over
700	 * the time elapsed between two subsequent readings.
701	 * RPM = number of rotations * msecs per minute / time in msecs
702	 */
703	*val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
704
705	fi->reg_val_prev = reg_val;
706	fi->time_prev = time_now;
707exit:
708	mutex_unlock(&hwmon->hwmon_lock);
709	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
710	return ret;
711}
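As a worked example of the RPM formula (a stand-alone user-space sketch with made-up counter values, not driver code), a pulse-count delta of 120 over a 2000 ms window gives 1800 RPM:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg_val = 1120, reg_val_prev = 1000;	/* 120 pulses, 2 per rotation */
	uint64_t time_ms = 2000;			/* elapsed time between reads */

	uint64_t rotations = (reg_val - reg_val_prev) / 2;		/* 60 */
	uint64_t rpm = (rotations * 60000 + time_ms - 1) / time_ms;	/* round up */

	printf("%llu RPM\n", (unsigned long long)rpm);	/* 1800 */
	return 0;
}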
712
713static int
714hwm_fan_read(struct hwm_drvdata *ddat, u32 attr, long *val)
715{
716	if (attr == hwmon_fan_input)
717		return hwm_fan_input_read(ddat, val);
718
719	return -EOPNOTSUPP;
720}
721
722static umode_t
723hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
724	       u32 attr, int channel)
725{
726	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
727
728	switch (type) {
729	case hwmon_temp:
730		return hwm_temp_is_visible(ddat, attr);
731	case hwmon_in:
732		return hwm_in_is_visible(ddat, attr);
733	case hwmon_power:
734		return hwm_power_is_visible(ddat, attr, channel);
735	case hwmon_energy:
736		return hwm_energy_is_visible(ddat, attr);
737	case hwmon_curr:
738		return hwm_curr_is_visible(ddat, attr);
739	case hwmon_fan:
740		return hwm_fan_is_visible(ddat, attr);
741	default:
742		return 0;
743	}
744}
745
746static int
747hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
748	 int channel, long *val)
749{
750	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
751
752	switch (type) {
753	case hwmon_temp:
754		return hwm_temp_read(ddat, attr, val);
755	case hwmon_in:
756		return hwm_in_read(ddat, attr, val);
757	case hwmon_power:
758		return hwm_power_read(ddat, attr, channel, val);
759	case hwmon_energy:
760		return hwm_energy_read(ddat, attr, val);
761	case hwmon_curr:
762		return hwm_curr_read(ddat, attr, val);
763	case hwmon_fan:
764		return hwm_fan_read(ddat, attr, val);
765	default:
766		return -EOPNOTSUPP;
767	}
768}
769
770static int
771hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
772	  int channel, long val)
773{
774	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
775
776	switch (type) {
777	case hwmon_power:
778		return hwm_power_write(ddat, attr, channel, val);
779	case hwmon_curr:
780		return hwm_curr_write(ddat, attr, val);
781	default:
782		return -EOPNOTSUPP;
783	}
784}
785
786static const struct hwmon_ops hwm_ops = {
787	.is_visible = hwm_is_visible,
788	.read = hwm_read,
789	.write = hwm_write,
790};
791
792static const struct hwmon_chip_info hwm_chip_info = {
793	.ops = &hwm_ops,
794	.info = hwm_info,
795};
796
797static umode_t
798hwm_gt_is_visible(const void *drvdata, enum hwmon_sensor_types type,
799		  u32 attr, int channel)
800{
801	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
802
803	switch (type) {
804	case hwmon_energy:
805		return hwm_energy_is_visible(ddat, attr);
806	default:
807		return 0;
808	}
809}
810
811static int
812hwm_gt_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
813	    int channel, long *val)
814{
815	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
816
817	switch (type) {
818	case hwmon_energy:
819		return hwm_energy_read(ddat, attr, val);
820	default:
821		return -EOPNOTSUPP;
822	}
823}
824
825static const struct hwmon_ops hwm_gt_ops = {
826	.is_visible = hwm_gt_is_visible,
827	.read = hwm_gt_read,
828};
829
830static const struct hwmon_chip_info hwm_gt_chip_info = {
831	.ops = &hwm_gt_ops,
832	.info = hwm_gt_info,
833};
834
835static void
836hwm_get_preregistration_info(struct drm_i915_private *i915)
837{
838	struct i915_hwmon *hwmon = i915->hwmon;
839	struct intel_uncore *uncore = &i915->uncore;
840	struct hwm_drvdata *ddat = &hwmon->ddat;
841	intel_wakeref_t wakeref;
842	u32 val_sku_unit = 0;
843	struct intel_gt *gt;
844	long energy;
845	int i;
846
847	/* Available for all Gen12+/dGfx */
848	hwmon->rg.gt_perf_status = GEN12_RPSTAT1;
849
850	if (IS_DG1(i915) || IS_DG2(i915)) {
851		hwmon->rg.pkg_temp = PCU_PACKAGE_TEMPERATURE;
852		hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT;
853		hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU;
854		hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
855		hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
856		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
857		hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED;
858	} else {
859		hwmon->rg.pkg_temp = INVALID_MMIO_REG;
860		hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
861		hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
862		hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
863		hwmon->rg.energy_status_all = INVALID_MMIO_REG;
864		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
865		hwmon->rg.fan_speed = INVALID_MMIO_REG;
866	}
867
868	with_intel_runtime_pm(uncore->rpm, wakeref) {
869		/*
870		 * The contents of register hwmon->rg.pkg_power_sku_unit do not change,
871		 * so read it once and store the shift values.
872		 */
873		if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
874			val_sku_unit = intel_uncore_read(uncore,
875							 hwmon->rg.pkg_power_sku_unit);
876
877		/*
878		 * Store the initial fan register value, so that we can use it for
879		 * initial fan speed calculation.
880		 */
881		if (i915_mmio_reg_valid(hwmon->rg.fan_speed)) {
882			ddat->fi.reg_val_prev = intel_uncore_read(uncore,
883								  hwmon->rg.fan_speed);
884			ddat->fi.time_prev = get_jiffies_64();
885		}
886	}
887
888	hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
889	hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
890	hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
891
892	/*
893	 * Initialize 'struct hwm_energy_info', i.e. set fields to the
894	 * first value of the energy register read
895	 */
896	if (i915_mmio_reg_valid(hwmon->rg.energy_status_all))
897		hwm_energy(ddat, &energy);
898	if (i915_mmio_reg_valid(hwmon->rg.energy_status_tile)) {
899		for_each_gt(gt, i915, i)
900			hwm_energy(&hwmon->ddat_gt[i], &energy);
901	}
902}
903
904void i915_hwmon_register(struct drm_i915_private *i915)
905{
906	struct device *dev = i915->drm.dev;
907	struct i915_hwmon *hwmon;
908	struct device *hwmon_dev;
909	struct hwm_drvdata *ddat;
910	struct hwm_drvdata *ddat_gt;
911	struct intel_gt *gt;
912	int i;
913
914	/* hwmon is available only for dGfx */
915	if (!IS_DGFX(i915))
916		return;
917
918	hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
919	if (!hwmon)
920		return;
921
922	i915->hwmon = hwmon;
923	mutex_init(&hwmon->hwmon_lock);
924	ddat = &hwmon->ddat;
925
926	ddat->hwmon = hwmon;
927	ddat->uncore = &i915->uncore;
928	snprintf(ddat->name, sizeof(ddat->name), "i915");
929	ddat->gt_n = -1;
930	init_waitqueue_head(&ddat->waitq);
931
932	for_each_gt(gt, i915, i) {
933		ddat_gt = hwmon->ddat_gt + i;
934
935		ddat_gt->hwmon = hwmon;
936		ddat_gt->uncore = gt->uncore;
937		snprintf(ddat_gt->name, sizeof(ddat_gt->name), "i915_gt%u", i);
938		ddat_gt->gt_n = i;
939	}
940
941	hwm_get_preregistration_info(i915);
942
943	/*  hwmon_dev points to device hwmon<i> */
944	hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
945						    ddat,
946						    &hwm_chip_info,
947						    hwm_groups);
948	if (IS_ERR(hwmon_dev))
949		goto err;
950
951	ddat->hwmon_dev = hwmon_dev;
952
953	for_each_gt(gt, i915, i) {
954		ddat_gt = hwmon->ddat_gt + i;
955		/*
956		 * Create per-gt directories only if a per-gt attribute is
957		 * visible. Currently this is only energy
958		 */
959		if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
960			continue;
961
962		hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
963							    ddat_gt,
964							    &hwm_gt_chip_info,
965							    NULL);
966		if (!IS_ERR(hwmon_dev))
967			ddat_gt->hwmon_dev = hwmon_dev;
968	}
969	return;
970err:
971	i915_hwmon_unregister(i915);
972}
973
974void i915_hwmon_unregister(struct drm_i915_private *i915)
975{
976	struct i915_hwmon *hwmon = i915->hwmon;
977	struct intel_gt *gt;
978	int i;
979
980	if (!hwmon)
981		return;
982
983	for_each_gt(gt, i915, i)
984		if (hwmon->ddat_gt[i].hwmon_dev)
985			hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
986
987	if (hwmon->ddat.hwmon_dev)
988		hwmon_device_unregister(hwmon->ddat.hwmon_dev);
989
990	mutex_destroy(&hwmon->hwmon_lock);
991
992	kfree(i915->hwmon);
993	i915->hwmon = NULL;
994}
drivers/gpu/drm/i915/i915_hwmon.c, v6.2
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2022 Intel Corporation
  4 */
  5
  6#include <linux/hwmon.h>
  7#include <linux/hwmon-sysfs.h>
  8#include <linux/types.h>
  9
 10#include "i915_drv.h"
 11#include "i915_hwmon.h"
 12#include "i915_reg.h"
 13#include "intel_mchbar_regs.h"
 14#include "intel_pcode.h"
 15#include "gt/intel_gt.h"
 16#include "gt/intel_gt_regs.h"
 17
 18/*
 19 * SF_* - scale factors for particular quantities according to hwmon spec.
 20 * - voltage  - millivolts
 21 * - power  - microwatts
 22 * - curr   - milliamperes
 23 * - energy - microjoules
 24 * - time   - milliseconds
 25 */
 26#define SF_VOLTAGE	1000
 27#define SF_POWER	1000000
 28#define SF_CURR		1000
 29#define SF_ENERGY	1000000
 30#define SF_TIME		1000
 31
 32struct hwm_reg {
 33	i915_reg_t gt_perf_status;
 34	i915_reg_t pkg_power_sku_unit;
 35	i915_reg_t pkg_power_sku;
 36	i915_reg_t pkg_rapl_limit;
 37	i915_reg_t energy_status_all;
 38	i915_reg_t energy_status_tile;
 39};
 40
 41struct hwm_energy_info {
 42	u32 reg_val_prev;
 43	long accum_energy;			/* Accumulated energy for energy1_input */
 44};
 45
 46struct hwm_drvdata {
 47	struct i915_hwmon *hwmon;
 48	struct intel_uncore *uncore;
 49	struct device *hwmon_dev;
 50	struct hwm_energy_info ei;		/*  Energy info for energy1_input */
 51	char name[12];
 52	int gt_n;
 53};
 54
 55struct i915_hwmon {
 56	struct hwm_drvdata ddat;
 57	struct hwm_drvdata ddat_gt[I915_MAX_GT];
 58	struct mutex hwmon_lock;		/* counter overflow logic and rmw */
 59	struct hwm_reg rg;
 60	int scl_shift_power;
 61	int scl_shift_energy;
 62	int scl_shift_time;
 63};
 64
 65static void
 66hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
 67				    i915_reg_t reg, u32 clear, u32 set)
 68{
 69	struct i915_hwmon *hwmon = ddat->hwmon;
 70	struct intel_uncore *uncore = ddat->uncore;
 71	intel_wakeref_t wakeref;
 72
 73	mutex_lock(&hwmon->hwmon_lock);
 74
 75	with_intel_runtime_pm(uncore->rpm, wakeref)
 76		intel_uncore_rmw(uncore, reg, clear, set);
 77
 78	mutex_unlock(&hwmon->hwmon_lock);
 79}
 80
 81/*
 82 * This function's return type of u64 allows for the case where the scaling
 83 * of the field taken from the 32-bit register value might cause a result to
 84 * exceed 32 bits.
 85 */
 86static u64
 87hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
 88			 u32 field_msk, int nshift, u32 scale_factor)
 89{
 90	struct intel_uncore *uncore = ddat->uncore;
 91	intel_wakeref_t wakeref;
 92	u32 reg_value;
 93
 94	with_intel_runtime_pm(uncore->rpm, wakeref)
 95		reg_value = intel_uncore_read(uncore, rgadr);
 96
 97	reg_value = REG_FIELD_GET(field_msk, reg_value);
 98
 99	return mul_u64_u32_shr(reg_value, scale_factor, nshift);
100}
101
102static void
103hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr,
104			  int nshift, unsigned int scale_factor, long lval)
105{
106	u32 nval;
107
108	/* Computation in 64-bits to avoid overflow. Round to nearest. */
109	nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor);
110
111	hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr,
112					    PKG_PWR_LIM_1,
113					    REG_FIELD_PREP(PKG_PWR_LIM_1, nval));
114}
115
116/*
117 * hwm_energy - Obtain energy value
118 *
119 * The underlying energy hardware register is 32 bits and is subject to
120 * overflow. How long before overflow? For example, with a scaling bit
121 * shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
122 * a power draw of 1000 watts, the 32-bit counter will overflow in
123 * approximately 4.36 minutes.
124 *
125 * Examples:
126 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
127 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
128 *
129 * The function significantly increases overflow duration (from 4.36
130 * minutes) by accumulating the energy register into a 'long' as allowed by
131 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
132 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
133 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
134 * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
135 */
136static void
137hwm_energy(struct hwm_drvdata *ddat, long *energy)
138{
139	struct intel_uncore *uncore = ddat->uncore;
140	struct i915_hwmon *hwmon = ddat->hwmon;
141	struct hwm_energy_info *ei = &ddat->ei;
142	intel_wakeref_t wakeref;
143	i915_reg_t rgaddr;
144	u32 reg_val;
145
146	if (ddat->gt_n >= 0)
147		rgaddr = hwmon->rg.energy_status_tile;
148	else
149		rgaddr = hwmon->rg.energy_status_all;
150
151	mutex_lock(&hwmon->hwmon_lock);
152
153	with_intel_runtime_pm(uncore->rpm, wakeref)
154		reg_val = intel_uncore_read(uncore, rgaddr);
155
156	if (reg_val >= ei->reg_val_prev)
157		ei->accum_energy += reg_val - ei->reg_val_prev;
158	else
159		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
160	ei->reg_val_prev = reg_val;
161
162	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
163				  hwmon->scl_shift_energy);
164	mutex_unlock(&hwmon->hwmon_lock);
165}
166
167static ssize_t
168hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
169			     char *buf)
170{
171	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
172	struct i915_hwmon *hwmon = ddat->hwmon;
173	intel_wakeref_t wakeref;
174	u32 r, x, y, x_w = 2; /* 2 bits */
175	u64 tau4, out;
176
177	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
178		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
179
180	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
181	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
182	/*
183	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
184	 *     = (4 | x) << (y - 2)
185	 * where the -2 compensates for the two fractional bits in (4 | x).
186	 * However, because y can be < 2, we compute
187	 *     tau4 = (4 | x) << y
188	 * and add 2 when doing the final right shift to account for the units
189	 */
190	tau4 = ((1 << x_w) | x) << y;
191	/* val in hwmon interface units (millisec) */
192	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
193
194	return sysfs_emit(buf, "%llu\n", out);
195}
196
197static ssize_t
198hwm_power1_max_interval_store(struct device *dev,
199			      struct device_attribute *attr,
200			      const char *buf, size_t count)
201{
202	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
203	struct i915_hwmon *hwmon = ddat->hwmon;
204	u32 x, y, rxy, x_w = 2; /* 2 bits */
205	u64 tau4, r, max_win;
206	unsigned long val;
207	int ret;
208
209	ret = kstrtoul(buf, 0, &val);
210	if (ret)
211		return ret;
212
213	/*
214	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
215	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds
216	 */
217#define PKG_MAX_WIN_DEFAULT 0x12ull
218
219	/*
220	 * val must be < max in hwmon interface units. The steps below are
221	 * explained in i915_power1_max_interval_show()
222	 */
223	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
224	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
225	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
226	tau4 = ((1 << x_w) | x) << y;
227	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
228
229	if (val > max_win)
230		return -EINVAL;
231
232	/* val in hw units */
233	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
234	/* Convert to 1.x * power(2,y) */
235	if (!val)
236		return -EINVAL;
237	y = ilog2(val);
238	/* x = (val - (1 << y)) >> (y - 2); */
239	x = (val - (1ul << y)) << x_w >> y;
240
241	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
242
243	hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
244					    PKG_PWR_LIM_1_TIME, rxy);
245	return count;
246}
247
248static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
249			  hwm_power1_max_interval_show,
250			  hwm_power1_max_interval_store, 0);
251
252static struct attribute *hwm_attributes[] = {
253	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
254	NULL
255};
256
257static umode_t hwm_attributes_visible(struct kobject *kobj,
258				      struct attribute *attr, int index)
259{
260	struct device *dev = kobj_to_dev(kobj);
261	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
262	struct i915_hwmon *hwmon = ddat->hwmon;
263
264	if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
265		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? attr->mode : 0;
266
267	return 0;
268}
269
270static const struct attribute_group hwm_attrgroup = {
271	.attrs = hwm_attributes,
272	.is_visible = hwm_attributes_visible,
273};
274
275static const struct attribute_group *hwm_groups[] = {
276	&hwm_attrgroup,
277	NULL
278};
279
280static const struct hwmon_channel_info *hwm_info[] = {
281	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
282	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
283	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
284	HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
285	NULL
286};
287
288static const struct hwmon_channel_info *hwm_gt_info[] = {
289	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
290	NULL
291};
292
293/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
294static int hwm_pcode_read_i1(struct drm_i915_private *i915, u32 *uval)
295{
296	return snb_pcode_read_p(&i915->uncore, PCODE_POWER_SETUP,
297				POWER_SETUP_SUBCOMMAND_READ_I1, 0, uval);
298}
299
300static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval)
301{
302	return  snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
303				  POWER_SETUP_SUBCOMMAND_WRITE_I1, 0, uval);
304}
305
306static umode_t
307hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr)
308{
309	struct drm_i915_private *i915 = ddat->uncore->i915;
310
311	switch (attr) {
312	case hwmon_in_input:
313		return IS_DG1(i915) || IS_DG2(i915) ? 0444 : 0;
314	default:
315		return 0;
316	}
317}
318
319static int
320hwm_in_read(struct hwm_drvdata *ddat, u32 attr, long *val)
321{
322	struct i915_hwmon *hwmon = ddat->hwmon;
323	intel_wakeref_t wakeref;
324	u32 reg_value;
325
326	switch (attr) {
327	case hwmon_in_input:
328		with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
329			reg_value = intel_uncore_read(ddat->uncore, hwmon->rg.gt_perf_status);
330		/* HW register value in units of 2.5 millivolt */
331		*val = DIV_ROUND_CLOSEST(REG_FIELD_GET(GEN12_VOLTAGE_MASK, reg_value) * 25, 10);
332		return 0;
333	default:
334		return -EOPNOTSUPP;
335	}
336}
337
338static umode_t
339hwm_power_is_visible(const struct hwm_drvdata *ddat, u32 attr, int chan)
340{
341	struct drm_i915_private *i915 = ddat->uncore->i915;
342	struct i915_hwmon *hwmon = ddat->hwmon;
343	u32 uval;
344
345	switch (attr) {
346	case hwmon_power_max:
347		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? 0664 : 0;
348	case hwmon_power_rated_max:
349		return i915_mmio_reg_valid(hwmon->rg.pkg_power_sku) ? 0444 : 0;
350	case hwmon_power_crit:
351		return (hwm_pcode_read_i1(i915, &uval) ||
352			!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
353	default:
354		return 0;
355	}
356}
357
358static int
359hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
360{
361	struct i915_hwmon *hwmon = ddat->hwmon;
362	int ret;
363	u32 uval;
364
365	switch (attr) {
366	case hwmon_power_max:
367		*val = hwm_field_read_and_scale(ddat,
368						hwmon->rg.pkg_rapl_limit,
369						PKG_PWR_LIM_1,
370						hwmon->scl_shift_power,
371						SF_POWER);
372		return 0;
373	case hwmon_power_rated_max:
374		*val = hwm_field_read_and_scale(ddat,
375						hwmon->rg.pkg_power_sku,
376						PKG_PKG_TDP,
377						hwmon->scl_shift_power,
378						SF_POWER);
379		return 0;
380	case hwmon_power_crit:
381		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
382		if (ret)
383			return ret;
384		if (!(uval & POWER_SETUP_I1_WATTS))
385			return -ENODEV;
386		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
387				       SF_POWER, POWER_SETUP_I1_SHIFT);
388		return 0;
389	default:
390		return -EOPNOTSUPP;
391	}
392}
393
394static int
395hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
396{
397	struct i915_hwmon *hwmon = ddat->hwmon;
398	u32 uval;
399
400	switch (attr) {
401	case hwmon_power_max:
402		hwm_field_scale_and_write(ddat,
403					  hwmon->rg.pkg_rapl_limit,
404					  hwmon->scl_shift_power,
405					  SF_POWER, val);
406		return 0;
407	case hwmon_power_crit:
408		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
409		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
410	default:
411		return -EOPNOTSUPP;
412	}
413}
414
415static umode_t
416hwm_energy_is_visible(const struct hwm_drvdata *ddat, u32 attr)
417{
418	struct i915_hwmon *hwmon = ddat->hwmon;
419	i915_reg_t rgaddr;
420
421	switch (attr) {
422	case hwmon_energy_input:
423		if (ddat->gt_n >= 0)
424			rgaddr = hwmon->rg.energy_status_tile;
425		else
426			rgaddr = hwmon->rg.energy_status_all;
427		return i915_mmio_reg_valid(rgaddr) ? 0444 : 0;
428	default:
429		return 0;
430	}
431}
432
433static int
434hwm_energy_read(struct hwm_drvdata *ddat, u32 attr, long *val)
435{
436	switch (attr) {
437	case hwmon_energy_input:
438		hwm_energy(ddat, val);
439		return 0;
440	default:
441		return -EOPNOTSUPP;
442	}
443}
444
445static umode_t
446hwm_curr_is_visible(const struct hwm_drvdata *ddat, u32 attr)
447{
448	struct drm_i915_private *i915 = ddat->uncore->i915;
449	u32 uval;
450
451	switch (attr) {
452	case hwmon_curr_crit:
453		return (hwm_pcode_read_i1(i915, &uval) ||
454			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
455	default:
456		return 0;
457	}
458}
459
460static int
461hwm_curr_read(struct hwm_drvdata *ddat, u32 attr, long *val)
462{
463	int ret;
464	u32 uval;
465
466	switch (attr) {
467	case hwmon_curr_crit:
468		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
469		if (ret)
470			return ret;
471		if (uval & POWER_SETUP_I1_WATTS)
472			return -ENODEV;
473		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
474				       SF_CURR, POWER_SETUP_I1_SHIFT);
475		return 0;
476	default:
477		return -EOPNOTSUPP;
478	}
479}
480
481static int
482hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
483{
484	u32 uval;
485
486	switch (attr) {
487	case hwmon_curr_crit:
488		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
489		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
490	default:
491		return -EOPNOTSUPP;
492	}
493}
494
495static umode_t
496hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
497	       u32 attr, int channel)
498{
499	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
500
501	switch (type) {
502	case hwmon_in:
503		return hwm_in_is_visible(ddat, attr);
504	case hwmon_power:
505		return hwm_power_is_visible(ddat, attr, channel);
506	case hwmon_energy:
507		return hwm_energy_is_visible(ddat, attr);
508	case hwmon_curr:
509		return hwm_curr_is_visible(ddat, attr);
510	default:
511		return 0;
512	}
513}
514
515static int
516hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
517	 int channel, long *val)
518{
519	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
520
521	switch (type) {
522	case hwmon_in:
523		return hwm_in_read(ddat, attr, val);
524	case hwmon_power:
525		return hwm_power_read(ddat, attr, channel, val);
526	case hwmon_energy:
527		return hwm_energy_read(ddat, attr, val);
528	case hwmon_curr:
529		return hwm_curr_read(ddat, attr, val);
530	default:
531		return -EOPNOTSUPP;
532	}
533}
534
535static int
536hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
537	  int channel, long val)
538{
539	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
540
541	switch (type) {
542	case hwmon_power:
543		return hwm_power_write(ddat, attr, channel, val);
544	case hwmon_curr:
545		return hwm_curr_write(ddat, attr, val);
546	default:
547		return -EOPNOTSUPP;
548	}
549}
550
551static const struct hwmon_ops hwm_ops = {
552	.is_visible = hwm_is_visible,
553	.read = hwm_read,
554	.write = hwm_write,
555};
556
557static const struct hwmon_chip_info hwm_chip_info = {
558	.ops = &hwm_ops,
559	.info = hwm_info,
560};
561
562static umode_t
563hwm_gt_is_visible(const void *drvdata, enum hwmon_sensor_types type,
564		  u32 attr, int channel)
565{
566	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
567
568	switch (type) {
569	case hwmon_energy:
570		return hwm_energy_is_visible(ddat, attr);
571	default:
572		return 0;
573	}
574}
575
576static int
577hwm_gt_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
578	    int channel, long *val)
579{
580	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
581
582	switch (type) {
583	case hwmon_energy:
584		return hwm_energy_read(ddat, attr, val);
585	default:
586		return -EOPNOTSUPP;
587	}
588}
589
590static const struct hwmon_ops hwm_gt_ops = {
591	.is_visible = hwm_gt_is_visible,
592	.read = hwm_gt_read,
593};
594
595static const struct hwmon_chip_info hwm_gt_chip_info = {
596	.ops = &hwm_gt_ops,
597	.info = hwm_gt_info,
598};
599
600static void
601hwm_get_preregistration_info(struct drm_i915_private *i915)
602{
603	struct i915_hwmon *hwmon = i915->hwmon;
604	struct intel_uncore *uncore = &i915->uncore;
605	struct hwm_drvdata *ddat = &hwmon->ddat;
606	intel_wakeref_t wakeref;
607	u32 val_sku_unit = 0;
608	struct intel_gt *gt;
609	long energy;
610	int i;
611
612	/* Available for all Gen12+/dGfx */
613	hwmon->rg.gt_perf_status = GEN12_RPSTAT1;
614
615	if (IS_DG1(i915) || IS_DG2(i915)) {
616		hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT;
617		hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU;
618		hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
619		hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
620		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
621	} else if (IS_XEHPSDV(i915)) {
622		hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT;
623		hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
624		hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT;
625		hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS;
626		hwmon->rg.energy_status_tile = GT0_PACKAGE_ENERGY_STATUS;
627	} else {
628		hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
629		hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
630		hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
631		hwmon->rg.energy_status_all = INVALID_MMIO_REG;
632		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
633	}
634
635	with_intel_runtime_pm(uncore->rpm, wakeref) {
636		/*
637		 * The contents of register hwmon->rg.pkg_power_sku_unit do not change,
638		 * so read it once and store the shift values.
639		 */
640		if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
641			val_sku_unit = intel_uncore_read(uncore,
642							 hwmon->rg.pkg_power_sku_unit);
643	}
644
645	hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
646	hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
647	hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
648
649	/*
650	 * Initialize 'struct hwm_energy_info', i.e. set fields to the
651	 * first value of the energy register read
652	 */
653	if (i915_mmio_reg_valid(hwmon->rg.energy_status_all))
654		hwm_energy(ddat, &energy);
655	if (i915_mmio_reg_valid(hwmon->rg.energy_status_tile)) {
656		for_each_gt(gt, i915, i)
657			hwm_energy(&hwmon->ddat_gt[i], &energy);
658	}
659}
660
661void i915_hwmon_register(struct drm_i915_private *i915)
662{
663	struct device *dev = i915->drm.dev;
664	struct i915_hwmon *hwmon;
665	struct device *hwmon_dev;
666	struct hwm_drvdata *ddat;
667	struct hwm_drvdata *ddat_gt;
668	struct intel_gt *gt;
669	int i;
670
671	/* hwmon is available only for dGfx */
672	if (!IS_DGFX(i915))
673		return;
674
675	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
676	if (!hwmon)
677		return;
678
679	i915->hwmon = hwmon;
680	mutex_init(&hwmon->hwmon_lock);
681	ddat = &hwmon->ddat;
682
683	ddat->hwmon = hwmon;
684	ddat->uncore = &i915->uncore;
685	snprintf(ddat->name, sizeof(ddat->name), "i915");
686	ddat->gt_n = -1;
687
688	for_each_gt(gt, i915, i) {
689		ddat_gt = hwmon->ddat_gt + i;
690
691		ddat_gt->hwmon = hwmon;
692		ddat_gt->uncore = gt->uncore;
693		snprintf(ddat_gt->name, sizeof(ddat_gt->name), "i915_gt%u", i);
694		ddat_gt->gt_n = i;
695	}
696
697	hwm_get_preregistration_info(i915);
698
699	/*  hwmon_dev points to device hwmon<i> */
700	hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
701							 ddat,
702							 &hwm_chip_info,
703							 hwm_groups);
704	if (IS_ERR(hwmon_dev)) {
705		i915->hwmon = NULL;
706		return;
707	}
708
709	ddat->hwmon_dev = hwmon_dev;
710
711	for_each_gt(gt, i915, i) {
712		ddat_gt = hwmon->ddat_gt + i;
713		/*
714		 * Create per-gt directories only if a per-gt attribute is
715		 * visible. Currently this is only energy
716		 */
717		if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
718			continue;
719
720		hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
721								 ddat_gt,
722								 &hwm_gt_chip_info,
723								 NULL);
724		if (!IS_ERR(hwmon_dev))
725			ddat_gt->hwmon_dev = hwmon_dev;
726	}
727}
728
729void i915_hwmon_unregister(struct drm_i915_private *i915)
730{
731	fetch_and_zero(&i915->hwmon);
732}