// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/types.h>

#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"

enum xe_hwmon_reg {
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
};

enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */
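/*
 * Power, energy and time register fields below are scaled to these hwmon
 * units as (field * SF_<quantity>) >> hwmon->scl_shift_<quantity>; writes
 * apply the inverse conversion with round-to-nearest.
 */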

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @gt: primary gt */
	struct xe_gt *gt;
	/** @hwmon_lock: lock for rw attributes */
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energy1_input */
	struct xe_hwmon_energy_info ei;
};

static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
{
	struct xe_device *xe = gt_to_xe(hwmon->gt);
	struct xe_reg reg = XE_REG(0);

	switch (hwmon_reg) {
	case REG_PKG_RAPL_LIMIT:
		if (xe->info.platform == XE_DG2)
			reg = PCU_CR_PACKAGE_RAPL_LIMIT;
		else if (xe->info.platform == XE_PVC)
			reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
		break;
	case REG_PKG_POWER_SKU:
		if (xe->info.platform == XE_DG2)
			reg = PCU_CR_PACKAGE_POWER_SKU;
		else if (xe->info.platform == XE_PVC)
			reg = PVC_GT0_PACKAGE_POWER_SKU;
		break;
	case REG_PKG_POWER_SKU_UNIT:
		if (xe->info.platform == XE_DG2)
			reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_PVC)
			reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
		break;
	case REG_GT_PERF_STATUS:
		if (xe->info.platform == XE_DG2)
			reg = GT_PERF_STATUS;
		break;
	case REG_PKG_ENERGY_STATUS:
		if (xe->info.platform == XE_DG2)
			reg = PCU_CR_PACKAGE_ENERGY_STATUS;
		else if (xe->info.platform == XE_PVC)
			reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
		break;
	default:
		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
		break;
	}

	return reg.raw;
}

static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				 enum xe_hwmon_reg_operation operation, u64 *value,
				 u32 clr, u32 set)
{
	struct xe_reg reg;

	reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);

	if (!reg.raw)
		return;

	switch (operation) {
	case REG_READ32:
		*value = xe_mmio_read32(hwmon->gt, reg);
		break;
	case REG_RMW32:
		*value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
		break;
	case REG_READ64:
		*value = xe_mmio_read64_2x32(hwmon->gt, reg);
		break;
	default:
		drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
			 operation);
		break;
	}
}

#define PL1_DISABLE 0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
 * same pattern for sysfs: allow arbitrary PL1 limits to be set but display
 * clamped values when read.
 */
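/*
 * For example, on a part whose SKU minimum is 25 W (illustrative value), a
 * power1_max write of 10 W is accepted but reads back as 25 W.
 */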
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
{
	u64 reg_val, min, max;

	mutex_lock(&hwmon->hwmon_lock);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);
	/* Check if PL1 limit is disabled */
	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
		*value = PL1_DISABLE;
		goto unlock;
	}

	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);

	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);
	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*value = clamp_t(u64, *value, min, max);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
}

static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
{
	int ret = 0;
	u64 reg_val;

	mutex_lock(&hwmon->hwmon_lock);

	/* Disable the PL1 limit and verify, as the limit cannot be disabled on all platforms */
	if (value == PL1_DISABLE) {
		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
				     PKG_PWR_LIM_1_EN, 0);
		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
				     PKG_PWR_LIM_1_EN, 0);

		if (reg_val & PKG_PWR_LIM_1_EN) {
			ret = -EOPNOTSUPP;
			goto unlock;
		}
	}

	/* Computation in 64 bits to avoid overflow. Round to nearest. */
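	/*
	 * For example, assuming a power unit of 1/8 W (scl_shift_power == 3),
	 * a request of 25000000 uW becomes (25000000 << 3) / SF_POWER = 200
	 * hardware units.
	 */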
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
			     PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
{
	u64 reg_val;

	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0);
	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32 bits and is subject to
 * overflow. How long before overflow? For example, with a scaling bit shift
 * of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and a power draw of
 * 1000 watts, the 32-bit counter will overflow in approximately 4.36 minutes.
 *
 * Examples:
 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
 *
 * The function significantly increases the overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128-bit arithmetic (see mul_u64_u32_shr()), a
 * 'long' of 63 bits, an SF_ENERGY of 1e6 (~20 bits) and an
 * hwmon->scl_shift_energy of 14 bits, we have 57 (63 - 20 + 14) bits before
 * energy1_input overflows. At 1000 W this is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
{
	struct xe_hwmon_energy_info *ei = &hwmon->ei;
	u64 reg_val;

	xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
			     &reg_val, 0, 0);

	if (reg_val >= ei->reg_val_prev)
		ei->accum_energy += reg_val - ei->reg_val_prev;
	else
		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;

	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}

static ssize_t
xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	u32 x, y, x_w = 2; /* 2 bits */
	u64 r, tau4, out;

	xe_device_mem_access_get(gt_to_xe(hwmon->gt));

	mutex_lock(&hwmon->hwmon_lock);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
			     REG_READ32, &r, 0, 0);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_device_mem_access_put(gt_to_xe(hwmon->gt));

	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);

	/*
	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (4 | x) is the 1.x value with two fractional bits, and the
	 * '- 2' in the shift drops those fractional bits again. As x is
	 * 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75.
	 *
	 * As y can be < 2, we compute tau4 = (4 | x) << y
	 * and then add 2 when doing the final right shift to account for units.
	 */
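	/*
	 * Worked example (illustrative): x = 1, y = 5 and scl_shift_time = 0xa
	 * give tau4 = (4 | 1) << 5 = 160, so out = (160 * 1000) >> 12 = 39 ms,
	 * i.e. 1.25 * 2^5 hardware time units of 1/1024 s each.
	 */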
	tau4 = ((1 << x_w) | x) << y;

	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}

static ssize_t
xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, it is observed that existing discrete GPUs do not provide a correct
	 * PKG_MAX_WIN value, therefore a default constant value is used. For future discrete
	 * GPUs this may get resolved, in which case PKG_MAX_WIN should be obtained from
	 * PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power1_max_interval_show()
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = ((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
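	/*
	 * With the defaults above (x = 0, y = 0x12, scl_shift_time = 0xa) this
	 * works out to max_win = (4 << 18) * 1000 >> 12 = 256000 ms, i.e. the
	 * 256 seconds mentioned in the comment above.
	 */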

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}
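	/*
	 * For example, a request of 1000 ms with scl_shift_time = 0xa converts
	 * to 1024 hw units, giving y = 10, x = 0, i.e. exactly 1.0 * 2^10 units.
	 */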

	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);

	xe_device_mem_access_get(gt_to_xe(hwmon->gt));

	mutex_lock(&hwmon->hwmon_lock);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, (u64 *)&r,
			     PKG_PWR_LIM_1_TIME, rxy);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_device_mem_access_put(gt_to_xe(hwmon->gt));

	return count;
}

static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power1_max_interval_show,
			  xe_hwmon_power1_max_interval_store, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	NULL
};

static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;

	xe_device_mem_access_get(gt_to_xe(hwmon->gt));

	if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
		ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;

	xe_device_mem_access_put(gt_to_xe(hwmon->gt));

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

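/*
 * Standard hwmon channels; the hwmon core exposes these as power1_max,
 * power1_rated_max, power1_crit, curr1_crit, in0_input and energy1_input.
 */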
static const struct hwmon_channel_info *hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
	HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
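/*
 * When bit 31 (POWER_SETUP_I1_WATTS) is set the I1 value is in watts and is
 * exposed as power1_crit; when it is clear the value is a current and is
 * exposed as curr1_crit (see the is_visible callbacks below).
 */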
static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
{
	/* Avoid Illegal Subcommand error */
	if (gt_to_xe(gt)->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
			     POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
{
	return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
			      POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      uval);
}

static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
	if (ret)
		goto unlock;

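	/*
	 * I1 is a fixed-point value with POWER_SETUP_I1_SHIFT fractional bits;
	 * e.g. assuming a 10.6 format, a raw value of 1280 reads back as
	 * 1280 * scale_factor >> 6, i.e. 20 A (or 20 W for power1_crit).
	 */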
	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
{
	u64 reg_val;

	xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
			     REG_READ32, &reg_val, 0, 0);
	/* HW register value is in units of 2.5 millivolts */
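	/* e.g. a raw field value of 300 is reported as 300 * 2500 / 1000 = 750 mV */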
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}

static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
{
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? 0664 : 0;
	case hwmon_power_rated_max:
		return xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU) ? 0444 : 0;
	case hwmon_power_crit:
		return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
			!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	default:
		return 0;
	}
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
{
	switch (attr) {
	case hwmon_power_max:
		xe_hwmon_power_max_read(hwmon, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
{
	switch (attr) {
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
{
	u32 uval;

	switch (attr) {
	case hwmon_curr_crit:
		return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	default:
		return 0;
	}
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr)
{
	switch (attr) {
	case hwmon_in_input:
		return xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr)
{
	switch (attr) {
	case hwmon_energy_input:
		return xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_device_mem_access_get(gt_to_xe(hwmon->gt));

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr);
		break;
	default:
		ret = 0;
		break;
	}

	xe_device_mem_access_put(gt_to_xe(hwmon->gt));

	return ret;
}

static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_device_mem_access_get(gt_to_xe(hwmon->gt));

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_read(hwmon, attr, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_device_mem_access_put(gt_to_xe(hwmon->gt));

	return ret;
}

static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_device_mem_access_get(gt_to_xe(hwmon->gt));

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_device_mem_access_put(gt_to_xe(hwmon->gt));

	return ret;
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
	struct xe_hwmon *hwmon = xe->hwmon;
	long energy;
	u64 val_sku_unit = 0;

	/*
	 * The contents of register PKG_POWER_SKU_UNIT do not change,
	 * so read it once and store the shift values.
	 */
	if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
		xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
				     REG_READ32, &val_sku_unit, 0, 0);
		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
	}

	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read.
	 */
	if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, 0))
		xe_hwmon_energy_get(hwmon, &energy);
}

static void xe_hwmon_mutex_destroy(void *arg)
{
	struct xe_hwmon *hwmon = arg;

	mutex_destroy(&hwmon->hwmon_lock);
}

void xe_hwmon_register(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct xe_hwmon *hwmon;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(xe))
		return;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return;

	xe->hwmon = hwmon;

	mutex_init(&hwmon->hwmon_lock);
	if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
		return;

	/* Use the primary GT to access device-level properties */
	hwmon->gt = xe->tiles[0].primary_gt;

	xe_hwmon_get_preregistration_info(xe);

	drm_dbg(&xe->drm, "Register xe hwmon interface\n");

	/* hwmon_dev points to device hwmon<i> */
	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
								&hwmon_chip_info,
								hwmon_groups);

	if (IS_ERR(hwmon->hwmon_dev)) {
		drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
		xe->hwmon = NULL;
		return;
	}
}