// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/types.h>

#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"

enum xe_hwmon_reg {
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
};

enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

enum xe_hwmon_channel {
	CHANNEL_CARD,
	CHANNEL_PKG,
	CHANNEL_MAX,
};

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */
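/*
 * Illustrative example: with these scale factors a 25 W PL1 power limit is
 * reported through hwmon as 25000000 (microwatts) and a 28 ms time window
 * as 28 (milliseconds).
 */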

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @xe: Xe device */
	struct xe_device *xe;
	/** @hwmon_lock: lock for rw attributes */
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
};

static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				      int channel)
{
	struct xe_device *xe = hwmon->xe;

	switch (hwmon_reg) {
	case REG_PKG_RAPL_LIMIT:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_RAPL_LIMIT;
			else
				return BMG_PLATFORM_POWER_LIMIT;
		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PACKAGE_RAPL_LIMIT;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_RAPL_LIMIT;
		}
		break;
	case REG_PKG_POWER_SKU:
		if (xe->info.platform == XE_BATTLEMAGE)
			return BMG_PACKAGE_POWER_SKU;
		else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_POWER_SKU;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_POWER_SKU;
		break;
	case REG_PKG_POWER_SKU_UNIT:
		if (xe->info.platform == XE_BATTLEMAGE)
			return BMG_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_PVC)
			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_DG2)
			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
		break;
	case REG_GT_PERF_STATUS:
		if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
			return GT_PERF_STATUS;
		break;
	case REG_PKG_ENERGY_STATUS:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_ENERGY_STATUS;
			else
				return BMG_PLATFORM_ENERGY_STATUS;
		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PLATFORM_ENERGY_STATUS;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_ENERGY_STATUS;
		}
		break;
	default:
		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
		break;
	}

	return XE_REG(0);
}

#define PL1_DISABLE	0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
 * clamped values when read.
 */
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	u64 reg_val, min, max;
	struct xe_device *xe = hwmon->xe;
	struct xe_reg rapl_limit, pkg_power_sku;
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
	pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);

	/*
	 * The validity of REG_PKG_RAPL_LIMIT is already checked in
	 * xe_hwmon_power_is_visible, so it is not checked again here.
	 */
	if (!xe_reg_is_valid(pkg_power_sku)) {
		drm_warn(&xe->drm, "pkg_power_sku invalid\n");
		*value = 0;
		return;
	}

	mutex_lock(&hwmon->hwmon_lock);

	reg_val = xe_mmio_read32(mmio, rapl_limit);
	/* Check if PL1 limit is disabled */
	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
		*value = PL1_DISABLE;
		goto unlock;
	}

	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);

	reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*value = clamp_t(u64, *value, min, max);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
}

static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	int ret = 0;
	u64 reg_val;
	struct xe_reg rapl_limit;

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);

	mutex_lock(&hwmon->hwmon_lock);

	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
	if (value == PL1_DISABLE) {
		reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
		reg_val = xe_mmio_read32(mmio, rapl_limit);
		if (reg_val & PKG_PWR_LIM_1_EN) {
			drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
			ret = -EOPNOTSUPP;
		}
		goto unlock;
	}

	/* Computation in 64-bits to avoid overflow. Round to nearest. */
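	/*
	 * Illustrative example (assuming a scl_shift_power of 3, i.e. 1/8 W
	 * register units): a requested limit of 250000000 uW becomes
	 * DIV_ROUND_CLOSEST_ULL(250000000 << 3, 1000000) = 2000 register units.
	 */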
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
	reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);

unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
	u64 reg_val;

	/*
	 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so the
	 * validity check for this register can be skipped.
	 * See xe_hwmon_power_is_visible.
	 */
	reg_val = xe_mmio_read32(mmio, reg);
	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32-bits and is subject to
 * overflow. How long before overflow? For example, with an example
 * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
 * a power draw of 1000 watts, the 32-bit counter will overflow in
 * approximately 4.36 minutes.
 *
 * Examples:
 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
 *
 * The function significantly increases overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
							channel));

	if (reg_val >= ei->reg_val_prev)
		ei->accum_energy += reg_val - ei->reg_val_prev;
	else
		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;

	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}

static ssize_t
xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, x_w = 2; /* 2 bits */
	u64 r, tau4, out;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);

	/*
	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (y - 2) ensures a 1.x fixed point representation of tau.
	 * As x is 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75.
	 *
	 * As y can be < 2, we compute tau4 = (4 | x) << y
	 * and then add 2 when doing the final right shift to account for units
	 */
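	/*
	 * Worked example (illustrative): with x = 0, y = 0x12 and an assumed
	 * scl_shift_time of 0xa, tau4 = 4 << 18 and
	 * out = tau4 * 1000 >> (0xa + 2) = 256000 ms, i.e. the 256 second
	 * maximum mentioned in xe_hwmon_power_max_interval_store().
	 */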
	tau4 = (u64)((1 << x_w) | x) << y;

	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}

static ssize_t
xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int ret;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, it is observed that existing discrete GPUs do not provide a correct
	 * PKG_MAX_WIN value, therefore a default constant value is used. For future
	 * discrete GPUs this may get resolved, in which case PKG_MAX_WIN should be
	 * obtained from PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power_max_interval_show()
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
			  PKG_PWR_LIM_1_TIME, rxy);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	return count;
}

static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_CARD);

static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_PKG);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	NULL
};

static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;

	xe_pm_runtime_get(hwmon->xe);

	ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	NULL
};
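
/*
 * In the table above the first set of flags describes hwmon channel 1 (the
 * "card" label) and the second set channel 2 (the "pkg" label); see
 * xe_hwmon_read_label().
 */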

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
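/*
 * When the POWER_SETUP_I1_WATTS bit is set the I1 limit is power-based and is
 * exposed through the package power_crit attribute; when it is clear the limit
 * is current-based and is exposed through curr_crit instead (see the
 * *_is_visible() callbacks below).
 */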
static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Avoid Illegal Subcommand error */
	if (hwmon->xe->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						    POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						    POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      (uval & POWER_SETUP_I1_DATA_MASK));
}

static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
					 long *value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
	if (ret)
		goto unlock;

	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
					  long value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
	/* HW register value in units of 2.5 millivolt */
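	/* e.g. (illustrative) a raw field value of 300 maps to 300 * 2500 / 1000 = 750 mV */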
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}

static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
							channel)) ? 0664 : 0;
	case hwmon_power_rated_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
							channel)) ? 0444 : 0;
	case hwmon_power_crit:
		if (channel == CHANNEL_PKG)
			return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
				!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
		break;
	case hwmon_power_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_power_max:
		xe_hwmon_power_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, channel, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	/* hwmon sysfs attributes for current are available only for the package channel */
	if (channel != CHANNEL_PKG)
		return 0;

	switch (attr) {
	case hwmon_curr_crit:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	case hwmon_curr_label:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_energy_input:
	case hwmon_energy_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
		break;
	default:
		ret = 0;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int xe_hwmon_read_label(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_power:
	case hwmon_energy:
	case hwmon_curr:
	case hwmon_in:
		if (channel == CHANNEL_CARD)
			*str = "card";
		else if (channel == CHANNEL_PKG)
			*str = "pkg";
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_hwmon *hwmon = xe->hwmon;
	long energy;
	u64 val_sku_unit = 0;
	int channel;
	struct xe_reg pkg_power_sku_unit;

	/*
	 * The contents of register PKG_POWER_SKU_UNIT do not change,
	 * so read it once and store the shift values.
	 */
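	/*
	 * Note (for orientation): the stored shifts are exponents, i.e. raw
	 * power, energy and time register values are in units of
	 * 1/2^scl_shift_* W, J and s respectively, which is why the read and
	 * write helpers above convert with mul_u64_u32_shr() and shifted
	 * DIV_ROUND_CLOSEST_ULL() calls.
	 */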
	pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
	if (xe_reg_is_valid(pkg_power_sku_unit)) {
		val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
	}

	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	for (channel = 0; channel < CHANNEL_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
			xe_hwmon_energy_get(hwmon, channel, &energy);
}

static void xe_hwmon_mutex_destroy(void *arg)
{
	struct xe_hwmon *hwmon = arg;

	mutex_destroy(&hwmon->hwmon_lock);
}

void xe_hwmon_register(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct xe_hwmon *hwmon;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(xe))
		return;

	/* hwmon is not available on VFs */
	if (IS_SRIOV_VF(xe))
		return;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return;

	xe->hwmon = hwmon;

	mutex_init(&hwmon->hwmon_lock);
	if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
		return;

	/* There's only one instance of hwmon per device */
	hwmon->xe = xe;

	xe_hwmon_get_preregistration_info(xe);

	drm_dbg(&xe->drm, "Register xe hwmon interface\n");

	/* hwmon_dev points to device hwmon<i> */
	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
								 &hwmon_chip_info,
								 hwmon_groups);

	if (IS_ERR(hwmon->hwmon_dev)) {
		drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
		xe->hwmon = NULL;
		return;
	}
}