Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.5.6.
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2023 Intel Corporation
  4 */
  5
  6#include <drm/drm_managed.h>
  7
  8#include "xe_force_wake.h"
  9#include "xe_device.h"
 10#include "xe_gt.h"
 11#include "xe_gt_idle.h"
 12#include "xe_gt_sysfs.h"
 13#include "xe_guc_pc.h"
 14#include "regs/xe_gt_regs.h"
 15#include "xe_macros.h"
 16#include "xe_mmio.h"
 17#include "xe_pm.h"
 18#include "xe_sriov.h"
 19
 20/**
 21 * DOC: Xe GT Idle
 22 *
 23 * Contains functions that init GT idle features like C6
 24 *
 25 * device/gt#/gtidle/name - name of the state
 26 * device/gt#/gtidle/idle_residency_ms - Provides residency of the idle state in ms
 27 * device/gt#/gtidle/idle_status - Provides current idle state
 28 */
 29
 30static struct xe_gt_idle *dev_to_gtidle(struct device *dev)
 31{
 32	struct kobject *kobj = &dev->kobj;
 33
 34	return &kobj_to_gt(kobj->parent)->gtidle;
 35}
 36
/* Resolve the GT that embeds this xe_gt_idle (inverse of &gt->gtidle) */
static struct xe_gt *gtidle_to_gt(struct xe_gt_idle *gtidle)
{
	return container_of(gtidle, struct xe_gt, gtidle);
}
 41
 42static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
 43{
 44	return &gtidle_to_gt(gtidle)->uc.guc.pc;
 45}
 46
 47static struct xe_device *
 48pc_to_xe(struct xe_guc_pc *pc)
 49{
 50	struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
 51	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
 52
 53	return gt_to_xe(gt);
 54}
 55
 56static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
 57{
 58	switch (state) {
 59	case GT_IDLE_C0:
 60		return "gt-c0";
 61	case GT_IDLE_C6:
 62		return "gt-c6";
 63	default:
 64		return "unknown";
 65	}
 66}
 67
 68static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
 69{
 70	u64 delta, overflow_residency, prev_residency;
 71
 72	overflow_residency = BIT_ULL(32);
 73
 74	/*
 75	 * Counter wrap handling
 76	 * Store previous hw counter values for counter wrap-around handling
 77	 * Relying on sufficient frequency of queries otherwise counters can still wrap.
 78	 */
 79	prev_residency = gtidle->prev_residency;
 80	gtidle->prev_residency = cur_residency;
 81
 82	/* delta */
 83	if (cur_residency >= prev_residency)
 84		delta = cur_residency - prev_residency;
 85	else
 86		delta = cur_residency + (overflow_residency - prev_residency);
 87
 88	/* Add delta to extended raw driver copy of idle residency */
 89	cur_residency = gtidle->cur_residency + delta;
 90	gtidle->cur_residency = cur_residency;
 91
 92	/* residency multiplier in ns, convert to ms */
 93	cur_residency = mul_u64_u32_div(cur_residency, gtidle->residency_multiplier, 1e6);
 94
 95	return cur_residency;
 96}
 97
/**
 * xe_gt_idle_enable_pg - Enable coarse power gating (CPG) on a GT
 * @gt: GT object
 *
 * Builds the power-gate enable mask for the render and media engines
 * present on @gt and programs it into POWERGATE_ENABLE under GT
 * forcewake.  Returns early for SR-IOV VFs and for PVC, where CPG is
 * deliberately left disabled.
 */
void xe_gt_idle_enable_pg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_idle *gtidle = &gt->gtidle;
	struct xe_mmio *mmio = &gt->mmio;
	u32 vcs_mask, vecs_mask;
	unsigned int fw_ref;
	int i, j;

	/* Power gating is not configured by VFs */
	if (IS_SRIOV_VF(xe))
		return;

	/* Disable CPG for PVC */
	if (xe->info.platform == XE_PVC)
		return;

	xe_device_assert_mem_access(gt_to_xe(gt));

	vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
	vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);

	/* Media power gating only when video decode/enhance engines exist */
	if (vcs_mask || vecs_mask)
		gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;

	/* Render power gating applies to non-media GTs only */
	if (!xe_gt_is_media_type(gt))
		gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;

	/* Per-VCS HCP/MFX-VDENC power gating for each present engine (not on DG1) */
	if (xe->info.platform != XE_DG1) {
		for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
			if ((gt->info.engine_mask & BIT(i)))
				gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
							     VDN_MFXVDENC_POWERGATE_ENABLE(j));
		}
	}

	/*
	 * NOTE(review): fw_ref is not checked here; the MMIO writes below
	 * proceed even if forcewake acquisition timed out — confirm intended.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (xe->info.skip_guc_pc) {
		/*
		 * GuC sets the hysteresis value when GuC PC is enabled
		 * else set it to 25 (25 * 1.28us)
		 */
		xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
		xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
	}

	xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
146
147void xe_gt_idle_disable_pg(struct xe_gt *gt)
148{
149	struct xe_gt_idle *gtidle = &gt->gtidle;
150	unsigned int fw_ref;
151
152	if (IS_SRIOV_VF(gt_to_xe(gt)))
153		return;
154
155	xe_device_assert_mem_access(gt_to_xe(gt));
156	gtidle->powergate_enable = 0;
157
158	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
159	xe_mmio_write32(&gt->mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
160	xe_force_wake_put(gt_to_fw(gt), fw_ref);
161}
162
163/**
164 * xe_gt_idle_pg_print - Xe powergating info
165 * @gt: GT object
166 * @p: drm_printer.
167 *
168 * This function prints the powergating information
169 *
170 * Return: 0 on success, negative error code otherwise
171 */
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_idle *gtidle = &gt->gtidle;
	struct xe_device *xe = gt_to_xe(gt);
	enum xe_gt_idle_state state;
	u32 pg_enabled, pg_status = 0;
	u32 vcs_mask, vecs_mask;
	unsigned int fw_ref;
	int n;
	/*
	 * Media Slices
	 *
	 * Slice 0: VCS0, VCS1, VECS0
	 * Slice 1: VCS2, VCS3, VECS1
	 * Slice 2: VCS4, VCS5, VECS2
	 * Slice 3: VCS6, VCS7, VECS3
	 */
	static const struct {
		u64 engines;	/* engine-mask bits that belong to this slice */
		u32 status_bit;	/* POWERGATE_DOMAIN_STATUS awake bit for it */
	} media_slices[] = {
		{(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) |
		  BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS},

		{(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) |
		   BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS},

		{(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) |
		   BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS},

		{(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) |
		   BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS},
	};

	/* PVC never enables CPG (see xe_gt_idle_enable_pg) */
	if (xe->info.platform == XE_PVC) {
		drm_printf(p, "Power Gating not supported\n");
		return 0;
	}

	/* Start from the cached enable mask; refreshed from hw below if safe */
	state = gtidle->idle_status(gtidle_to_pc(gtidle));
	pg_enabled = gtidle->powergate_enable;

	/* Do not wake the GT to read powergating status */
	if (state != GT_IDLE_C6) {
		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref)
			return -ETIMEDOUT;

		pg_enabled = xe_mmio_read32(&gt->mmio, POWERGATE_ENABLE);
		pg_status = xe_mmio_read32(&gt->mmio, POWERGATE_DOMAIN_STATUS);

		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}
	/*
	 * When the GT is in C6, pg_status stays 0 so every domain below is
	 * reported as "down", and pg_enabled keeps the cached sw value.
	 */

	if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) {
		drm_printf(p, "Render Power Gating Enabled: %s\n",
			   str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE));

		drm_printf(p, "Render Power Gate Status: %s\n",
			   str_up_down(pg_status & RENDER_AWAKE_STATUS));
	}

	vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
	vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);

	/* Print media CPG status only if media is present */
	if (vcs_mask || vecs_mask) {
		drm_printf(p, "Media Power Gating Enabled: %s\n",
			   str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE));

		/* Report only slices that have at least one engine fused in */
		for (n = 0; n < ARRAY_SIZE(media_slices); n++)
			if (gt->info.engine_mask & media_slices[n].engines)
				drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,
					   str_up_down(pg_status & media_slices[n].status_bit));
	}
	return 0;
}
249
250static ssize_t name_show(struct device *dev,
251			 struct device_attribute *attr, char *buff)
252{
253	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
254	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
255	ssize_t ret;
256
257	xe_pm_runtime_get(pc_to_xe(pc));
258	ret = sysfs_emit(buff, "%s\n", gtidle->name);
259	xe_pm_runtime_put(pc_to_xe(pc));
260
261	return ret;
262}
263static DEVICE_ATTR_RO(name);
264
265static ssize_t idle_status_show(struct device *dev,
266				struct device_attribute *attr, char *buff)
267{
268	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
269	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
270	enum xe_gt_idle_state state;
271
272	xe_pm_runtime_get(pc_to_xe(pc));
273	state = gtidle->idle_status(pc);
274	xe_pm_runtime_put(pc_to_xe(pc));
275
276	return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
277}
278static DEVICE_ATTR_RO(idle_status);
279
280static ssize_t idle_residency_ms_show(struct device *dev,
281				      struct device_attribute *attr, char *buff)
282{
283	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
284	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
285	u64 residency;
286
287	xe_pm_runtime_get(pc_to_xe(pc));
288	residency = gtidle->idle_residency(pc);
289	xe_pm_runtime_put(pc_to_xe(pc));
290
291	return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
292}
293static DEVICE_ATTR_RO(idle_residency_ms);
294
/* Attribute files created under device/gt#/gtidle/ by xe_gt_idle_init() */
static const struct attribute *gt_idle_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_idle_status.attr,
	&dev_attr_idle_residency_ms.attr,
	NULL,
};
301
/* devm teardown action registered by xe_gt_idle_init(); @arg is the gtidle kobject */
static void gt_idle_fini(void *arg)
{
	struct kobject *kobj = arg;
	struct xe_gt *gt = kobj_to_gt(kobj->parent);
	unsigned int fw_ref;

	xe_gt_idle_disable_pg(gt);

	/*
	 * NOTE(review): when GuC PC is skipped, C6 was presumably enabled
	 * directly via xe_gt_idle_enable_c6 elsewhere — confirm callers.
	 * Disabling it requires GT forcewake to be held.
	 */
	if (gt_to_xe(gt)->info.skip_guc_pc) {
		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		xe_gt_idle_disable_c6(gt);
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}

	/* Remove the sysfs files, then drop the init-time kobject reference */
	sysfs_remove_files(kobj, gt_idle_attrs);
	kobject_put(kobj);
}
319
/**
 * xe_gt_idle_init - Set up GT idle bookkeeping, sysfs files and power gating
 * @gtidle: the &struct xe_gt_idle embedded in the GT to initialize
 *
 * Creates the device/gt#/gtidle sysfs directory with its attribute files,
 * selects the residency/state callbacks, and enables coarse power gating.
 * Returns early (success) for SR-IOV VFs.  Cleanup runs via the devm
 * action gt_idle_fini().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_idle_init(struct xe_gt_idle *gtidle)
{
	struct xe_gt *gt = gtidle_to_gt(gtidle);
	struct xe_device *xe = gt_to_xe(gt);
	struct kobject *kobj;
	int err;

	if (IS_SRIOV_VF(xe))
		return 0;

	kobj = kobject_create_and_add("gtidle", gt->sysfs);
	if (!kobj)
		return -ENOMEM;

	/* Media GTs report MC6 residency, other GTs report RC6 */
	if (xe_gt_is_media_type(gt)) {
		snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
		gtidle->idle_residency = xe_guc_pc_mc6_residency;
	} else {
		snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-rc", gt->info.id);
		gtidle->idle_residency = xe_guc_pc_rc6_residency;
	}

	/* Multiplier for Residency counter in units of 1.28us */
	gtidle->residency_multiplier = 1280;
	gtidle->idle_status = xe_guc_pc_c_status;

	err = sysfs_create_files(kobj, gt_idle_attrs);
	if (err) {
		/* Drop the reference taken by kobject_create_and_add() */
		kobject_put(kobj);
		return err;
	}

	xe_gt_idle_enable_pg(gt);

	/* gt_idle_fini() disables PG and releases the kobject on teardown */
	return devm_add_action_or_reset(xe->drm.dev, gt_idle_fini, kobj);
}
356
/**
 * xe_gt_idle_enable_c6 - Enable RC6/C6 directly via MMIO
 * @gt: GT object
 *
 * Programs the RC idle hysteresis and enables hardware-controlled RC6.
 * Caller must hold GT forcewake and have memory access asserted.  No-op
 * for SR-IOV VFs.
 */
void xe_gt_idle_enable_c6(struct xe_gt *gt)
{
	xe_device_assert_mem_access(gt_to_xe(gt));
	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	/* Units of 1280 ns for a total of 5s (0x3B9ACA * 1280ns = 5e9 ns) */
	xe_mmio_write32(&gt->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA);
	/* Enable RC6 */
	xe_mmio_write32(&gt->mmio, RC_CONTROL,
			RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}
371
/**
 * xe_gt_idle_disable_c6 - Disable RC6/C6 directly via MMIO
 * @gt: GT object
 *
 * Clears RC_CONTROL (turning hw RC6 off) and resets RC_STATE.  Caller
 * must hold GT forcewake and have memory access asserted.  No-op for
 * SR-IOV VFs.
 */
void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
	xe_device_assert_mem_access(gt_to_xe(gt));
	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
	xe_mmio_write32(&gt->mmio, RC_STATE, 0);
}