// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
{
	u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
	int num_slices = hweight32(CCS_MASK(gt));
	struct xe_device *xe = gt_to_xe(gt);
	int width, cslice = 0;
	u32 config = 0;

	xe_assert(xe, xe_gt_ccs_mode_enabled(gt));

	xe_assert(xe, num_engines && num_engines <= num_slices);
	xe_assert(xe, !(num_slices % num_engines));

	/*
	 * Loop over all available slices and assign each a user engine.
	 * For example, if there are four compute slices available, the
	 * assignment of compute slices to compute engines would be,
	 *
	 * With 1 engine (ccs0):
	 *   slice 0, 1, 2, 3: ccs0
	 *
	 * With 2 engines (ccs0, ccs1):
	 *   slice 0, 2: ccs0
	 *   slice 1, 3: ccs1
	 *
	 * With 4 engines (ccs0, ccs1, ccs2, ccs3):
	 *   slice 0: ccs0
	 *   slice 1: ccs1
	 *   slice 2: ccs2
	 *   slice 3: ccs3
	 */
	for (width = num_slices / num_engines; width; width--) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;

		for_each_hw_engine(hwe, gt, id) {
			if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
				continue;

			if (hwe->logical_instance >= num_engines)
				break;

			config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;

			/* If a slice is fused off, leave disabled */
			while ((CCS_MASK(gt) & BIT(cslice)) == 0)
				cslice++;

			mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
			mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
			cslice++;
		}
	}
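
	/*
	 * Worked example (illustrative, not part of the original file):
	 * with four available cslices and num_engines == 2, the two passes
	 * above map cslices 0 and 2 to ccs0 and cslices 1 and 3 to ccs1;
	 * config ends up with BIT(0) | BIT(1) shifted by XE_HW_ENGINE_CCS0,
	 * assuming the two compute engines have instance numbers 0 and 1.
	 */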

	/*
	 * Mask bits need to be set for the register. Though only Xe2+
	 * platforms require setting of mask bits, it won't harm for older
	 * platforms as these bits are unused there.
	 */
	mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
	xe_mmio_write32(&gt->mmio, CCS_MODE, mode);

	xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
		  mode, config, num_engines, num_slices);
}

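/*
 * Apply the currently selected ccs_mode to the hardware. This is a no-op
 * when no fixed CCS mode has been requested or when running as an SR-IOV
 * VF, which does not own this configuration.
 */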
void xe_gt_apply_ccs_mode(struct xe_gt *gt)
{
	if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	__xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
}

static ssize_t
num_cslices_show(struct device *kdev,
		 struct device_attribute *attr, char *buf)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);

	return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
}

static DEVICE_ATTR_RO(num_cslices);

static ssize_t
ccs_mode_show(struct device *kdev,
	      struct device_attribute *attr, char *buf)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);

	return sysfs_emit(buf, "%u\n", gt->ccs_mode);
}

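/*
 * Writing ccs_mode selects how many compute engines are exposed. The change
 * is refused while any DRM client is open and takes effect through a GT
 * reset, as implemented below.
 */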
static ssize_t
ccs_mode_store(struct device *kdev, struct device_attribute *attr,
	       const char *buff, size_t count)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
	struct xe_device *xe = gt_to_xe(gt);
	u32 num_engines, num_slices;
	int ret;

	if (IS_SRIOV(xe)) {
		xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
			  xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
		return -EOPNOTSUPP;
	}

	ret = kstrtou32(buff, 0, &num_engines);
	if (ret)
		return ret;

	/*
	 * Ensure number of engines specified is valid and there is an
	 * exact multiple of engines for slices.
	 */
	num_slices = hweight32(CCS_MASK(gt));
	if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
		xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
			  num_engines, num_slices);
		return -EINVAL;
	}

	/* CCS mode can only be updated when there are no drm clients */
	mutex_lock(&xe->drm.filelist_mutex);
	if (!list_empty(&xe->drm.filelist)) {
		mutex_unlock(&xe->drm.filelist_mutex);
		xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
		return -EBUSY;
	}

	if (gt->ccs_mode != num_engines) {
		xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
		gt->ccs_mode = num_engines;
		xe_gt_record_user_engines(gt);
		xe_gt_reset(gt);
	}

	mutex_unlock(&xe->drm.filelist_mutex);

	return count;
}

static DEVICE_ATTR_RW(ccs_mode);

static const struct attribute *gt_ccs_mode_attrs[] = {
	&dev_attr_ccs_mode.attr,
	&dev_attr_num_cslices.attr,
	NULL,
};

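/* devm cleanup action: remove the sysfs files when the device is unbound */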
static void xe_gt_ccs_mode_sysfs_fini(void *arg)
{
	struct xe_gt *gt = arg;

	sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
}

/**
 * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
 * @gt: GT structure
 *
 * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
 * number of compute hardware engines to which the available compute slices
 * are to be allocated. This user configuration change triggers a gt reset
 * and it is expected that there are no open drm clients while doing so.
 * The number of available compute slices is exposed to the user through a
 * per-gt 'num_cslices' sysfs interface.
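 *
 * Example usage (the sysfs path shown is illustrative; the exact location
 * depends on how the card, tile and GT are enumerated):
 *
 *	echo 2 > /sys/class/drm/card0/device/tile0/gt0/ccs_mode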
 *
 * Returns: 0 on success, negative error code on failure.
 */
int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	if (!xe_gt_ccs_mode_enabled(gt))
		return 0;

	err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
}