// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_mcr.h"

#include "regs/xe_gt_regs.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
#include "xe_mmio.h"

/**
 * DOC: GT Multicast/Replicated (MCR) Register Support
 *
 * Some GT registers are designed as "multicast" or "replicated" registers:
 * multiple instances of the same register share a single MMIO offset.  MCR
 * registers are generally used when the hardware needs to potentially track
 * independent values of a register per hardware unit (e.g., per-subslice,
 * per-L3bank, etc.).  The specific types of replication that exist vary
 * per-platform.
 *
 * MMIO accesses to MCR registers are controlled according to the settings
 * programmed in the platform's MCR_SELECTOR register(s).  MMIO writes to MCR
 * registers can be done in either multicast (a single write updates all
 * instances of the register to the same value) or unicast (a write updates
 * only one specific instance) form.  Reads of MCR registers always operate
 * in a unicast manner regardless of how the multicast/unicast bit is set in
 * MCR_SELECTOR.  Selection of a specific MCR instance for unicast operations
 * is referred to as "steering."
 *
 * If MCR register operations are steered toward a hardware unit that is
 * fused off or currently powered down due to power gating, the MMIO operation
 * is "terminated" by the hardware.  Terminated read operations will return a
 * value of zero and terminated unicast write operations will be silently
 * ignored.  During device initialization, the goal of the various
 * ``init_steering_*()`` functions is to apply the platform-specific rules for
 * each MCR register type to identify a steering target that will select a
 * non-terminated instance.
 */
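
/*
 * Example usage (an illustrative sketch; XEHP_EXAMPLE_MCR_REG is a
 * hypothetical register assumed to be defined with the XE_REG_MCR()
 * macro, not an actual register in xe_gt_regs.h):
 *
 *	u32 val = xe_gt_mcr_unicast_read_any(gt, XEHP_EXAMPLE_MCR_REG);
 *
 * The helper looks the register offset up in this GT's steering tables,
 * steers the read to a known non-terminated instance, and returns that
 * instance's value.
 */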

#define STEER_SEMAPHORE		XE_REG(0xFD0)

static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
{
	return reg_mcr.__reg;
}

enum {
	MCR_OP_READ,
	MCR_OP_WRITE
};

static const struct xe_mmio_range xelp_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct xe_mmio_range xehp_l3bank_steering_table[] = {
	{ 0x008C80, 0x008CFF },
	{ 0x00B100, 0x00B3FF },
	{},
};

/*
 * Although the bspec lists more "MSLICE" ranges than shown here, some of those
 * are of a "GAM" subclass that has special rules and doesn't need to be
 * included here.
 */
static const struct xe_mmio_range xehp_mslice_steering_table[] = {
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct xe_mmio_range xehp_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};

/*
 * We have several types of MCR registers where steering to (0,0) will always
 * provide us with a non-terminated value.  We'll stick them all in the same
 * table for simplicity.
 */
static const struct xe_mmio_range xehpc_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },	/* HALF-BSLICE */
	{ 0x008800, 0x00887F },	/* CC */
	{ 0x008A80, 0x008AFF },	/* TILEPSMI */
	{ 0x00B000, 0x00B0FF },	/* HALF-BSLICE */
	{ 0x00B100, 0x00B3FF },	/* L3BANK */
	{ 0x00C800, 0x00CFFF },	/* HALF-BSLICE */
	{ 0x00D800, 0x00D8FF },	/* HALF-BSLICE */
	{ 0x00DD00, 0x00DDFF },	/* BSLICE */
	{ 0x00E900, 0x00E9FF },	/* HALF-BSLICE */
	{ 0x00EC00, 0x00EEFF },	/* HALF-BSLICE */
	{ 0x00F000, 0x00FFFF },	/* HALF-BSLICE */
	{ 0x024180, 0x0241FF },	/* HALF-BSLICE */
	{},
};

static const struct xe_mmio_range xelpg_instance0_steering_table[] = {
	{ 0x000B00, 0x000BFF },	/* SQIDI */
	{ 0x001000, 0x001FFF },	/* SQIDI */
	{ 0x004000, 0x0048FF },	/* GAM */
	{ 0x008700, 0x0087FF },	/* SQIDI */
	{ 0x00B000, 0x00B0FF },	/* NODE */
	{ 0x00C800, 0x00CFFF },	/* GAM */
	{ 0x00D880, 0x00D8FF },	/* NODE */
	{ 0x00DD00, 0x00DDFF },	/* OAAL2 */
	{},
};

static const struct xe_mmio_range xelpg_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct xe_mmio_range xelp_dss_steering_table[] = {
	{ 0x008150, 0x00815F },
	{ 0x009520, 0x00955F },
	{ 0x00DE80, 0x00E8FF },
	{ 0x024A00, 0x024A7F },
	{},
};

/* DSS steering is used for GSLICE ranges as well */
static const struct xe_mmio_range xehp_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },		/* GSLICE */
	{ 0x005400, 0x007FFF },		/* GSLICE */
	{ 0x008140, 0x00815F },		/* GSLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x008D00, 0x008DFF },		/* DSS */
	{ 0x0094D0, 0x00955F },		/* GSLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00D800, 0x00D87F },		/* GSLICE */
	{ 0x00DC00, 0x00DCFF },		/* GSLICE */
	{ 0x00DE80, 0x00E8FF },		/* DSS (0xE000-0xE0FF reserved) */
	{ 0x017000, 0x017FFF },		/* GSLICE */
	{ 0x024A00, 0x024A7F },		/* DSS */
	{},
};

/* DSS steering is used for COMPUTE ranges as well */
static const struct xe_mmio_range xehpc_dss_steering_table[] = {
	{ 0x008140, 0x00817F },		/* COMPUTE (0x8140-0x814F & 0x8160-0x817F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },		/* COMPUTE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00DC00, 0x00DCFF },		/* COMPUTE */
	{ 0x00DE80, 0x00E7FF },		/* DSS (0xDF00-0xE1FF reserved) */
	{},
};

/* DSS steering is used for SLICE ranges as well */
static const struct xe_mmio_range xelpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },		/* SLICE */
	{ 0x005500, 0x007FFF },		/* SLICE */
	{ 0x008140, 0x00815F },		/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },		/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00D800, 0x00D87F },		/* SLICE */
	{ 0x00DC00, 0x00DCFF },		/* SLICE */
	{ 0x00DE80, 0x00E8FF },		/* DSS (0xE000-0xE0FF reserved) */
	{},
};

static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
	{ 0x393200, 0x39323F },
	{ 0x393400, 0x3934FF },
	{},
};

static const struct xe_mmio_range dg2_implicit_steering_table[] = {
	{ 0x000B00, 0x000BFF },		/* SF (SQIDI replication) */
	{ 0x001000, 0x001FFF },		/* SF (SQIDI replication) */
	{ 0x004000, 0x004AFF },		/* GAM (MSLICE replication) */
	{ 0x008700, 0x0087FF },		/* MCFG (SQIDI replication) */
	{ 0x00C800, 0x00CFFF },		/* GAM (MSLICE replication) */
	{ 0x00F000, 0x00FFFF },		/* GAM (MSLICE replication) */
	{},
};

static const struct xe_mmio_range xe2lpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },		/* SLICE */
	{ 0x005500, 0x007FFF },		/* SLICE */
	{ 0x008140, 0x00815F },		/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },		/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00D800, 0x00D87F },		/* SLICE */
	{ 0x00DC00, 0x00DCFF },		/* SLICE */
	{ 0x00DE80, 0x00E8FF },		/* DSS (0xE000-0xE0FF reserved) */
	{ 0x00E980, 0x00E9FF },		/* SLICE */
	{ 0x013000, 0x0133FF },		/* DSS (0x13000-0x131FF), SLICE (0x13200-0x133FF) */
	{},
};

static const struct xe_mmio_range xe2lpg_sqidi_psmi_steering_table[] = {
	{ 0x000B00, 0x000BFF },
	{ 0x001000, 0x001FFF },
	{},
};

static const struct xe_mmio_range xe2lpg_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },		/* GAM, rsvd, GAMWKR */
	{ 0x008700, 0x00887F },		/* SQIDI, MEMPIPE */
	{ 0x00B000, 0x00B3FF },		/* NODE, L3BANK */
	{ 0x00C800, 0x00CFFF },		/* GAM */
	{ 0x00D880, 0x00D8FF },		/* NODE */
	{ 0x00DD00, 0x00DDFF },		/* MEMPIPE */
	{ 0x00E900, 0x00E97F },		/* MEMPIPE */
	{ 0x00F000, 0x00FFFF },		/* GAM, GAMWKR */
	{ 0x013400, 0x0135FF },		/* MEMPIPE */
	{},
};

static const struct xe_mmio_range xe2lpm_gpmxmt_steering_table[] = {
	{ 0x388160, 0x38817F },
	{ 0x389480, 0x3894CF },
	{},
};

static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
	{ 0x384000, 0x3847DF },		/* GAM, rsvd, GAM */
	{ 0x384900, 0x384AFF },		/* GAM */
	{ 0x389560, 0x3895FF },		/* MEDIAINF */
	{ 0x38B600, 0x38B8FF },		/* L3BANK */
	{ 0x38C800, 0x38D07F },		/* GAM, MEDIAINF */
	{ 0x38F000, 0x38F0FF },		/* GAM */
	{ 0x393C00, 0x393C7F },		/* MEDIAINF */
	{},
};

static void init_steering_l3bank(struct xe_gt *gt)
{
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
						xe_mmio_read32(gt, MIRROR_FUSE3));
		u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
					      xe_mmio_read32(gt, XEHP_FUSE4));

		/*
		 * Group selects mslice, instance selects bank within mslice.
		 * Bank 0 is always valid _except_ when the bank mask is 010b.
		 */
		gt->steering[L3BANK].group_target = __ffs(mslice_mask);
		gt->steering[L3BANK].instance_target =
			bank_mask & BIT(0) ? 0 : 2;
	} else if (gt_to_xe(gt)->info.platform == XE_DG2) {
		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
						xe_mmio_read32(gt, MIRROR_FUSE3));
		u32 bank = __ffs(mslice_mask) * 8;

		/*
		 * Like mslice registers, look for a valid mslice and steer to
		 * the first L3BANK of that quad.  Access to the Nth L3 bank is
		 * split between the first bits of group and instance.
		 */
		gt->steering[L3BANK].group_target = (bank >> 2) & 0x7;
		gt->steering[L3BANK].instance_target = bank & 0x3;
	} else {
		u32 fuse = REG_FIELD_GET(L3BANK_MASK,
					 ~xe_mmio_read32(gt, MIRROR_FUSE3));

		gt->steering[L3BANK].group_target = 0;	/* unused */
		gt->steering[L3BANK].instance_target = __ffs(fuse);
	}
}
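
/*
 * Worked example for the DG2 branch above (illustrative): a MEML3_EN_MASK
 * fuse value of 0b0010 gives __ffs() = 1, so bank = 8; steering then
 * targets group (8 >> 2) & 0x7 = 2 and instance 8 & 0x3 = 0, i.e. the
 * first L3 bank of that mslice's quad.
 */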

static void init_steering_mslice(struct xe_gt *gt)
{
	u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
				 xe_mmio_read32(gt, MIRROR_FUSE3));

	/*
	 * mslice registers are valid (not terminated) if either the meml3
	 * associated with the mslice is present, or at least one DSS associated
	 * with the mslice is present.  There will always be at least one meml3
	 * so we can just use that to find a non-terminated mslice and ignore
	 * the DSS fusing.
	 */
	gt->steering[MSLICE].group_target = __ffs(mask);
	gt->steering[MSLICE].instance_target = 0;	/* unused */

	/*
	 * LNCF termination is also based on mslice presence, so we'll set
	 * it up here.  Either LNCF within a non-terminated mslice will work,
	 * so we just always pick LNCF 0 here.
	 */
	gt->steering[LNCF].group_target = __ffs(mask) << 1;
	gt->steering[LNCF].instance_target = 0;		/* unused */
}
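
/*
 * Worked example (illustrative): a MEML3_EN_MASK fuse value of 0b1100
 * gives __ffs(mask) = 2, so MSLICE accesses steer to group 2 and LNCF
 * accesses steer to group 4, the first of the two LNCFs within that
 * mslice.
 */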

static void init_steering_dss(struct xe_gt *gt)
{
	unsigned int dss = min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
			       xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0));
	unsigned int dss_per_grp = gt_to_xe(gt)->info.platform == XE_PVC ? 8 : 4;

	gt->steering[DSS].group_target = dss / dss_per_grp;
	gt->steering[DSS].instance_target = dss % dss_per_grp;
}
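
/*
 * Worked example (illustrative): if the lowest present DSS across the
 * geometry and compute masks is DSS 6, platforms with four DSS per group
 * steer to group 1, instance 2, while PVC (eight DSS per group) steers
 * to group 0, instance 6.
 */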

static void init_steering_oaddrm(struct xe_gt *gt)
{
	/*
	 * First instance is only terminated if the entire first media slice
	 * is absent (i.e., no VCS0 or VECS0).
	 */
	if (gt->info.engine_mask & (XE_HW_ENGINE_VCS0 | XE_HW_ENGINE_VECS0))
		gt->steering[OADDRM].group_target = 0;
	else
		gt->steering[OADDRM].group_target = 1;

	gt->steering[OADDRM].instance_target = 0;	/* unused */
}

static void init_steering_sqidi_psmi(struct xe_gt *gt)
{
	u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
				 xe_mmio_read32(gt, MIRROR_FUSE3));
	u32 select = __ffs(mask);

	gt->steering[SQIDI_PSMI].group_target = select >> 1;
	gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
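
/*
 * Worked example (illustrative): a XE2_NODE_ENABLE_MASK value of 0b0100
 * gives select = __ffs(mask) = 2, which splits into group 1 (select >> 1)
 * and instance 0 (select & 0x1).
 */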

static void init_steering_inst0(struct xe_gt *gt)
{
	gt->steering[INSTANCE0].group_target = 0;	/* unused */
	gt->steering[INSTANCE0].instance_target = 0;	/* unused */
}

static const struct {
	const char *name;
	void (*init)(struct xe_gt *gt);
} xe_steering_types[] = {
	[L3BANK] = { "L3BANK", init_steering_l3bank },
	[MSLICE] = { "MSLICE", init_steering_mslice },
	[LNCF] = { "LNCF", NULL }, /* initialized by mslice init */
	[DSS] = { "DSS", init_steering_dss },
	[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
	[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
	[INSTANCE0] = { "INSTANCE 0", init_steering_inst0 },
	[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};

void xe_gt_mcr_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
	BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);

	spin_lock_init(&gt->mcr_lock);

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);

		if (MEDIA_VER(xe) >= 20) {
			gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
			gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
		} else {
			gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table;
		}
	} else {
		if (GRAPHICS_VER(xe) >= 20) {
			gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
			gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
			gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table;
		} else if (GRAPHICS_VERx100(xe) >= 1270) {
			gt->steering[INSTANCE0].ranges = xelpg_instance0_steering_table;
			gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table;
			gt->steering[DSS].ranges = xelpg_dss_steering_table;
		} else if (xe->info.platform == XE_PVC) {
			gt->steering[INSTANCE0].ranges = xehpc_instance0_steering_table;
			gt->steering[DSS].ranges = xehpc_dss_steering_table;
		} else if (xe->info.platform == XE_DG2) {
			gt->steering[L3BANK].ranges = xehp_l3bank_steering_table;
			gt->steering[MSLICE].ranges = xehp_mslice_steering_table;
			gt->steering[LNCF].ranges = xehp_lncf_steering_table;
			gt->steering[DSS].ranges = xehp_dss_steering_table;
			gt->steering[IMPLICIT_STEERING].ranges = dg2_implicit_steering_table;
		} else {
			gt->steering[L3BANK].ranges = xelp_l3bank_steering_table;
			gt->steering[DSS].ranges = xelp_dss_steering_table;
		}
	}

	/* Select non-terminated steering target for each type */
	for (int i = 0; i < NUM_STEERING_TYPES; i++)
		if (gt->steering[i].ranges && xe_steering_types[i].init)
			xe_steering_types[i].init(gt);
}

/**
 * xe_gt_mcr_set_implicit_defaults - Initialize steer control registers
 * @gt: GT structure
 *
 * Some register ranges don't need to have their steering control registers
 * changed on each access - it's sufficient to set them once on initialization.
 * This function sets those registers for each platform.
 */
void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe->info.platform == XE_DG2) {
		u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
			REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);

		xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
		xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
		/*
		 * For GAM registers, all reads should be directed to instance 1
		 * (unicast reads against other instances are not allowed),
		 * and instance 1 is already the hardware's default steering
		 * target, which we never change.
		 */
	}
}

/*
 * xe_gt_mcr_get_nonterminated_steering - find group/instance values that
 *    will steer a register to a non-terminated instance
 * @gt: GT structure
 * @reg_mcr: register for which the steering is required
 * @group: return variable for group steering
 * @instance: return variable for instance steering
 *
 * This function returns a group/instance pair that is guaranteed to work for
 * read steering of the given register.  Note that a value will be returned
 * even if the register is not replicated and therefore does not actually
 * require steering.
 *
 * Returns true if the caller should steer to the @group/@instance values
 * returned.  Returns false if the caller need not perform any steering.
 */
static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
						 struct xe_reg_mcr reg_mcr,
						 u8 *group, u8 *instance)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	const struct xe_mmio_range *implicit_ranges;

	for (int type = 0; type < IMPLICIT_STEERING; type++) {
		if (!gt->steering[type].ranges)
			continue;

		for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
			if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) {
				*group = gt->steering[type].group_target;
				*instance = gt->steering[type].instance_target;
				return true;
			}
		}
	}

	implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
	if (implicit_ranges)
		for (int i = 0; implicit_ranges[i].end > 0; i++)
			if (xe_mmio_in_range(gt, &implicit_ranges[i], reg))
				return false;

	/*
	 * Not found in a steering table and not a register with implicit
	 * steering.  Just steer to 0/0 as a guess and raise a warning.
	 */
	drm_WARN(&gt_to_xe(gt)->drm, true,
		 "Did not find MCR register %#x in any MCR steering table\n",
		 reg.addr);
	*group = 0;
	*instance = 0;

	return true;
}

/*
 * Obtain exclusive access to MCR steering.  On MTL and beyond we also need
 * to synchronize with external clients (e.g., firmware), so a semaphore
 * register will also need to be taken.
 */
static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
{
	struct xe_device *xe = gt_to_xe(gt);
	int ret = 0;

	spin_lock(&gt->mcr_lock);

	/*
	 * Starting with MTL we also need to grab a semaphore register
	 * to synchronize with external agents (e.g., firmware) that now
	 * share the same steering control register.  The semaphore is
	 * obtained when a read of the relevant register returns 1.
	 */
	if (GRAPHICS_VERx100(xe) >= 1270)
		ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
				     true);

	drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
}

static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
	/* Release hardware semaphore - this is done by writing 1 to the register */
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1);

	spin_unlock(&gt->mcr_lock);
}
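
/*
 * Usage sketch (illustrative): every steered access in this file follows
 * the pattern
 *
 *	mcr_lock(gt);
 *	val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
 *	mcr_unlock(gt);
 *
 * so that the steering selector cannot be reprogrammed by another thread
 * (or, on MTL and beyond, by firmware) between the selector write and the
 * actual register access.
 */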

/*
 * Access a register with specific MCR steering
 *
 * Caller needs to make sure the relevant forcewake wells are up.
 */
static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
				u8 rw_flag, int group, int instance, u32 value)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	struct xe_reg steer_reg;
	u32 steer_val, val = 0;

	lockdep_assert_held(&gt->mcr_lock);

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		steer_reg = MTL_MCR_SELECTOR;
		steer_val = REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
			REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance);
	} else {
		steer_reg = MCR_SELECTOR;
		steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, group) |
			REG_FIELD_PREP(MCR_SUBSLICE_MASK, instance);
	}

	/*
	 * Always leave the hardware in multicast mode when doing reads and only
	 * change it to unicast mode when doing writes of a specific instance.
	 *
	 * The setting of the multicast/unicast bit usually wouldn't matter for
	 * read operations (which always return the value from a single register
	 * instance regardless of how that bit is set), but some platforms may
	 * have workarounds requiring us to remain in multicast mode for reads,
	 * e.g. Wa_22013088509 on PVC.  There's no real downside to this, so
	 * we'll just go ahead and do so on all platforms; we'll only clear the
	 * multicast bit from the mask when explicitly doing a write operation.
	 *
	 * No need to save old steering reg value.
	 */
	if (rw_flag == MCR_OP_READ)
		steer_val |= MCR_MULTICAST;

	xe_mmio_write32(gt, steer_reg, steer_val);

	if (rw_flag == MCR_OP_READ)
		val = xe_mmio_read32(gt, reg);
	else
		xe_mmio_write32(gt, reg, value);

	/*
	 * If we turned off the multicast bit (during a write) we're required
	 * to turn it back on before finishing.  The group and instance values
	 * don't matter since they'll be re-programmed on the next MCR
	 * operation.
	 */
	if (rw_flag == MCR_OP_WRITE)
		xe_mmio_write32(gt, steer_reg, MCR_MULTICAST);

	return val;
}

/**
 * xe_gt_mcr_unicast_read_any - reads a non-terminated instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: register to read
 *
 * Reads a GT MCR register.  The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 * This function assumes the caller is already holding any necessary forcewake
 * domains.
 *
 * Returns the value from a non-terminated instance of @reg_mcr.
 */
u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	u8 group, instance;
	u32 val;
	bool steer;

	steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
						     &group, &instance);

	if (steer) {
		mcr_lock(gt);
		val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ,
					   group, instance, 0);
		mcr_unlock(gt);
	} else {
		val = xe_mmio_read32(gt, reg);
	}

	return val;
}

/**
 * xe_gt_mcr_unicast_read - read a specific instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to read
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Returns the value read from an MCR register after steering toward a specific
 * group/instance.
 */
u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
			   struct xe_reg_mcr reg_mcr,
			   int group, int instance)
{
	u32 val;

	mcr_lock(gt);
	val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
	mcr_unlock(gt);

	return val;
}

/**
 * xe_gt_mcr_unicast_write - write a specific instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to write
 * @value: value to write
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Write an MCR register in unicast mode after steering toward a specific
 * group/instance.
 */
void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
			     u32 value, int group, int instance)
{
	mcr_lock(gt);
	rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
	mcr_unlock(gt);
}

/**
 * xe_gt_mcr_multicast_write - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.
 */
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
			       u32 value)
{
	struct xe_reg reg = to_xe_reg(reg_mcr);

	/*
	 * Synchronize with any unicast operations.  Once we have exclusive
	 * access, the MULTICAST bit should already be set, so there's no need
	 * to touch the steering register.
	 */
	mcr_lock(gt);
	xe_mmio_write32(gt, reg, value);
	mcr_unlock(gt);
}
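
/*
 * Usage sketch (illustrative; SOME_MCR_REG stands in for any register
 * created with the XE_REG_MCR() macro): a workaround value that must reach
 * every instance of a register would be applied with
 *
 *	xe_gt_mcr_multicast_write(gt, SOME_MCR_REG, val);
 *
 * whereas verifying each hardware unit individually would read the copies
 * back one at a time with xe_gt_mcr_unicast_read(gt, SOME_MCR_REG, group,
 * instance).
 */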

void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p)
{
	for (int i = 0; i < NUM_STEERING_TYPES; i++) {
		if (gt->steering[i].ranges) {
			drm_printf(p, "%s steering: group=%#x, instance=%#x\n",
				   xe_steering_types[i].name,
				   gt->steering[i].group_target,
				   gt->steering[i].instance_target);
			for (int j = 0; gt->steering[i].ranges[j].end; j++)
				drm_printf(p, "\t0x%06x - 0x%06x\n",
					   gt->steering[i].ranges[j].start,
					   gt->steering[i].ranges[j].end);
		}
	}
}