// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace.h"

/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
	u32 *response_buffer;
	u32 seqno;
	u32 response_data;
	u16 response_len;
	u16 error;
	u16 hint;
	u16 reason;
	bool retry;
	bool fail;
	bool done;
};

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->response_data = 0;
	g2h_fence->response_len = 0;
	g2h_fence->fail = false;
	g2h_fence->retry = false;
	g2h_fence->done = false;
	g2h_fence->seqno = ~0x0;
}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *	+--------+-----------------------------------------------+------+
 *	| offset | contents                                      | size |
 *	+========+===============================================+======+
 *	| 0x0000 | H2G CTB Descriptor (send)                     |      |
 *	+--------+-----------------------------------------------+  4K  |
 *	| 0x0800 | G2H CTB Descriptor (g2h)                      |      |
 *	+--------+-----------------------------------------------+------+
 *	| 0x1000 | H2G CT Buffer (send)                          | n*4K |
 *	|        |                                               |      |
 *	+--------+-----------------------------------------------+------+
 *	| 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
 *	| + n*4K |                                               |      |
 *	+--------+-----------------------------------------------+------+
 *
 * The size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully
 * this is enough space to avoid backpressure on the driver. We increase
 * the size of the receive buffer (relative to the send) to ensure a G2H
 * response CTB has a landing spot.
 */

#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)

static size_t guc_ct_size(void)
{
	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		CTB_G2H_BUFFER_SIZE;
}
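
/*
 * A worked example of the blob layout above, under the current macro
 * values: each descriptor is padded to 2K, so the two descriptors occupy
 * 0x0000-0x0fff, the 4K H2G buffer starts at 0x1000 (n = 1), the 16K G2H
 * buffer starts at 0x2000 (m = 4), and guc_ct_size() returns
 * 2 * 2K + 4K + 16K = 24K.
 */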

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	destroy_workqueue(ct->g2h_wq);
	xa_destroy(&ct->fence_lookup);
}

static void g2h_worker_func(struct work_struct *w);

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));

	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
	if (!ct->g2h_wq)
		return -ENOMEM;

	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	err = drmm_mutex_init(&xe->drm, &ct->lock);
	if (err)
		return err;

	primelockdep(ct);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
					  XE_BO_CREATE_SYSTEM_BIT |
					  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}

#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
}

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
	g2h->info.head = 0;
	g2h->info.tail = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
					  CTB_H2G_BUFFER_SIZE);
}

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo);
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
		   CTB_H2G_BUFFER_SIZE;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	ct->g2h_outstanding = 0;
	ct->state = state;

	spin_unlock_irq(&ct->fast_lock);

	/*
	 * Lockdep doesn't like this under the fast lock, and the destroy only
	 * needs to be serialized with the send path, which the CT lock
	 * provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}

int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	int err;

	xe_assert(xe, !xe_guc_ct_enabled(ct));

	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);

	err = guc_ct_ctb_h2g_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_ctb_g2h_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_control_toggle(ct, true);
	if (err)
		goto err_out;

	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);

	smp_mb();
	wake_up_all(&ct->wq);
	drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");

	return 0;

err_out:
	drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);

	return err;
}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
 * in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{
	lockdep_assert_held(&ct->lock);
	ct->ctbs.h2g.info.space -= cmd_len;
}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
	xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);

	if (g2h_len) {
		lockdep_assert_held(&ct->fast_lock);

		ct->ctbs.g2h.info.space -= g2h_len;
		ct->g2h_outstanding += num_g2h;
	}
}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	lockdep_assert_held(&ct->fast_lock);
	xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
		  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);

	ct->ctbs.g2h.info.space += g2h_len;
	--ct->g2h_outstanding;
}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	spin_lock_irq(&ct->fast_lock);
	__g2h_release_space(ct, g2h_len);
	spin_unlock_irq(&ct->fast_lock);
}

#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
	xe_assert(xe, tail <= h2g->info.size);

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response) {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	} else {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	}

	/* H2G header in cmd[1] replaces action[0] so: */
	--len;
	++action;

	/* Write H2G ensuring visible before descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
	xe_device_wmb(xe);

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
			     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;
}
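
/*
 * Illustrative framing example (not code the driver runs): sending a
 * two-dword action { ACTION, DATA } with fence 0x12 lands in the H2G
 * buffer as three dwords,
 *
 *	dw0: CTB header, FORMAT=HXG, NUM_DWORDS=2, FENCE=0x12
 *	dw1: HXG header carrying ACTION (this replaces action[0])
 *	dw2: DATA
 *
 * i.e. full_len = len + GUC_CTB_HDR_LEN = 3, and NUM_DWORDS counts only
 * the dwords that follow the CTB header.
 */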

/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver; the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
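
/*
 * Worked example of the encoding above (illustrative only): with
 * CT_SEQNO_MASK = 0x7fff and CT_SEQNO_UNTRACKED = 0x8000, a tracked send
 * while fence_seqno is 0x7fff gets seqno 0x7fff and the next tracked
 * seqno wraps to 0; an untracked (FAST_REQUEST) send at the same point
 * would yield 0x7fff | 0x8000 = 0xffff.
 */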

static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_device *xe = ct_to_xe(ct);
	u16 seqno;
	int ret;

	xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	xe_assert(xe, !g2h_len || !g2h_fence);
	xe_assert(xe, !num_g2h || !g2h_fence);
	xe_assert(xe, !g2h_len || num_g2h);
	xe_assert(xe, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
		ret = -ECANCELED;
		goto out;
	}

	xe_assert(xe, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			void *ptr;

			g2h_fence->seqno = next_ct_seqno(ct, true);
			ptr = xa_store(&ct->fence_lookup,
				       g2h_fence->seqno,
				       g2h_fence, GFP_ATOMIC);
			if (IS_ERR(ptr)) {
				ret = PTR_ERR(ptr);
				goto out;
			}
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	if (g2h_len)
		spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	if (g2h_len)
		spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}

static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);

static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct drm_device *drm = &ct_to_xe(ct)->drm;
	struct drm_printer p = drm_info_printer(drm->dev);
	unsigned int sleep_period_ms = 1;
	int ret;

	xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but to wait for the
	 * GuC to consume H2Gs in the channel, so we use a wait / sleep loop.
	 * In the case of G2H we process any G2H in the channel, hopefully
	 * freeing credits as we consume the G2H messages.
	 */
	if (unlikely(ret == -EBUSY &&
		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (sleep_period_ms == 1024)
			goto broken;

		trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
						 h2g->info.size,
						 h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		msleep(sleep_period_ms);
		sleep_period_ms <<= 1;

		goto try_again;
	} else if (unlikely(ret == -EBUSY)) {
		struct xe_device *xe = ct_to_xe(ct);
		struct guc_ctb *g2h = &ct->ctbs.g2h;

		trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size,
						 g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct)	\
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			goto broken;
#undef g2h_avail

		if (dequeue_one_g2h(ct) < 0)
			goto broken;

		goto try_again;
	}

	return ret;

broken:
	drm_err(drm, "No forward progress on H2G, reset required");
	xe_guc_ct_print(ct, &p, true);
	ct->ctbs.h2g.info.broken = true;

	return -EDEADLK;
}

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

/*
 * Check if a GT reset is in progress or will occur and if GT reset brought the
 * CT back up. Randomly picking 5 seconds as an upper limit for the GT reset.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}

static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct g2h_fence g2h_fence;
	int ret = 0;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. Safe to allocate on the stack as the xarray is the
	 * only reference and it cannot be present after this function exits.
	 */
retry:
	g2h_fence_init(&g2h_fence, response_buffer);
retry_same_fence:
	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
	if (unlikely(ret == -ENOMEM)) {
		void *ptr;

		/* Retry allocation with GFP_KERNEL */
		ptr = xa_store(&ct->fence_lookup,
			       g2h_fence.seqno,
			       &g2h_fence, GFP_KERNEL);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		goto retry_same_fence;
	} else if (unlikely(ret)) {
		if (ret == -EDEADLK)
			kick_reset(ct);

		if (no_fail && retry_failure(ct, ret))
			goto retry_same_fence;

		if (!g2h_fence_needs_alloc(&g2h_fence))
			xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);

		return ret;
	}

	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
	if (!ret) {
		drm_err(&xe->drm, "Timed out waiting for G2H, fence %u, action %04x",
			g2h_fence.seqno, action[0]);
		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
		return -ETIME;
	}

	if (g2h_fence.retry) {
		drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
			 action[0], g2h_fence.reason);
		goto retry;
	}
	if (g2h_fence.fail) {
		drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
			action[0], g2h_fence.error, g2h_fence.hint);
		ret = -EIO;
	}

	return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
}

/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Sends a `HXG Request`_ message to the GuC over the CT communication channel
 * and blocks until the GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with the GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{
	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
	return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
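
/*
 * Usage sketch (illustrative only, not driver code; the action code below
 * is hypothetical): a blocking query where only DATA0 of the response is
 * of interest, so no response buffer is supplied.
 *
 *	u32 action[] = {
 *		XE_GUC_ACTION_EXAMPLE_QUERY,	// hypothetical action code
 *		param,
 *	};
 *	int ret = xe_guc_ct_send_recv(&guc->ct, action,
 *				      ARRAY_SIZE(action), NULL);
 *	if (ret < 0)
 *		return ret;	// negative errno on failure
 *	// ret >= 0 is DATA0 from the HXG Response
 */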

int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{
	return guc_ct_send_recv(ct, action, len, response_buffer, true);
}

static u32 *msg_to_hxg(u32 *msg)
{
	return msg + GUC_CTB_MSG_MIN_LEN;
}

static u32 msg_len_to_hxg_len(u32 len)
{
	return len - GUC_CTB_MSG_MIN_LEN;
}

static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	lockdep_assert_held(&ct->lock);

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		g2h_release_space(ct, len);
	}

	return 0;
}

static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_device *xe = gt_to_xe(gt);
	u32 *hxg = msg_to_hxg(msg);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	struct g2h_fence *g2h_fence;

	lockdep_assert_held(&ct->lock);

	/*
	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
	 * Those messages should never fail, so if we do get an error back it
	 * means we're likely doing an illegal operation and the GuC is
	 * rejecting it. We have no way to inform the code that submitted the
	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
	 */
	if (fence & CT_SEQNO_UNTRACKED) {
		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
				  fence,
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
		else
			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);

		return -EPROTO;
	}

	g2h_fence = xa_erase(&ct->fence_lookup, fence);
	if (unlikely(!g2h_fence)) {
		/* Don't tear down channel, as send could've timed out */
		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
		return 0;
	}

	xe_assert(xe, fence == g2h_fence->seqno);

	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		g2h_fence->fail = true;
		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		g2h_fence->retry = true;
		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
	} else if (g2h_fence->response_buffer) {
		g2h_fence->response_len = hxg_len;
		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
	} else {
		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
	}

	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	g2h_fence->done = true;
	smp_mb();

	wake_up_all(&ct->g2h_fence_wq);

	return 0;
}

static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_device *xe = ct_to_xe(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 origin, type;
	int ret;

	lockdep_assert_held(&ct->lock);

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		drm_err(&xe->drm,
			"G2H channel broken on read, origin=%d, reset required\n",
			origin);
		ct->ctbs.g2h.info.broken = true;

		return -EPROTO;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		ret = parse_g2h_event(ct, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		ret = parse_g2h_response(ct, msg, len);
		break;
	default:
		drm_err(&xe->drm,
			"G2H channel broken on read, type=%d, reset required\n",
			type);
		ct->ctbs.g2h.info.broken = true;

		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action, adj_len;
	u32 *payload;
	int ret = 0;

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
		return 0;

	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
							      adj_len);
		break;
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
		/* Selftest only at the moment */
		break;
	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		/* FIXME: Handle this */
		break;
	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
								 adj_len);
		break;
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
		ret = xe_guc_access_counter_notify_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
		break;
	default:
		drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
	}

	if (ret)
		drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
			action, ret);

	return 0;
}

static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 tail, head, len;
	s32 avail;
	u32 action;
	u32 *hxg;

	xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	lockdep_assert_held(&ct->fast_lock);

	if (ct->state == XE_GUC_CT_STATE_DISABLED)
		return -ENODEV;

	if (ct->state == XE_GUC_CT_STATE_STOPPED)
		return -ECANCELED;

	if (g2h->info.broken)
		return -EPIPE;

	xe_assert(xe, xe_guc_ct_enabled(ct));

	/* Calculate DW available to read */
	tail = desc_read(xe, g2h, tail);
	avail = tail - g2h->info.head;
	if (unlikely(avail == 0))
		return 0;

	if (avail < 0)
		avail += g2h->info.size;

	/* Read header */
	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
			   sizeof(u32));
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	if (len > avail) {
		drm_err(&xe->drm,
			"G2H channel broken on read, avail=%d, len=%d, reset required\n",
			avail, len);
		g2h->info.broken = true;

		return -EPROTO;
	}

	head = (g2h->info.head + 1) % g2h->info.size;
	avail = len - 1;

	/* Read G2H message */
	if (avail + head > g2h->info.size) {
		u32 avail_til_wrap = g2h->info.size - head;

		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail_til_wrap * sizeof(u32));
		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
				   &g2h->cmds, 0,
				   (avail - avail_til_wrap) * sizeof(u32));
	} else {
		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail * sizeof(u32));
	}

	hxg = msg_to_hxg(msg);
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	if (fast_path) {
		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
			return 0;

		switch (action) {
		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
			break;	/* Process these in fast-path */
		default:
			return 0;
		}
	}

	/* Update local / descriptor header */
	g2h->info.head = (head + avail) % g2h->info.size;
	desc_write(xe, g2h, head, g2h->info.head);

	trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
			     g2h->info.head, tail);

	return len;
}

static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
	int ret = 0;

	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	default:
		drm_warn(&xe->drm, "NOT_POSSIBLE");
	}

	if (ret)
		drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
			action, ret);
}

/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance, so process
 * these critical G2H in the IRQ. This is safe as these handlers either just
 * wake up waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	bool ongoing;
	int len;

	ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	spin_lock(&ct->fast_lock);
	do {
		len = g2h_read(ct, ct->fast_msg, true);
		if (len > 0)
			g2h_fast_path(ct, ct->fast_msg, len);
	} while (len > 0);
	spin_unlock(&ct->fast_lock);

	if (ongoing)
		xe_device_mem_access_put(xe);
}

/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{
	int len;
	int ret;

	lockdep_assert_held(&ct->lock);

	spin_lock_irq(&ct->fast_lock);
	len = g2h_read(ct, ct->msg, false);
	spin_unlock_irq(&ct->fast_lock);
	if (len <= 0)
		return len;

	ret = parse_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	ret = process_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	return 1;
}

static void g2h_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
	bool ongoing;
	int ret;

	/*
	 * Normal users must always hold mem_access.ref around CT calls. However
	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
	 * at this stage we can't rely on mem_access.ref and even the
	 * callback_task will be different from current. For such cases we just
	 * need to ensure we always process the responses from any blocking
	 * ct_send requests or where we otherwise expect some response when
	 * initiated from those callbacks (which will need to wait for the below
	 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
	 * the device has suspended to the point that the CT communication has
	 * been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires having the
	 * mem_access.ref). It seems like it might in theory be possible to
	 * receive unsolicited events from the GuC just as we are
	 * suspending-resuming, but those will currently anyway be lost when
	 * eventually exiting from suspend, hence no need to wake up the device
	 * here. If we ever need something stronger than get_if_ongoing() then
	 * we need to be careful with blocking the pm callbacks from getting CT
	 * responses, if the worker here is blocked on those callbacks
	 * completing, creating a deadlock.
	 */
	ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	do {
		mutex_lock(&ct->lock);
		ret = dequeue_one_g2h(ct);
		mutex_unlock(&ct->lock);

		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			struct drm_device *drm = &ct_to_xe(ct)->drm;
			struct drm_printer p = drm_info_printer(drm->dev);

			xe_guc_ct_print(ct, &p, false);
			kick_reset(ct);
		}
	} while (ret == 1);

	if (ongoing)
		xe_device_mem_access_put(ct_to_xe(ct));
}

static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot,
				     bool atomic)
{
	u32 head, tail;

	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
			   sizeof(struct guc_ct_buffer_desc));
	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));

	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
				       atomic ? GFP_ATOMIC : GFP_KERNEL);

	if (!snapshot->cmds) {
		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
		return;
	}

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	if (head != tail) {
		struct iosys_map map =
			IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));

		while (head != tail) {
			snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
			++head;
			if (head == ctb->info.size) {
				head = 0;
				map = ctb->cmds;
			} else {
				iosys_map_incr(&map, sizeof(u32));
			}
		}
	}
}

static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{
	u32 head, tail;

	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
	drm_printf(p, "\thead: %d\n", snapshot->info.head);
	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);

	if (!snapshot->cmds)
		return;

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	while (head != tail) {
		drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
			   snapshot->cmds[head]);
		++head;
		if (head == snapshot->info.size)
			head = 0;
	}
}

static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
{
	kfree(snapshot->cmds);
}

/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This can be printed out at a later stage, e.g. during dev_coredump
 * analysis.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * by using `xe_guc_ct_snapshot_free`.
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
						      bool atomic)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = kzalloc(sizeof(*snapshot),
			   atomic ? GFP_ATOMIC : GFP_KERNEL);

	if (!snapshot) {
		drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
		return NULL;
	}

	if (xe_guc_ct_enabled(ct)) {
		snapshot->ct_enabled = true;
		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
					 &snapshot->h2g, atomic);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
					 &snapshot->g2h, atomic);
	}

	return snapshot;
}

/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{
	if (!snapshot)
		return;

	if (snapshot->ct_enabled) {
		drm_puts(p, "H2G CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->h2g, p);

		drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->g2h, p);

		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);
	} else {
		drm_puts(p, "CT disabled\n");
	}
}

/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{
	if (!snapshot)
		return;

	guc_ctb_snapshot_free(&snapshot->h2g);
	guc_ctb_snapshot_free(&snapshot->g2h);
	kfree(snapshot);
}

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}