// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

#define FENCE_STACK_BIT	DMA_FENCE_FLAG_USER_BITS

/*
 * TLB invalidation depends on the pending commands in the CT queue and then
 * the real invalidation time. Double the time to process a full CT queue
 * just to be on the safe side.
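 *
 * For example (a sketch, assuming HZ == 250 and a CT-queue processing
 * estimate of 25 jiffies, ~100 ms): hw_tlb_timeout is HZ / 4 = 62 jiffies,
 * giving 62 + 2 * 25 = 112 jiffies, roughly 450 ms.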
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	xe_pm_runtime_put(gt_to_xe(fence->gt));
	fence->gt = NULL; /* fini() should be called once */
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

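/**
 * xe_gt_tlb_invalidation_fence_signal - Signal TLB invalidation fence
 * @fence: TLB invalidation fence to signal
 *
 * Signal the fence directly; the runtime-PM reference taken in
 * xe_gt_tlb_invalidation_fence_init() is dropped as part of signalling.
 */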
void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Initialize GT TLB invalidation state. This is purely software
 * initialization and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - Reset GT TLB invalidation state
 * @gt: graphics tile
 *
 * Signal any pending invalidation fences. Should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence, however at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
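	/*
	 * The next seqno to be assigned is gt->tlb_invalidation.seqno, so
	 * marking seqno - 1 as received retires every possible outstanding
	 * invalidation. Seqno 0 is never used, hence the wrap back to
	 * TLB_INVALIDATION_SEQNO_MAX - 1 when the counter sits at 1.
	 */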
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

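/*
 * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so the comparison is modular:
 * any delta larger than half the seqno space is treated as a wraparound.
 * For example, seqno == TLB_INVALIDATION_SEQNO_MAX - 1 with seqno_recv == 1
 * yields a delta above half the space, so the seqno is considered past
 * (seqno_recv has already wrapped beyond it).
 */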
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are. If that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else {
		__invalidation_fence_signal(xe, fence);
	}
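	/*
	 * Advance the seqno on success; 0 is skipped on wrap, matching the
	 * wrap handling in xe_gt_tlb_invalidation_reset().
	 */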
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

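/*
 * MAKE_INVAL_OP() packs the invalidation type together with heavy mode and a
 * cache flush into the single invalidation-op dword of the GuC action.
 */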
#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,  /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, fence, action,
				     ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: graphics tile
 *
 * Issue a TLB invalidation for the GGTT. Completion of the TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret)
			return ret;

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		struct xe_mmio *mmio = &gt->mmio;

		if (IS_SRIOV_VF(xe))
			return 0;

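		/*
		 * GuC CT is unavailable or submission is disabled, so
		 * invalidate via MMIO: PVC and Xe2+ use the two-register
		 * invalidation descriptor, older platforms the single
		 * GUC_TLB_INV_CR register.
		 */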
		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}

	return 0;
}

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range-based TLB invalidation if supported, otherwise fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0;  /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 length = end - start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a coarser granularity if the start
		 * address is not aligned to the length: find a length large
		 * enough to create an address mask covering the required
		 * range.
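		 *
		 * For example, start = 0x3000 and end = 0x6000 (length 12K)
		 * rounds up to align = 16K, giving start = 0x0 and
		 * end = 0x8000. Since 0x0 + 16K does not reach 0x8000, the
		 * loop below doubles the length to 32K and re-aligns start,
		 * so [0, 32K) is invalidated.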
		 */
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * Minimum invalidation size for a 2MB page that the hardware
		 * expects is 16MB
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @vma: VMA to invalidate
 *
 * Issue a range-based TLB invalidation if supported, otherwise fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse the seqno of the TLB invalidation, wake any waiters for the seqno,
 * and signal any invalidation fences for the seqno. The algorithm for this
 * depends on seqnos being received in-order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and also in
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order in which they are processed here could result in
	 * skipping a seqno. To handle that we just process all the seqnos from
	 * the last seqno_recv up to and including the one in msg[0]. The delta
	 * should be very small, so there shouldn't be many pending_fences we
	 * actually need to iterate over here.
	 *
	 * From the GuC POV we expect the seqnos to always appear in-order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled, just that we have
	 * yet to officially process the CT message, as when racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is a stack variable
 *
 * Initialize the TLB invalidation fence for use.
 * xe_gt_tlb_invalidation_fence_fini() is called automatically when the fence
 * is signalled (all fences must signal), even on error.
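 *
 * Typical stack usage (see xe_gt_tlb_invalidation_ggtt()): initialize the
 * fence with @stack set, issue the invalidation, then wait with
 * xe_gt_tlb_invalidation_fence_wait().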
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}