// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_trace.h"

#define TLB_TIMEOUT	(HZ / 4)

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < TLB_TIMEOUT)
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(fence);
		drm_err(&gt_to_xe(gt)->drm, "gt%d: TLB invalidation fence timeout, seqno=%d recv=%d",
			gt->info.id, fence->seqno, gt->tlb_invalidation.seqno_recv);

		list_del(&fence->link);
		fence->base.error = -ETIME;
		dma_fence_signal(&fence->base);
		dma_fence_put(&fence->base);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   TLB_TIMEOUT);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Initialize GT TLB invalidation state. This is purely software
 * initialization and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

static void
__invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	trace_xe_gt_tlb_invalidation_fence_signal(fence);
	dma_fence_signal(&fence->base);
	dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(fence);
}

/**
 * xe_gt_tlb_invalidation_reset - TLB invalidation reset
 * @gt: graphics tile
 *
 * Signal any pending invalidation fences. Should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	struct xe_guc *guc = &gt->uc.guc;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence. At this stage
	 * those flushes can never arrive since the CT is already disabled, so
	 * make sure we signal those waiters here under the assumption that we
	 * have completed a full GT reset.
	 */
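	/*
	 * seqno 0 is never assigned, so setting seqno_recv to the seqno just
	 * before the current one (or to TLB_INVALIDATION_SEQNO_MAX - 1 when
	 * the next seqno is 1) covers every seqno that could possibly still be
	 * outstanding.
	 */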
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
	wake_up_all(&guc->ct.wq);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

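/*
 * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so a plain comparison against
 * seqno_recv is not enough near the wrap point. Differences of more than half
 * the seqno space are treated as wraparound. For example (illustrative
 * values): seqno == TLB_INVALIDATION_SEQNO_MAX - 1 with seqno_recv == 2 is
 * considered already past, while seqno == 2 with
 * seqno_recv == TLB_INVALIDATION_SEQNO_MAX - 1 is considered still pending.
 */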
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int seqno;
	int ret;

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are. If that ever changes, this
	 * algorithm will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	if (fence) {
		fence->seqno = seqno;
		trace_xe_gt_tlb_invalidation_fence_send(fence);
	}
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret && fence) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence via
		 * pending_fences yet, but in theory our seqno could already
		 * have been completed (seqno_recv written) by the time we
		 * acquired the pending_lock. In that case just go ahead and
		 * signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   TLB_TIMEOUT);
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else if (ret < 0 && fence) {
		__invalidation_fence_signal(fence);
	}
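	/*
	 * On success advance the seqno, wrapping at TLB_INVALIDATION_SEQNO_MAX
	 * and skipping 0 so that a valid seqno is always non-zero.
	 */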
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
		ret = seqno;
	}
	mutex_unlock(&guc->ct.lock);

	return ret;
}

#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: graphics tile
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous; the caller can use the returned seqno with
 * xe_gt_tlb_invalidation_wait() to wait for completion.
 *
 * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
 * negative error code on error.
 */
int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,  /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, NULL, action,
				     ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion, can be NULL
 * @vma: VMA to invalidate
 *
 * Issue a range-based TLB invalidation if supported, otherwise fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous;
 * the caller can either use the invalidation fence or the returned seqno with
 * xe_gt_tlb_invalidation_wait() to wait for completion.
 *
 * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
 * negative error code on error.
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, vma);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		if (fence)
			__invalidation_fence_signal(fence);

		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 start = xe_vma_start(vma);
		u64 length = xe_vma_size(vma);
		u64 align, end;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a higher granularity if the start
		 * address is not aligned to the length: grow the length until
		 * the resulting address mask covers the entire required range.
		 */
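		/*
		 * For example (illustrative addresses): an 8K VMA at 0x301000
		 * gives align = 0x2000, start = 0x300000 and end = 0x304000;
		 * since 0x300000 + 0x2000 < 0x304000 the length grows to
		 * 0x4000 and the range [0x300000, 0x304000) is invalidated.
		 */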
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(xe_vma_start(vma), align);
		end = ALIGN(xe_vma_end(vma), align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(xe_vma_start(vma), length);
		}

		/*
		 * The minimum invalidation size the hardware expects for a
		 * 2MB page is 16MB.
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(xe_vma_start(vma), length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = xe_vma_vm(vma)->usm.asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

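/*
 * Example usage of the seqno-based flow (a minimal sketch, assuming the caller
 * already holds a valid gt and vma; not taken from an in-tree user):
 *
 *	int seqno = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
 *
 *	if (seqno <= 0)
 *		return seqno;
 *
 *	return xe_gt_tlb_invalidation_wait(gt, seqno);
 */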
/**
 * xe_gt_tlb_invalidation_wait - Wait for a TLB invalidation to complete
 * @gt: graphics tile
 * @seqno: seqno to wait on, as returned from xe_gt_tlb_invalidation_vma() or
 * xe_gt_tlb_invalidation_guc()
 *
 * Wait up to TLB_TIMEOUT (HZ / 4, i.e. 250ms) for a TLB invalidation to
 * complete. In practice the invalidation should always be received well within
 * that window.
 *
 * Return: 0 on success, -ETIME on TLB invalidation timeout
 */
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_guc *guc = &gt->uc.guc;
	struct drm_printer p = drm_err_printer(__func__);
	int ret;

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist)
		return 0;

	/*
	 * XXX: See above, this algorithm only works if seqnos are always
	 * received in order.
	 */
	ret = wait_event_timeout(guc->ct.wq,
				 tlb_invalidation_seqno_past(gt, seqno),
				 TLB_TIMEOUT);
	if (!ret) {
		drm_err(&xe->drm, "gt%d: TLB invalidation timed out, seqno=%d, recv=%d\n",
			gt->info.id, seqno, gt->tlb_invalidation.seqno_recv);
		xe_guc_ct_print(&guc->ct, &p, true);
		return -ETIME;
	}

	return 0;
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse the seqno of the TLB invalidation, wake any waiters for that seqno,
 * and signal any invalidation fences up to and including it. The algorithm
 * relies on seqnos being received in order.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This handler can run both directly from the IRQ handler and from
	 * process_g2h_msg(). Only one of them may process any individual CT
	 * message, but the order in which they are processed here could result
	 * in skipping a seqno. To handle that we simply process all the seqnos
	 * from the last seqno_recv up to and including the one in msg[0]. The
	 * delta should be very small, so there shouldn't be many pending_fences
	 * to iterate over here.
	 *
	 * From the GuC POV we expect the seqnos to always appear in-order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled; we just have yet to
	 * officially process the CT message, e.g. when racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	/*
	 * wake_up_all() and wait_event_timeout() already have the correct
	 * barriers.
	 */
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
	wake_up_all(&guc->ct.wq);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(fence);
	}

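	/* Re-arm the fence timeout if fences remain pending, else cancel it */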
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 TLB_TIMEOUT);
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}