// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_bb.h"

#include "instructions/xe_mi_commands.h"
#include "regs/xe_gpu_commands.h"
#include "xe_device.h"
#include "xe_exec_queue_types.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_vm_types.h"

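/*
 * Number of bytes that must remain usable past the end of the emitted
 * commands so that the command streamer's hardware prefetch never reads
 * beyond the suballocation (see the assertion in __xe_bb_create_job()).
 */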
static int bb_prefetch(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
		/*
		 * RCS and CCS require 1K, although other engines would be
		 * okay with 512.
		 */
		return SZ_1K;
	else
		return SZ_512;
}

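/**
 * xe_bb_new() - Allocate a batch buffer from a suballocator pool
 * @gt: the &xe_gt the batch buffer will execute on
 * @dwords: number of dwords of command space to reserve
 * @usm: if true, allocate from the GT's USM pool instead of the tile's
 *	kernel batch buffer pool
 *
 * The allocation is padded with room for a terminating
 * MI_BATCH_BUFFER_END plus the hardware prefetch requirement reported
 * by bb_prefetch().
 *
 * Return: pointer to the new &xe_bb, or an ERR_PTR() on failure.
 */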
struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
	int err;

	if (!bb)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to allocate space for the requested number of dwords,
	 * one additional MI_BATCH_BUFFER_END dword, and additional buffer
	 * space to accommodate the platform-specific hardware prefetch
	 * requirements.
	 */
	bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
			      4 * (dwords + 1) + bb_prefetch(gt));
	if (IS_ERR(bb->bo)) {
		err = PTR_ERR(bb->bo);
		goto err;
	}

	bb->cs = xe_sa_bo_cpu_addr(bb->bo);
	bb->len = 0;

	return bb;
err:
	kfree(bb);
	return ERR_PTR(err);
}

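/*
 * Terminate the batch with MI_BATCH_BUFFER_END, flush the CPU-side
 * writes to the suballocation, and wrap the batch in a scheduler job
 * starting at @addr.
 */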
static struct xe_sched_job *
__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
	u32 size = drm_suballoc_size(bb->bo);

	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;

	xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size);

	xe_sa_bo_flush_write(bb->bo);

	return xe_sched_job_create(q, addr);
}

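/**
 * xe_bb_create_migration_job() - Create a scheduler job for a migration
 * batch buffer
 * @q: the migration &xe_exec_queue to run the job on
 * @bb: batch buffer containing the commands
 * @batch_base_ofs: offset at which the suballocator pool is mapped in
 *	the migration VM's address space
 * @second_idx: dword offset within @bb where the second batch section
 *	begins; the job is created with two start addresses, the batch
 *	start and the batch start plus 4 * @second_idx bytes
 *
 * Return: pointer to the new &xe_sched_job, or an ERR_PTR() on failure.
 */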
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
						struct xe_bb *bb,
						u64 batch_base_ofs,
						u32 second_idx)
{
	u64 addr[2] = {
		batch_base_ofs + drm_suballoc_soffset(bb->bo),
		batch_base_ofs + drm_suballoc_soffset(bb->bo) +
		4 * second_idx,
	};

	xe_gt_assert(q->gt, second_idx <= bb->len);
	xe_gt_assert(q->gt, q->vm->flags & XE_VM_FLAG_MIGRATION);

	return __xe_bb_create_job(q, bb, addr);
}

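/**
 * xe_bb_create_job() - Create a scheduler job for a batch buffer
 * @q: the &xe_exec_queue to run the job on
 * @bb: batch buffer containing the commands
 *
 * For queues bound to a migration VM, use xe_bb_create_migration_job()
 * instead.
 *
 * Return: pointer to the new &xe_sched_job, or an ERR_PTR() on failure.
 */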
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
				      struct xe_bb *bb)
{
	u64 addr = xe_sa_bo_gpu_addr(bb->bo);

	xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
	return __xe_bb_create_job(q, bb, &addr);
}

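/**
 * xe_bb_free() - Free a batch buffer
 * @bb: batch buffer to free, may be NULL
 * @fence: optional fence; the suballocation is released once it
 *	signals, or immediately if @fence is NULL
 */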
void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)
{
	if (!bb)
		return;

	xe_sa_bo_free(bb->bo, fence);
	kfree(bb);
}