// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_submit.h"

#include <linux/poison.h>
#include <linux/random.h>

#include "abi/gsc_command_header_abi.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_exec_queue.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_map.h"
#include "xe_sched_job.h"
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"

#define GSC_HDR_SIZE (sizeof(struct intel_gsc_mtl_header)) /* shorthand define */

#define mtl_gsc_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct intel_gsc_mtl_header, field_, val_)

#define mtl_gsc_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct intel_gsc_mtl_header, field_)

/*
 * GSC FW allows us to define the host_session_handle as we see fit, as long
 * as we use a unique identifier for each user, with handle 0 being reserved
 * for kernel usage.
 * To be able to differentiate which client subsystem owns the given session,
 * we include the client id in the top 8 bits of the handle.
 */
#define HOST_SESSION_CLIENT_MASK GENMASK_ULL(63, 56)
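
/*
 * Handle layout sketch (values are illustrative, not from the GSC spec):
 * for a client with heci_client_id 0xAB, a generated session id such as
 * 0x00cdef0123456789 becomes the handle 0xabcdef0123456789 once
 * xe_gsc_emit_header() ORs the client id into bits 63:56.
 */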

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

/**
 * xe_gsc_create_host_session_id - Creates a random 64 bit host_session id with
 * bits 56-63 masked off, leaving room for the client id that
 * xe_gsc_emit_header() inserts in those bits.
 *
 * Returns: random host_session_id which can be used to send messages to the GSC
 */
u64 xe_gsc_create_host_session_id(void)
{
	u64 host_session_id;

	get_random_bytes(&host_session_id, sizeof(u64));
	host_session_id &= ~HOST_SESSION_CLIENT_MASK;
	return host_session_id;
}
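
/*
 * Usage sketch (illustrative; "client_id" is a hypothetical value, see the
 * abi header for the real ones): a client subsystem typically generates one
 * session id up front and reuses it for every message it sends:
 *
 *	u64 session = xe_gsc_create_host_session_id();
 *	...
 *	wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, client_id,
 *				       session, payload_size);
 */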

/**
 * xe_gsc_emit_header - write the MTL GSC header in memory
 * @xe: the Xe device
 * @map: the iosys map to write to
 * @offset: offset from the start of the map at which to write the header
 * @heci_client_id: client id identifying the type of command (see abi for values)
 * @host_session_id: host session ID of the caller
 * @payload_size: size of the payload that follows the header
 *
 * Returns: offset of the memory location following the header
 */
u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
		       u8 heci_client_id, u64 host_session_id, u32 payload_size)
{
	xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK));

	/* the kernel session (handle 0) does not carry a client id */
	if (host_session_id)
		host_session_id |= FIELD_PREP(HOST_SESSION_CLIENT_MASK, heci_client_id);

	xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE);

	mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER);
	mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id);
	mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id);
	mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION);
	mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE);

	return offset + GSC_HDR_SIZE;
}
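
/*
 * Sketch of a complete message write (hypothetical "msg" payload): the
 * payload is copied right after the header, at the offset returned above,
 * and message_size already accounts for both parts:
 *
 *	offset = xe_gsc_emit_header(xe, map, 0, client_id, session, sizeof(msg));
 *	xe_map_memcpy_to(xe, map, offset, &msg, sizeof(msg));
 */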

/**
 * xe_gsc_poison_header - poison the MTL GSC header in memory
 * @xe: the Xe device
 * @map: the iosys map to write to
 * @offset: offset from the start of the map at which the header resides
 *
 * Poisoning the reply header before a submission can be used to tell a
 * GSC-written reply apart from stale data left over in the buffer.
 */
void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE);
}

/**
 * xe_gsc_check_and_update_pending - check the pending bit and update the input
 * header with the retry handle from the output header
 * @xe: the Xe device
 * @in: the iosys map containing the input buffer
 * @offset_in: offset within the iosys map at which the input buffer is located
 * @out: the iosys map containing the output buffer
 * @offset_out: offset within the iosys map at which the output buffer is located
 *
 * Returns: true if the pending bit was set, false otherwise
 */
bool xe_gsc_check_and_update_pending(struct xe_device *xe,
				     struct iosys_map *in, u32 offset_in,
				     struct iosys_map *out, u32 offset_out)
{
	if (mtl_gsc_header_rd(xe, out, offset_out, flags) & GSC_OUTFLAG_MSG_PENDING) {
		u64 handle = mtl_gsc_header_rd(xe, out, offset_out, gsc_message_handle);

		mtl_gsc_header_wr(xe, in, offset_in, gsc_message_handle, handle);

		return true;
	}

	return false;
}
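
/*
 * Retry loop sketch (illustrative; error handling omitted): when the GSC
 * reports the message as pending, the same input buffer is resubmitted
 * with the retry handle that this helper copied over:
 *
 *	do {
 *		xe_gsc_poison_header(xe, out, out_offset);
 *		err = xe_gsc_pkt_submit_kernel(gsc, in_addr, in_size,
 *					       out_addr, out_size);
 *	} while (!err &&
 *		 xe_gsc_check_and_update_pending(xe, in, in_offset,
 *						 out, out_offset));
 */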

/**
 * xe_gsc_read_out_header - reads and validates the output header and returns
 * the offset of the reply following the header
 * @xe: the Xe device
 * @map: the iosys map containing the output buffer
 * @offset: offset within the iosys map at which the output buffer is located
 * @min_payload_size: minimum size of the message excluding the gsc header
 * @payload_offset: optional pointer to be set to the payload offset
 *
 * Returns: -errno value on failure, 0 otherwise
 */
int xe_gsc_read_out_header(struct xe_device *xe,
			   struct iosys_map *map, u32 offset,
			   u32 min_payload_size,
			   u32 *payload_offset)
{
	u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker);
	u32 size = mtl_gsc_header_rd(xe, map, offset, message_size);
	u32 status = mtl_gsc_header_rd(xe, map, offset, status);
	u32 payload_size = size - GSC_HDR_SIZE;

	if (marker != GSC_HECI_VALIDITY_MARKER)
		return -EPROTO;

	if (status != 0) {
		drm_err(&xe->drm, "GSC header readout indicates error: %d\n",
			status);
		return -EINVAL;
	}

	if (size < GSC_HDR_SIZE || payload_size < min_payload_size)
		return -ENODATA;

	if (payload_offset)
		*payload_offset = offset + GSC_HDR_SIZE;

	return 0;
}
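
/*
 * Reply handling sketch (illustrative): after a successful submission the
 * reply payload can be located and copied out:
 *
 *	err = xe_gsc_read_out_header(xe, &bo->vmap, out_offset,
 *				     sizeof(reply), &rd_offset);
 *	if (!err)
 *		xe_map_memcpy_from(xe, &reply, &bo->vmap, rd_offset,
 *				   sizeof(reply));
 */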

/**
 * xe_gsc_pkt_submit_kernel - submit a kernel heci pkt to the GSC
 * @gsc: the GSC uC
 * @addr_in: GGTT address of the message to send to the GSC
 * @size_in: size of the message to send to the GSC
 * @addr_out: GGTT address for the GSC to write the reply to
 * @size_out: size of the memory reserved for the reply
 *
 * Returns: 0 on success, negative error code otherwise
 */
int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
			     u64 addr_out, u32 size_out)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_bb *bb;
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	if (size_in < GSC_HDR_SIZE)
		return -ENODATA;

	if (size_out < GSC_HDR_SIZE)
		return -ENOMEM;

	bb = xe_bb_new(gt, 8, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	/* GSC_HECI_CMD_PKT: source address and size, destination address and size */
	bb->cs[bb->len++] = GSC_HECI_CMD_PKT;
	bb->cs[bb->len++] = lower_32_bits(addr_in);
	bb->cs[bb->len++] = upper_32_bits(addr_in);
	bb->cs[bb->len++] = size_in;
	bb->cs[bb->len++] = lower_32_bits(addr_out);
	bb->cs[bb->len++] = upper_32_bits(addr_out);
	bb->cs[bb->len++] = size_out;
	bb->cs[bb->len++] = 0;

	job = xe_bb_create_job(gsc->q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	/* wait (up to 1s) for the submission to complete */
	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}
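
/*
 * Call sketch (hypothetical addresses): the input and output regions are
 * GGTT-mapped and may live in the same BO at different offsets:
 *
 *	err = xe_gsc_pkt_submit_kernel(gsc, ggtt_addr, msg_size,
 *				       ggtt_addr + PAGE_SIZE, PAGE_SIZE);
 */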