/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

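/* The mailbox control registers are accessed one byte at a time: writing 2
 * to the RCV control byte sets RCV_MSG_ACK (acknowledging a message from the
 * host), and bit 0 of the TRN control byte is TRN_MSG_VALID for outgoing
 * messages.
 */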
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since it may not be reading RCV_DW0 while RCV_MSG_VALID
 * is set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

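/* Check whether the message in RCV_DW0 matches the expected event and, if
 * so, acknowledge it to the host.
 */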
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

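/* Bit 1 of the TRN control byte is TRN_MSG_ACK, set by hardware once the
 * host has acknowledged our transmitted message.
 */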
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

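/* Transmit a request to the host: the request ID goes into TRN_DW0 and up to
 * three data words into TRN_DW1..DW3, then TRN_MSG_VALID is raised and we
 * poll for the host's ack before dropping TRN_MSG_VALID again.
 */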
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: this clears the host's RCV_MSG_VALID,
	 * upon which hardware automatically clears the host's RCV_MSG_ACK and
	 * hence the VF's TRN_MSG_ACK. Otherwise the xgpu_ai_poll_ack() below
	 * would return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted, waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start polling for the ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing anyway\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

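/* Query the host for the pp clock table of the given type (PP_SCLK or
 * PP_MCLK). On success the host returns, in RCV_DW1, a byte offset into the
 * pf2vf reserve region where the table string lives; the string is copied
 * into buf and its length returned. Negative values indicate errors.
 */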
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
	int r = 0;
	u32 req, val, size;

	if (!amdgim_is_hwperf(adev) || buf == NULL)
		return -EBADRQC;

	switch (type) {
	case PP_SCLK:
		req = IDH_IRQ_GET_PP_SCLK;
		break;
	case PP_MCLK:
		req = IDH_IRQ_GET_PP_MCLK;
		break;
	default:
		return -EBADRQC;
	}

	mutex_lock(&adev->virt.dpm_mutex);

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r && adev->fw_vram_usage.va != NULL) {
		val = RREG32_NO_KIQ(
			SOC15_REG_OFFSET(NBIO, 0,
					 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
		size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
				val), PAGE_SIZE);

		if (size < PAGE_SIZE)
			strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
		else
			size = 0;

		r = size;
		goto out;
	}

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (r)
		pr_info("%s DPM request failed\n",
			(type == PP_SCLK) ? "SCLK" : "MCLK");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}

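/* Ask the host to force a DPM level on behalf of the VF. The host replies
 * with IDH_SUCCESS or IDH_FAIL; getting neither means the mailbox is broken.
 */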
static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
{
	int r = 0;
	u32 req = IDH_IRQ_FORCE_DPM_LEVEL;

	if (!amdgim_is_hwperf(adev))
		return -EBADRQC;

	mutex_lock(&adev->virt.dpm_mutex);
	xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r)
		goto out;

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (!r)
		pr_info("DPM request failed\n");
	else
		pr_info("Mailbox is broken\n");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}

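/* Send a request to the host and, for exclusive-access requests, wait for
 * READY_TO_ACCESS_GPU and fetch the checksum key the host leaves in RCV_DW2.
 */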
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start checking for the msg if this is one of the GPU access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack interrupt, nothing to do\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

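/* Worker scheduled from the mailbox IRQ when the host signals a VF
 * function-level reset (FLR); it waits for the host to report FLR
 * completion and then triggers GPU recovery if appropriate.
 */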
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_device_gpu_recover() until the FLR COMPLETE message is
	 * received; otherwise the mailbox message would be ruined/reset by
	 * the VF FLR.
	 *
	 * lock_reset can be released to let "amdgpu_job_timedout" run
	 * gpu_recover() once FLR_NOTIFICATION_CMPL is received, which means
	 * the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
	    && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by the kernel's polling thread, so
	 * the IRQ handler can safely ignore it; other messages such as FLR
	 * complete are likewise not handled here.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

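/* Register the two BIF mailbox interrupt sources with the IH: src_id 135
 * (message valid) feeds rcv_irq and src_id 138 (ack) feeds ack_irq.
 */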
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

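/* VF-to-host mailbox operations exported to the rest of amdgpu for SR-IOV. */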
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.get_pp_clk = xgpu_ai_get_pp_clk,
	.force_dpm_level = xgpu_ai_force_dpm_level,
};