/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

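/*
 * Mailbox control helpers: as used below, the TRN and RCV control bytes of
 * the PF/VF mailbox each carry a VALID bit (bit 0) and an ACK bit (bit 1).
 * send_ack() sets RCV_MSG_ACK to acknowledge a message from the host;
 * set_valid() drives TRN_MSG_VALID for messages sent by this VF.
 */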
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If it is not called from the IRQ routine, peek_msg is not guaranteed to
 * return the correct value, since RCV_DW0 is only meaningful while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

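/*
 * Busy-wait for the host to raise TRN_MSG_ACK, checking every 5 ms for up
 * to NV_MAILBOX_POLL_ACK_TIMEDOUT msec.  Returns 0 on ack, -ETIME otherwise.
 */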
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

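/*
 * Poll RCV_DW0 for the expected message for up to
 * NV_MAILBOX_POLL_MSG_TIMEDOUT msec, acking it once it arrives.
 */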
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	return -ETIME;
}

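/*
 * Transmit a request to the host: wait for any stale ack to clear, write the
 * message and its payload into TRN_DW0..DW3, raise TRN_MSG_VALID and then
 * poll for the host's ack.
 */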
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: with it cleared, the host's RCV_MSG_ACK is
	 * cleared, and hardware then clears the VF's TRN_MSG_ACK as well.
	 * Otherwise xgpu_nv_poll_ack() below would return immediately on a
	 * stale ack.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

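/*
 * Send an access request to the host and, for requests that expect a reply,
 * wait for the matching event in the receive mailbox.
 */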
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;
	enum idh_event event = -1;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else {
				/* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
			}
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

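/*
 * FLR work handler: runs after the host signals an FLR notification.  It
 * waits for IDH_FLR_NOTIFICATION_CMPL and then kicks off GPU recovery when
 * no per-ring TDR is armed.
 */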
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover till msg FLR COMPLETE is received,
	 * otherwise the mailbox msg will be ruined/reset by the VF FLR.
	 *
	 * We unlock lock_reset to allow "amdgpu_job_timedout" to run
	 * gpu_recover() after FLR_NOTIFICATION_CMPL is received, which
	 * means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

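/*
 * Mailbox control helpers: as used below, the TRN and RCV control bytes of
 * the PF/VF mailbox each carry a VALID bit (bit 0) and an ACK bit (bit 1).
 * send_ack() sets RCV_MSG_ACK to acknowledge a message from the host;
 * set_valid() drives TRN_MSG_VALID for messages sent by this VF.
 */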
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If it is not called from the IRQ routine, peek_msg is not guaranteed to
 * return the correct value, since RCV_DW0 is only meaningful while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

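/*
 * Busy-wait for the host to raise TRN_MSG_ACK, checking every 5 ms for up
 * to NV_MAILBOX_POLL_ACK_TIMEDOUT msec.  Returns 0 on ack, -ETIME otherwise.
 */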
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

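/*
 * Poll RCV_DW0 for the expected message until an
 * NV_MAILBOX_POLL_MSG_TIMEDOUT msec deadline expires, acking the message
 * once it arrives.
 */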
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}

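/*
 * Transmit a request to the host: wait for any stale ack to clear, write the
 * message and its payload into TRN_DW0..DW3, raise TRN_MSG_VALID and then
 * poll for the host's ack.
 */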
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: with it cleared, the host's RCV_MSG_ACK is
	 * cleared, and hardware then clears the VF's TRN_MSG_ACK as well.
	 * Otherwise xgpu_nv_poll_ack() below would return immediately on a
	 * stale ack.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

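/*
 * Send an access request to the host and, for requests that expect a reply,
 * wait for the matching event in the receive mailbox, resending the request
 * once if the reply does not arrive in time.
 */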
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 2)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else {
				/* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
			}
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

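/*
 * FLR work handler: runs after the host signals an FLR notification.  It
 * takes exclusive ownership of the reset domain, tells the host the VF is
 * ready to be reset, waits for IDH_FLR_NOTIFICATION_CMPL and then kicks off
 * GPU recovery when needed.
 */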
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* Block amdgpu_gpu_recover till msg FLR COMPLETE is received,
	 * otherwise the mailbox msg will be ruined/reset by the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};