Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4#include <linux/dynamic_debug.h>
5
6#include "core.h"
7
/* Context for a caller sleeping on an AdminQ command completion;
 * lives on the requester's stack while the command is in flight.
 */
struct pdsc_wait_context {
	struct pdsc_qcq *qcq;			/* adminq qcq the command was posted to */
	struct completion wait_completion;	/* completed by pdsc_process_adminq() */
};
12
/* Drain newly arrived events from the NotifyQ completion ring.
 *
 * Events carry a monotonically increasing event id (eid); entries are
 * consumed until the next descriptor's eid is not newer than the last
 * one we processed.  Known event types are forwarded to the client
 * notifier chain via pdsc_notify().
 *
 * Return: number of events processed (0 if none were pending).
 */
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
	union pds_core_notifyq_comp *comp;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_cq_info *cq_info;
	int nq_work = 0;
	u64 eid;

	cq_info = &cq->info[cq->tail_idx];
	comp = cq_info->comp;
	eid = le64_to_cpu(comp->event.eid);
	while (eid > pdsc->last_eid) {
		u16 ecode = le16_to_cpu(comp->event.ecode);

		switch (ecode) {
		case PDS_EVENT_LINK_CHANGE:
			dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
				 ecode, eid);
			pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
			break;

		case PDS_EVENT_RESET:
			dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
				 ecode, eid);
			pdsc_notify(PDS_EVENT_RESET, comp);
			break;

		case PDS_EVENT_XCVR:
			dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
				 ecode, eid);
			break;

		default:
			dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
				 ecode, eid);
			break;
		}

		/* Remember the newest eid seen, then advance to the next
		 * descriptor; the mask wraps the ring index (num_descs is
		 * assumed to be a power of two)
		 */
		pdsc->last_eid = eid;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		cq_info = &cq->info[cq->tail_idx];
		comp = cq_info->comp;
		eid = le64_to_cpu(comp->event.eid);

		nq_work++;
	}

	qcq->accum_work += nq_work;

	return nq_work;
}
65
66static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
67{
68 if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
69 pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
70 return false;
71
72 return refcount_inc_not_zero(&pdsc->adminq_refcnt);
73}
74
/* Service the AdminQ: handle any pending NotifyQ events, then clean
 * AdminQ completions, copying each completion out to the requester's
 * buffer and waking the waiter.
 *
 * Runs from the adminq work item; takes an adminq reference so the
 * queue can't be torn down while we're using it.
 */
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Copy out the completion data */
		memcpy(q_info->dest, comp, sizeof(*comp));

		/* Wake the requester sleeping in pdsc_adminq_post() */
		complete_all(&q_info->wc->wait_completion);

		/* The expected "done" color flips each time the CQ wraps */
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion.
	 * NOTE(review): this credits/rearms even when no work was found
	 * (nq_work + aq_work == 0); confirm a zero-credit rearm write is
	 * intended here.
	 */
	pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
			      nq_work + aq_work,
			      PDS_CORE_INTR_CRED_REARM);
	refcount_dec(&pdsc->adminq_refcnt);
}
135
136void pdsc_work_thread(struct work_struct *work)
137{
138 struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
139
140 pdsc_process_adminq(qcq);
141}
142
143irqreturn_t pdsc_adminq_isr(int irq, void *data)
144{
145 struct pdsc *pdsc = data;
146 struct pdsc_qcq *qcq;
147
148 /* Don't process AdminQ when it's not up */
149 if (!pdsc_adminq_inc_if_up(pdsc)) {
150 dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
151 __func__);
152 return IRQ_HANDLED;
153 }
154
155 qcq = &pdsc->adminqcq;
156 queue_work(pdsc->wq, &qcq->work);
157 refcount_dec(&pdsc->adminq_refcnt);
158
159 return IRQ_HANDLED;
160}
161
/* Post one command descriptor onto the AdminQ.
 *
 * Records @wc and @comp in the chosen queue slot so the completion
 * handler can copy the response out and wake the waiter, then rings
 * the doorbell to hand the descriptor to the FW.  Runs under the
 * adminq_lock.
 *
 * Return: the queue index used for the command on success, -ENOSPC
 * if the queue is full, or -ENXIO if the FW is not running.
 */
static int __pdsc_adminq_post(struct pdsc *pdsc,
			      struct pdsc_qcq *qcq,
			      union pds_core_adminq_cmd *cmd,
			      union pds_core_adminq_comp *comp,
			      struct pdsc_wait_context *wc)
{
	struct pdsc_queue *q = &qcq->q;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	unsigned int avail;
	int index;
	int ret;

	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);

	/* Check for space in the queue; one descriptor is held back
	 * so that a full ring can be told apart from an empty one
	 */
	avail = q->tail_idx;
	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	if (!avail) {
		ret = -ENOSPC;
		goto err_out_unlock;
	}

	/* Check that the FW is running */
	if (!pdsc_is_fw_running(pdsc)) {
		if (pdsc->info_regs) {
			u8 fw_status =
				ioread8(&pdsc->info_regs->fw_status);

			dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
				 __func__, fw_status);
		} else {
			dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
				 __func__);
		}
		ret = -ENXIO;

		goto err_out_unlock;
	}

	/* Post the request */
	index = q->head_idx;
	q_info = &q->info[index];
	q_info->wc = wc;
	q_info->dest = comp;
	memcpy(q_info->desc, cmd, sizeof(*cmd));

	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
		q->head_idx, q->tail_idx);
	dev_dbg(pdsc->dev, "post admin queue command:\n");
	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
			 cmd, sizeof(*cmd), true);

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	/* Ring the doorbell to tell the FW about the new descriptor */
	pds_core_dbell_ring(pdsc->kern_dbpage,
			    q->hw_type, q->dbval | q->head_idx);
	ret = index;

err_out_unlock:
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
	return ret;
}
228
/* Post an AdminQ command and wait for its completion.
 *
 * @pdsc:      device the command is sent to
 * @cmd:       command to post
 * @comp:      buffer the completion will be copied into
 * @fast_poll: if true, keep polling at the minimum interval instead
 *             of exponentially backing off
 *
 * Sleeps in timeslices so FW failure can be noticed before the full
 * devcmd timeout expires.  On -ENXIO or -ETIMEDOUT the health work
 * is queued to check on the device.
 *
 * Return: 0 on success, or a negative errno.
 */
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	struct pdsc_wait_context wc = {
		.wait_completion =
			COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
	};
	unsigned long poll_interval = 1;
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	int err = 0;
	int index;

	/* Take an adminq reference; refuse if the adminq is down */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
			__func__, cmd->opcode);
		return -ENXIO;
	}

	wc.qcq = &pdsc->adminqcq;
	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = msecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(&wc.wait_completion,
							poll_jiffies);
		if (remaining)
			break;

		/* Stop waiting if the FW died along the way */
		if (!pdsc_is_fw_running(pdsc)) {
			if (pdsc->info_regs) {
				u8 fw_status =
					ioread8(&pdsc->info_regs->fw_status);

				dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
					__func__, fw_status);
			} else {
				dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
					__func__);
			}
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit))
		err = -ETIMEDOUT;

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	/* remaining != 0 means we were woken with a completion in hand */
	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		/* A dead FW or a timeout warrants a health check */
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	refcount_dec(&pdsc->adminq_refcnt);

	return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4#include <linux/dynamic_debug.h>
5
6#include "core.h"
7
/* Context for a caller sleeping on an AdminQ command completion;
 * lives on the requester's stack while the command is in flight.
 */
struct pdsc_wait_context {
	struct pdsc_qcq *qcq;			/* adminq qcq the command was posted to */
	struct completion wait_completion;	/* completed by pdsc_process_adminq() */
};
12
/* Drain newly arrived events from the NotifyQ completion ring.
 *
 * Events carry a monotonically increasing event id (eid); entries are
 * consumed until the next descriptor's eid is not newer than the last
 * one we processed.  Known event types are forwarded to the client
 * notifier chain via pdsc_notify().
 *
 * Return: number of events processed (0 if none were pending).
 */
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
	union pds_core_notifyq_comp *comp;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_cq_info *cq_info;
	int nq_work = 0;
	u64 eid;

	cq_info = &cq->info[cq->tail_idx];
	comp = cq_info->comp;
	eid = le64_to_cpu(comp->event.eid);
	while (eid > pdsc->last_eid) {
		u16 ecode = le16_to_cpu(comp->event.ecode);

		switch (ecode) {
		case PDS_EVENT_LINK_CHANGE:
			dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
				 ecode, eid);
			pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
			break;

		case PDS_EVENT_RESET:
			dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
				 ecode, eid);
			pdsc_notify(PDS_EVENT_RESET, comp);
			break;

		case PDS_EVENT_XCVR:
			dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
				 ecode, eid);
			break;

		default:
			dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
				 ecode, eid);
			break;
		}

		/* Remember the newest eid seen, then advance to the next
		 * descriptor; the mask wraps the ring index (num_descs is
		 * assumed to be a power of two)
		 */
		pdsc->last_eid = eid;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		cq_info = &cq->info[cq->tail_idx];
		comp = cq_info->comp;
		eid = le64_to_cpu(comp->event.eid);

		nq_work++;
	}

	qcq->accum_work += nq_work;

	return nq_work;
}
65
66static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
67{
68 if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
69 pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
70 return false;
71
72 return refcount_inc_not_zero(&pdsc->adminq_refcnt);
73}
74
/* Service the AdminQ: handle any pending NotifyQ events, then clean
 * AdminQ completions, copying each completion out to the requester's
 * buffer and waking the waiter.
 *
 * Runs from the adminq work item; takes an adminq reference so the
 * queue can't be torn down while we're using it.
 */
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;
	int credits;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Copy out the completion data */
		memcpy(q_info->dest, comp, sizeof(*comp));

		/* Wake the requester sleeping in pdsc_adminq_post() */
		complete_all(&q_info->wc->wait_completion);

		/* The expected "done" color flips each time the CQ wraps */
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion;
	 * skip the register write entirely when there was no work
	 */
	credits = nq_work + aq_work;
	if (credits)
		pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
				      credits,
				      PDS_CORE_INTR_CRED_REARM);
	refcount_dec(&pdsc->adminq_refcnt);
}
138
139void pdsc_work_thread(struct work_struct *work)
140{
141 struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
142
143 pdsc_process_adminq(qcq);
144}
145
146irqreturn_t pdsc_adminq_isr(int irq, void *data)
147{
148 struct pdsc *pdsc = data;
149 struct pdsc_qcq *qcq;
150
151 /* Don't process AdminQ when it's not up */
152 if (!pdsc_adminq_inc_if_up(pdsc)) {
153 dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
154 __func__);
155 return IRQ_HANDLED;
156 }
157
158 qcq = &pdsc->adminqcq;
159 queue_work(pdsc->wq, &qcq->work);
160 pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
161 refcount_dec(&pdsc->adminq_refcnt);
162
163 return IRQ_HANDLED;
164}
165
/* Post one command descriptor onto the AdminQ.
 *
 * Records @wc and @comp in the chosen queue slot so the completion
 * handler can copy the response out and wake the waiter, then rings
 * the doorbell to hand the descriptor to the FW.  Runs under the
 * adminq_lock.
 *
 * Return: the queue index used for the command on success, -ENOSPC
 * if the queue is full, or -ENXIO if the FW is not running.
 */
static int __pdsc_adminq_post(struct pdsc *pdsc,
			      struct pdsc_qcq *qcq,
			      union pds_core_adminq_cmd *cmd,
			      union pds_core_adminq_comp *comp,
			      struct pdsc_wait_context *wc)
{
	struct pdsc_queue *q = &qcq->q;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	unsigned int avail;
	int index;
	int ret;

	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);

	/* Check for space in the queue; one descriptor is held back
	 * so that a full ring can be told apart from an empty one
	 */
	avail = q->tail_idx;
	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	if (!avail) {
		ret = -ENOSPC;
		goto err_out_unlock;
	}

	/* Check that the FW is running */
	if (!pdsc_is_fw_running(pdsc)) {
		if (pdsc->info_regs) {
			u8 fw_status =
				ioread8(&pdsc->info_regs->fw_status);

			dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
				 __func__, fw_status);
		} else {
			dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
				 __func__);
		}
		ret = -ENXIO;

		goto err_out_unlock;
	}

	/* Post the request */
	index = q->head_idx;
	q_info = &q->info[index];
	q_info->wc = wc;
	q_info->dest = comp;
	memcpy(q_info->desc, cmd, sizeof(*cmd));

	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
		q->head_idx, q->tail_idx);
	dev_dbg(pdsc->dev, "post admin queue command:\n");
	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
			 cmd, sizeof(*cmd), true);

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	/* Ring the doorbell to tell the FW about the new descriptor */
	pds_core_dbell_ring(pdsc->kern_dbpage,
			    q->hw_type, q->dbval | q->head_idx);
	ret = index;

err_out_unlock:
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
	return ret;
}
232
/* Post an AdminQ command and wait for its completion.
 *
 * @pdsc:      device the command is sent to
 * @cmd:       command to post
 * @comp:      buffer the completion will be copied into
 * @fast_poll: if true, keep polling at the minimum interval instead
 *             of exponentially backing off
 *
 * Sleeps in timeslices so FW failure can be noticed before the full
 * devcmd timeout expires.  On -ENXIO or -ETIMEDOUT the health work
 * is queued to check on the device.
 *
 * Return: 0 on success, or a negative errno.
 */
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	struct pdsc_wait_context wc = {
		.wait_completion =
			COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
	};
	unsigned long poll_interval = 1;
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	int err = 0;
	int index;

	/* Take an adminq reference; refuse if the adminq is down */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
			__func__, cmd->opcode);
		return -ENXIO;
	}

	wc.qcq = &pdsc->adminqcq;
	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = msecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(&wc.wait_completion,
							poll_jiffies);
		if (remaining)
			break;

		/* Stop waiting if the FW died along the way */
		if (!pdsc_is_fw_running(pdsc)) {
			if (pdsc->info_regs) {
				u8 fw_status =
					ioread8(&pdsc->info_regs->fw_status);

				dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
					__func__, fw_status);
			} else {
				dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
					__func__);
			}
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit))
		err = -ETIMEDOUT;

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	/* remaining != 0 means we were woken with a completion in hand */
	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		/* A dead FW or a timeout warrants a health check */
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	refcount_dec(&pdsc->adminq_refcnt);

	return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);