1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4 */
5
6#include "ena_com.h"
7
8/*****************************************************************************/
9/*****************************************************************************/
10
11/* Timeout in micro-sec */
12#define ADMIN_CMD_TIMEOUT_US (3000000)
13
14#define ENA_ASYNC_QUEUE_DEPTH 16
15#define ENA_ADMIN_QUEUE_DEPTH 32
16
17
18#define ENA_CTRL_MAJOR 0
19#define ENA_CTRL_MINOR 0
20#define ENA_CTRL_SUB_MINOR 1
21
22#define MIN_ENA_CTRL_VER \
23 (((ENA_CTRL_MAJOR) << \
24 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
25 ((ENA_CTRL_MINOR) << \
26 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
27 (ENA_CTRL_SUB_MINOR))
28
29#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
30#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
31
32#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
33
34#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
35
36#define ENA_REGS_ADMIN_INTR_MASK 1
37
38#define ENA_MAX_BACKOFF_DELAY_EXP 16U
39
40#define ENA_MIN_ADMIN_POLL_US 100
41
42#define ENA_MAX_ADMIN_POLL_US 5000
43
44/*****************************************************************************/
45/*****************************************************************************/
46/*****************************************************************************/
47
48enum ena_cmd_status {
49 ENA_CMD_SUBMITTED,
50 ENA_CMD_COMPLETED,
51 /* Abort - canceled by the driver */
52 ENA_CMD_ABORTED,
53};
54
55struct ena_comp_ctx {
56 struct completion wait_event;
57 struct ena_admin_acq_entry *user_cqe;
58 u32 comp_size;
59 enum ena_cmd_status status;
60 /* status from the device */
61 u8 comp_status;
62 u8 cmd_opcode;
63 bool occupied;
64};
65
66struct ena_com_stats_ctx {
67 struct ena_admin_aq_get_stats_cmd get_cmd;
68 struct ena_admin_acq_get_stats_resp get_resp;
69};
70
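/* Validate that @addr fits within the DMA address width reported by the
 * device and split it into the low/high parts of the common memory
 * address descriptor (the high part is only 16 bits wide).
 */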
71static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
72 struct ena_common_mem_addr *ena_addr,
73 dma_addr_t addr)
74{
75 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
76 netdev_err(ena_dev->net_device,
77 "DMA address has more bits that the device supports\n");
78 return -EINVAL;
79 }
80
81 ena_addr->mem_addr_low = lower_32_bits(addr);
82 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
83
84 return 0;
85}
86
87static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
88{
89 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
90 struct ena_com_admin_sq *sq = &admin_queue->sq;
91 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
92
93 sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
94 &sq->dma_addr, GFP_KERNEL);
95
96 if (!sq->entries) {
97 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
98 return -ENOMEM;
99 }
100
101 sq->head = 0;
102 sq->tail = 0;
103 sq->phase = 1;
104
105 sq->db_addr = NULL;
106
107 return 0;
108}
109
110static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
111{
112 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
113 struct ena_com_admin_cq *cq = &admin_queue->cq;
114 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
115
116 cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
117 &cq->dma_addr, GFP_KERNEL);
118
119 if (!cq->entries) {
120 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
121 return -ENOMEM;
122 }
123
124 cq->head = 0;
125 cq->phase = 1;
126
127 return 0;
128}
129
130static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
131 struct ena_aenq_handlers *aenq_handlers)
132{
133 struct ena_com_aenq *aenq = &ena_dev->aenq;
134 u32 addr_low, addr_high, aenq_caps;
135 u16 size;
136
137 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
138 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
139 aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
140 &aenq->dma_addr, GFP_KERNEL);
141
142 if (!aenq->entries) {
143 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
144 return -ENOMEM;
145 }
146
147 aenq->head = aenq->q_depth;
148 aenq->phase = 1;
149
150 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
151 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
152
153 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
154 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
155
156 aenq_caps = 0;
157 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
158 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
159 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
160 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
161 writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
162
163 if (unlikely(!aenq_handlers)) {
164 netdev_err(ena_dev->net_device,
165 "AENQ handlers pointer is NULL\n");
166 return -EINVAL;
167 }
168
169 aenq->aenq_handlers = aenq_handlers;
170
171 return 0;
172}
173
174static void comp_ctxt_release(struct ena_com_admin_queue *queue,
175 struct ena_comp_ctx *comp_ctx)
176{
177 comp_ctx->occupied = false;
178 atomic_dec(&queue->outstanding_cmds);
179}
180
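/* Return the completion context associated with @command_id after bounds
 * and NULL checks. When @capture is true, mark the slot as occupied and
 * bump the outstanding commands counter so it cannot be reused until it
 * is released by comp_ctxt_release().
 */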
181static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
182 u16 command_id, bool capture)
183{
184 if (unlikely(command_id >= admin_queue->q_depth)) {
185 netdev_err(admin_queue->ena_dev->net_device,
186 "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
187 command_id, admin_queue->q_depth);
188 return NULL;
189 }
190
191 if (unlikely(!admin_queue->comp_ctx)) {
192 netdev_err(admin_queue->ena_dev->net_device,
193 "Completion context is NULL\n");
194 return NULL;
195 }
196
197 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
198 netdev_err(admin_queue->ena_dev->net_device,
199 "Completion context is occupied\n");
200 return NULL;
201 }
202
203 if (capture) {
204 atomic_inc(&admin_queue->outstanding_cmds);
205 admin_queue->comp_ctx[command_id].occupied = true;
206 }
207
208 return &admin_queue->comp_ctx[command_id];
209}
210
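/* Build and post a single admin command. The caller must hold q_lock.
 * Returns ERR_PTR(-ENOSPC) when the admin queue is full. Otherwise the
 * descriptor is stamped with the current SQ phase and command id, copied
 * into the SQ ring (toggling the phase when the tail wraps), and the SQ
 * doorbell is rung to notify the device.
 */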
211static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
212 struct ena_admin_aq_entry *cmd,
213 size_t cmd_size_in_bytes,
214 struct ena_admin_acq_entry *comp,
215 size_t comp_size_in_bytes)
216{
217 struct ena_comp_ctx *comp_ctx;
218 u16 tail_masked, cmd_id;
219 u16 queue_size_mask;
220 u16 cnt;
221
222 queue_size_mask = admin_queue->q_depth - 1;
223
224 tail_masked = admin_queue->sq.tail & queue_size_mask;
225
226 /* In case of queue FULL */
227 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
228 if (cnt >= admin_queue->q_depth) {
229 netdev_dbg(admin_queue->ena_dev->net_device,
230 "Admin queue is full.\n");
231 admin_queue->stats.out_of_space++;
232 return ERR_PTR(-ENOSPC);
233 }
234
235 cmd_id = admin_queue->curr_cmd_id;
236
237 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
238 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
239
240 cmd->aq_common_descriptor.command_id |= cmd_id &
241 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
242
243 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
244 if (unlikely(!comp_ctx))
245 return ERR_PTR(-EINVAL);
246
247 comp_ctx->status = ENA_CMD_SUBMITTED;
248 comp_ctx->comp_size = (u32)comp_size_in_bytes;
249 comp_ctx->user_cqe = comp;
250 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
251
252 reinit_completion(&comp_ctx->wait_event);
253
254 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
255
256 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
257 queue_size_mask;
258
259 admin_queue->sq.tail++;
260 admin_queue->stats.submitted_cmd++;
261
262 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
263 admin_queue->sq.phase = !admin_queue->sq.phase;
264
265 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
266
267 return comp_ctx;
268}
269
270static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
271{
272 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
273 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
274 struct ena_comp_ctx *comp_ctx;
275 u16 i;
276
277 admin_queue->comp_ctx =
278 devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
279 if (unlikely(!admin_queue->comp_ctx)) {
280 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
281 return -ENOMEM;
282 }
283
284 for (i = 0; i < admin_queue->q_depth; i++) {
285 comp_ctx = get_comp_ctxt(admin_queue, i, false);
286 if (comp_ctx)
287 init_completion(&comp_ctx->wait_event);
288 }
289
290 return 0;
291}
292
293static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
294 struct ena_admin_aq_entry *cmd,
295 size_t cmd_size_in_bytes,
296 struct ena_admin_acq_entry *comp,
297 size_t comp_size_in_bytes)
298{
299 unsigned long flags = 0;
300 struct ena_comp_ctx *comp_ctx;
301
302 spin_lock_irqsave(&admin_queue->q_lock, flags);
303 if (unlikely(!admin_queue->running_state)) {
304 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
305 return ERR_PTR(-ENODEV);
306 }
307 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
308 cmd_size_in_bytes,
309 comp,
310 comp_size_in_bytes);
311 if (IS_ERR(comp_ctx))
312 admin_queue->running_state = false;
313 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
314
315 return comp_ctx;
316}
317
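/* Allocate the backing store for an IO submission queue. For host
 * placement the descriptor ring is allocated on the requested NUMA node
 * (falling back to any node); for device (LLQ) placement bounce buffers
 * are allocated instead and the LLQ configuration is copied into the SQ.
 */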
318static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
319 struct ena_com_create_io_ctx *ctx,
320 struct ena_com_io_sq *io_sq)
321{
322 size_t size;
323 int dev_node = 0;
324
325 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
326
327 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
328 io_sq->desc_entry_size =
329 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
330 sizeof(struct ena_eth_io_tx_desc) :
331 sizeof(struct ena_eth_io_rx_desc);
332
333 size = io_sq->desc_entry_size * io_sq->q_depth;
334
335 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
336 dev_node = dev_to_node(ena_dev->dmadev);
337 set_dev_node(ena_dev->dmadev, ctx->numa_node);
338 io_sq->desc_addr.virt_addr =
339 dma_alloc_coherent(ena_dev->dmadev, size,
340 &io_sq->desc_addr.phys_addr,
341 GFP_KERNEL);
342 set_dev_node(ena_dev->dmadev, dev_node);
343 if (!io_sq->desc_addr.virt_addr) {
344 io_sq->desc_addr.virt_addr =
345 dma_alloc_coherent(ena_dev->dmadev, size,
346 &io_sq->desc_addr.phys_addr,
347 GFP_KERNEL);
348 }
349
350 if (!io_sq->desc_addr.virt_addr) {
351 netdev_err(ena_dev->net_device,
352 "Memory allocation failed\n");
353 return -ENOMEM;
354 }
355 }
356
357 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
358 /* Allocate bounce buffers */
359 io_sq->bounce_buf_ctrl.buffer_size =
360 ena_dev->llq_info.desc_list_entry_size;
361 io_sq->bounce_buf_ctrl.buffers_num =
362 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
363 io_sq->bounce_buf_ctrl.next_to_use = 0;
364
365 size = io_sq->bounce_buf_ctrl.buffer_size *
366 io_sq->bounce_buf_ctrl.buffers_num;
367
368 dev_node = dev_to_node(ena_dev->dmadev);
369 set_dev_node(ena_dev->dmadev, ctx->numa_node);
370 io_sq->bounce_buf_ctrl.base_buffer =
371 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
372 set_dev_node(ena_dev->dmadev, dev_node);
373 if (!io_sq->bounce_buf_ctrl.base_buffer)
374 io_sq->bounce_buf_ctrl.base_buffer =
375 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
376
377 if (!io_sq->bounce_buf_ctrl.base_buffer) {
378 netdev_err(ena_dev->net_device,
379 "Bounce buffer memory allocation failed\n");
380 return -ENOMEM;
381 }
382
383 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
384 sizeof(io_sq->llq_info));
385
386 /* Initiate the first bounce buffer */
387 io_sq->llq_buf_ctrl.curr_bounce_buf =
388 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
389 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
390 0x0, io_sq->llq_info.desc_list_entry_size);
391 io_sq->llq_buf_ctrl.descs_left_in_line =
392 io_sq->llq_info.descs_num_before_header;
393 io_sq->disable_meta_caching =
394 io_sq->llq_info.disable_meta_caching;
395
396 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
397 io_sq->entries_in_tx_burst_left =
398 io_sq->llq_info.max_entries_in_tx_burst;
399 }
400
401 io_sq->tail = 0;
402 io_sq->next_to_comp = 0;
403 io_sq->phase = 1;
404
405 return 0;
406}
407
408static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
409 struct ena_com_create_io_ctx *ctx,
410 struct ena_com_io_cq *io_cq)
411{
412 size_t size;
413 int prev_node = 0;
414
415 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
416
417 /* Use the basic completion descriptor for Rx */
418 io_cq->cdesc_entry_size_in_bytes =
419 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
420 sizeof(struct ena_eth_io_tx_cdesc) :
421 sizeof(struct ena_eth_io_rx_cdesc_base);
422
423 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
424
425 prev_node = dev_to_node(ena_dev->dmadev);
426 set_dev_node(ena_dev->dmadev, ctx->numa_node);
427 io_cq->cdesc_addr.virt_addr =
428 dma_alloc_coherent(ena_dev->dmadev, size,
429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
430 set_dev_node(ena_dev->dmadev, prev_node);
431 if (!io_cq->cdesc_addr.virt_addr) {
432 io_cq->cdesc_addr.virt_addr =
433 dma_alloc_coherent(ena_dev->dmadev, size,
434 &io_cq->cdesc_addr.phys_addr,
435 GFP_KERNEL);
436 }
437
438 if (!io_cq->cdesc_addr.virt_addr) {
439 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
440 return -ENOMEM;
441 }
442
443 io_cq->phase = 1;
444 io_cq->head = 0;
445
446 return 0;
447}
448
449static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
450 struct ena_admin_acq_entry *cqe)
451{
452 struct ena_comp_ctx *comp_ctx;
453 u16 cmd_id;
454
455 cmd_id = cqe->acq_common_descriptor.command &
456 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
457
458 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
459 if (unlikely(!comp_ctx)) {
460 netdev_err(admin_queue->ena_dev->net_device,
461 "comp_ctx is NULL. Changing the admin queue running state\n");
462 admin_queue->running_state = false;
463 return;
464 }
465
466 comp_ctx->status = ENA_CMD_COMPLETED;
467 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
468
469 if (comp_ctx->user_cqe)
470 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
471
472 if (!admin_queue->polling)
473 complete(&comp_ctx->wait_event);
474}
475
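/* Drain the admin completion queue: process every entry whose phase bit
 * matches the expected phase, flipping the phase when the head wraps
 * around, then advance the CQ/SQ heads by the number of completions
 * handled.
 */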
476static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
477{
478 struct ena_admin_acq_entry *cqe = NULL;
479 u16 comp_num = 0;
480 u16 head_masked;
481 u8 phase;
482
483 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
484 phase = admin_queue->cq.phase;
485
486 cqe = &admin_queue->cq.entries[head_masked];
487
488 /* Go over all the completions */
489 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
490 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
491 /* Do not read the rest of the completion entry until the
492 * phase bit has been validated
493 */
494 dma_rmb();
495 ena_com_handle_single_admin_completion(admin_queue, cqe);
496
497 head_masked++;
498 comp_num++;
499 if (unlikely(head_masked == admin_queue->q_depth)) {
500 head_masked = 0;
501 phase = !phase;
502 }
503
504 cqe = &admin_queue->cq.entries[head_masked];
505 }
506
507 admin_queue->cq.head += comp_num;
508 admin_queue->cq.phase = phase;
509 admin_queue->sq.head += comp_num;
510 admin_queue->stats.completed_cmd += comp_num;
511}
512
513static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
514 u8 comp_status)
515{
516 if (unlikely(comp_status != 0))
517 netdev_err(admin_queue->ena_dev->net_device,
518 "Admin command failed[%u]\n", comp_status);
519
520 switch (comp_status) {
521 case ENA_ADMIN_SUCCESS:
522 return 0;
523 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
524 return -ENOMEM;
525 case ENA_ADMIN_UNSUPPORTED_OPCODE:
526 return -EOPNOTSUPP;
527 case ENA_ADMIN_BAD_OPCODE:
528 case ENA_ADMIN_MALFORMED_REQUEST:
529 case ENA_ADMIN_ILLEGAL_PARAMETER:
530 case ENA_ADMIN_UNKNOWN_ERROR:
531 return -EINVAL;
532 case ENA_ADMIN_RESOURCE_BUSY:
533 return -EAGAIN;
534 }
535
536 return -EINVAL;
537}
538
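/* Sleep for roughly delay_us * 2^exp microseconds. The base delay is
 * floored at ENA_MIN_ADMIN_POLL_US, the exponent is capped at
 * ENA_MAX_BACKOFF_DELAY_EXP and the resulting delay is capped at
 * ENA_MAX_ADMIN_POLL_US; usleep_range() is given a [delay, 2 * delay] window.
 */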
539static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
540{
541 exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
542 delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
543 delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
544 usleep_range(delay_us, 2 * delay_us);
545}
546
547static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
548 struct ena_com_admin_queue *admin_queue)
549{
550 unsigned long flags = 0;
551 unsigned long timeout;
552 int ret;
553 u32 exp = 0;
554
555 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
556
557 while (1) {
558 spin_lock_irqsave(&admin_queue->q_lock, flags);
559 ena_com_handle_admin_completion(admin_queue);
560 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
561
562 if (comp_ctx->status != ENA_CMD_SUBMITTED)
563 break;
564
565 if (time_is_before_jiffies(timeout)) {
566 netdev_err(admin_queue->ena_dev->net_device,
567 "Wait for completion (polling) timeout\n");
568 /* ENA didn't have any completion */
569 spin_lock_irqsave(&admin_queue->q_lock, flags);
570 admin_queue->stats.no_completion++;
571 admin_queue->running_state = false;
572 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
573
574 ret = -ETIME;
575 goto err;
576 }
577
578 ena_delay_exponential_backoff_us(exp++,
579 admin_queue->ena_dev->ena_min_poll_delay_us);
580 }
581
582 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
583 netdev_err(admin_queue->ena_dev->net_device,
584 "Command was aborted\n");
585 spin_lock_irqsave(&admin_queue->q_lock, flags);
586 admin_queue->stats.aborted_cmd++;
587 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
588 ret = -ENODEV;
589 goto err;
590 }
591
592 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
593 comp_ctx->status);
594
595 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
596err:
597 comp_ctxt_release(admin_queue, comp_ctx);
598 return ret;
599}
600
601/*
602 * Set the LLQ configuration of the firmware
603 *
604 * The driver provides only the enabled feature values to the device,
605 * which, in turn, checks whether they are supported.
606 */
607static int ena_com_set_llq(struct ena_com_dev *ena_dev)
608{
609 struct ena_com_admin_queue *admin_queue;
610 struct ena_admin_set_feat_cmd cmd;
611 struct ena_admin_set_feat_resp resp;
612 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
613 int ret;
614
615 memset(&cmd, 0x0, sizeof(cmd));
616 admin_queue = &ena_dev->admin_queue;
617
618 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
619 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
620
621 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
622 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
623 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
624 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
625
626 cmd.u.llq.accel_mode.u.set.enabled_flags =
627 BIT(ENA_ADMIN_DISABLE_META_CACHING) |
628 BIT(ENA_ADMIN_LIMIT_TX_BURST);
629
630 ret = ena_com_execute_admin_command(admin_queue,
631 (struct ena_admin_aq_entry *)&cmd,
632 sizeof(cmd),
633 (struct ena_admin_acq_entry *)&resp,
634 sizeof(resp));
635
636 if (unlikely(ret))
637 netdev_err(ena_dev->net_device,
638 "Failed to set LLQ configurations: %d\n", ret);
639
640 return ret;
641}
642
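/* Reconcile the driver's default LLQ configuration with the feature bits
 * reported by the device. For each knob (header location, descriptor
 * stride, entry size, descriptors before header) the default is preferred;
 * otherwise a supported value is chosen and the fallback is logged. The
 * final configuration is then pushed to the device via ena_com_set_llq().
 */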
643static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
644 struct ena_admin_feature_llq_desc *llq_features,
645 struct ena_llq_configurations *llq_default_cfg)
646{
647 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
648 struct ena_admin_accel_mode_get llq_accel_mode_get;
649 u16 supported_feat;
650 int rc;
651
652 memset(llq_info, 0, sizeof(*llq_info));
653
654 supported_feat = llq_features->header_location_ctrl_supported;
655
656 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
657 llq_info->header_location_ctrl =
658 llq_default_cfg->llq_header_location;
659 } else {
660 netdev_err(ena_dev->net_device,
661 "Invalid header location control, supported: 0x%x\n",
662 supported_feat);
663 return -EINVAL;
664 }
665
666 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
667 supported_feat = llq_features->descriptors_stride_ctrl_supported;
668 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
669 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
670 } else {
671 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
672 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
673 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
674 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
675 } else {
676 netdev_err(ena_dev->net_device,
677 "Invalid desc_stride_ctrl, supported: 0x%x\n",
678 supported_feat);
679 return -EINVAL;
680 }
681
682 netdev_err(ena_dev->net_device,
683 "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
684 llq_default_cfg->llq_stride_ctrl,
685 supported_feat, llq_info->desc_stride_ctrl);
686 }
687 } else {
688 llq_info->desc_stride_ctrl = 0;
689 }
690
691 supported_feat = llq_features->entry_size_ctrl_supported;
692 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
693 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
694 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
695 } else {
696 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
697 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
698 llq_info->desc_list_entry_size = 128;
699 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
700 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
701 llq_info->desc_list_entry_size = 192;
702 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
703 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
704 llq_info->desc_list_entry_size = 256;
705 } else {
706 netdev_err(ena_dev->net_device,
707 "Invalid entry_size_ctrl, supported: 0x%x\n",
708 supported_feat);
709 return -EINVAL;
710 }
711
712 netdev_err(ena_dev->net_device,
713 "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
714 llq_default_cfg->llq_ring_entry_size, supported_feat,
715 llq_info->desc_list_entry_size);
716 }
717 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
718 /* The desc list entry size should be a whole multiple of 8.
719 * This requirement comes from __iowrite64_copy()
720 */
721 netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
722 llq_info->desc_list_entry_size);
723 return -EINVAL;
724 }
725
726 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
727 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
728 sizeof(struct ena_eth_io_tx_desc);
729 else
730 llq_info->descs_per_entry = 1;
731
732 supported_feat = llq_features->desc_num_before_header_supported;
733 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
734 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
735 } else {
736 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
737 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
738 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
739 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
740 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
741 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
742 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
743 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
744 } else {
745 netdev_err(ena_dev->net_device,
746 "Invalid descs_num_before_header, supported: 0x%x\n",
747 supported_feat);
748 return -EINVAL;
749 }
750
751 netdev_err(ena_dev->net_device,
752 "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
753 llq_default_cfg->llq_num_decs_before_header,
754 supported_feat, llq_info->descs_num_before_header);
755 }
756 /* Check whether the accelerated queue features are supported */
757 llq_accel_mode_get = llq_features->accel_mode.u.get;
758
759 llq_info->disable_meta_caching =
760 !!(llq_accel_mode_get.supported_flags &
761 BIT(ENA_ADMIN_DISABLE_META_CACHING));
762
763 if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
764 llq_info->max_entries_in_tx_burst =
765 llq_accel_mode_get.max_tx_burst_size /
766 llq_default_cfg->llq_ring_entry_size_value;
767
768 rc = ena_com_set_llq(ena_dev);
769 if (rc)
770 netdev_err(ena_dev->net_device,
771 "Cannot set LLQ configuration: %d\n", rc);
772
773 return rc;
774}
775
776static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
777 struct ena_com_admin_queue *admin_queue)
778{
779 unsigned long flags = 0;
780 int ret;
781
782 wait_for_completion_timeout(&comp_ctx->wait_event,
783 usecs_to_jiffies(
784 admin_queue->completion_timeout));
785
786 /* In case the command wasn't completed, find out the root cause.
787 * There might be 2 kinds of errors:
788 * 1) No completion (timeout reached)
789 * 2) There is a completion, but the driver didn't receive a MSI-X interrupt.
790 */
791 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
792 spin_lock_irqsave(&admin_queue->q_lock, flags);
793 ena_com_handle_admin_completion(admin_queue);
794 admin_queue->stats.no_completion++;
795 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
796
797 if (comp_ctx->status == ENA_CMD_COMPLETED) {
798 netdev_err(admin_queue->ena_dev->net_device,
799 "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
800 comp_ctx->cmd_opcode,
801 admin_queue->auto_polling ? "ON" : "OFF");
802 /* Check if fallback to polling is enabled */
803 if (admin_queue->auto_polling)
804 admin_queue->polling = true;
805 } else {
806 netdev_err(admin_queue->ena_dev->net_device,
807 "The ena device didn't send a completion for the admin cmd %d status %d\n",
808 comp_ctx->cmd_opcode, comp_ctx->status);
809 }
810 /* Check if shifted to polling mode.
811 * This will happen if there is a completion without an interrupt
812 * and autopolling mode is enabled. In that case, continue normal execution.
813 */
814 if (!admin_queue->polling) {
815 admin_queue->running_state = false;
816 ret = -ETIME;
817 goto err;
818 }
819 }
820
821 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
822err:
823 comp_ctxt_release(admin_queue, comp_ctx);
824 return ret;
825}
826
827/* This method reads a hardware device register by posting a write
828 * and waiting for the response.
829 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
830 */
831static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
832{
833 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
834 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
835 mmio_read->read_resp;
836 u32 mmio_read_reg, ret, i;
837 unsigned long flags = 0;
838 u32 timeout = mmio_read->reg_read_to;
839
840 might_sleep();
841
842 if (timeout == 0)
843 timeout = ENA_REG_READ_TIMEOUT;
844
845 /* If readless is disabled, perform regular read */
846 if (!mmio_read->readless_supported)
847 return readl(ena_dev->reg_bar + offset);
848
849 spin_lock_irqsave(&mmio_read->lock, flags);
850 mmio_read->seq_num++;
851
852 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
853 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
854 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
855 mmio_read_reg |= mmio_read->seq_num &
856 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
857
858 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
859
860 for (i = 0; i < timeout; i++) {
861 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
862 break;
863
864 udelay(1);
865 }
866
867 if (unlikely(i == timeout)) {
868 netdev_err(ena_dev->net_device,
869 "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
870 mmio_read->seq_num, offset, read_resp->req_id,
871 read_resp->reg_off);
872 ret = ENA_MMIO_READ_TIMEOUT;
873 goto err;
874 }
875
876 if (read_resp->reg_off != offset) {
877 netdev_err(ena_dev->net_device,
878 "Read failure: wrong offset provided\n");
879 ret = ENA_MMIO_READ_TIMEOUT;
880 } else {
881 ret = read_resp->reg_val;
882 }
883err:
884 spin_unlock_irqrestore(&mmio_read->lock, flags);
885
886 return ret;
887}
888
889/* There are two ways to wait for a completion.
890 * Polling mode - wait until the completion is available.
891 * Async mode - wait on a wait queue until the completion is ready
892 * (or the timeout expires).
893 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
894 * to mark the completions.
895 */
896static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
897 struct ena_com_admin_queue *admin_queue)
898{
899 if (admin_queue->polling)
900 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
901 admin_queue);
902
903 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
904 admin_queue);
905}
906
907static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
908 struct ena_com_io_sq *io_sq)
909{
910 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
911 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
912 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
913 u8 direction;
914 int ret;
915
916 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
917
918 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
919 direction = ENA_ADMIN_SQ_DIRECTION_TX;
920 else
921 direction = ENA_ADMIN_SQ_DIRECTION_RX;
922
923 destroy_cmd.sq.sq_identity |= (direction <<
924 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
925 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
926
927 destroy_cmd.sq.sq_idx = io_sq->idx;
928 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
929
930 ret = ena_com_execute_admin_command(admin_queue,
931 (struct ena_admin_aq_entry *)&destroy_cmd,
932 sizeof(destroy_cmd),
933 (struct ena_admin_acq_entry *)&destroy_resp,
934 sizeof(destroy_resp));
935
936 if (unlikely(ret && (ret != -ENODEV)))
937 netdev_err(ena_dev->net_device,
938 "Failed to destroy io sq error: %d\n", ret);
939
940 return ret;
941}
942
943static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
944 struct ena_com_io_sq *io_sq,
945 struct ena_com_io_cq *io_cq)
946{
947 size_t size;
948
949 if (io_cq->cdesc_addr.virt_addr) {
950 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
951
952 dma_free_coherent(ena_dev->dmadev, size,
953 io_cq->cdesc_addr.virt_addr,
954 io_cq->cdesc_addr.phys_addr);
955
956 io_cq->cdesc_addr.virt_addr = NULL;
957 }
958
959 if (io_sq->desc_addr.virt_addr) {
960 size = io_sq->desc_entry_size * io_sq->q_depth;
961
962 dma_free_coherent(ena_dev->dmadev, size,
963 io_sq->desc_addr.virt_addr,
964 io_sq->desc_addr.phys_addr);
965
966 io_sq->desc_addr.virt_addr = NULL;
967 }
968
969 if (io_sq->bounce_buf_ctrl.base_buffer) {
970 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
971 io_sq->bounce_buf_ctrl.base_buffer = NULL;
972 }
973}
974
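/* Poll the device status register until the RESET_IN_PROGRESS bit matches
 * @exp_state or the timeout (given in units of 100 ms) expires, backing
 * off exponentially between reads.
 */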
975static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
976 u16 exp_state)
977{
978 u32 val, exp = 0;
979 unsigned long timeout_stamp;
980
981 /* Convert timeout from resolution of 100ms to us resolution. */
982 timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
983
984 while (1) {
985 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
986
987 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
988 netdev_err(ena_dev->net_device,
989 "Reg read timeout occurred\n");
990 return -ETIME;
991 }
992
993 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
994 exp_state)
995 return 0;
996
997 if (time_is_before_jiffies(timeout_stamp))
998 return -ETIME;
999
1000 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1001 }
1002}
1003
1004static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1005 enum ena_admin_aq_feature_id feature_id)
1006{
1007 u32 feature_mask = 1 << feature_id;
1008
1009 /* Device attributes are always supported */
1010 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
1011 !(ena_dev->supported_features & feature_mask))
1012 return false;
1013
1014 return true;
1015}
1016
1017static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1018 struct ena_admin_get_feat_resp *get_resp,
1019 enum ena_admin_aq_feature_id feature_id,
1020 dma_addr_t control_buf_dma_addr,
1021 u32 control_buff_size,
1022 u8 feature_ver)
1023{
1024 struct ena_com_admin_queue *admin_queue;
1025 struct ena_admin_get_feat_cmd get_cmd;
1026 int ret;
1027
1028 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1029 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
1030 feature_id);
1031 return -EOPNOTSUPP;
1032 }
1033
1034 memset(&get_cmd, 0x0, sizeof(get_cmd));
1035 admin_queue = &ena_dev->admin_queue;
1036
1037 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1038
1039 if (control_buff_size)
1040 get_cmd.aq_common_descriptor.flags =
1041 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1042 else
1043 get_cmd.aq_common_descriptor.flags = 0;
1044
1045 ret = ena_com_mem_addr_set(ena_dev,
1046 &get_cmd.control_buffer.address,
1047 control_buf_dma_addr);
1048 if (unlikely(ret)) {
1049 netdev_err(ena_dev->net_device, "Memory address set failed\n");
1050 return ret;
1051 }
1052
1053 get_cmd.control_buffer.length = control_buff_size;
1054 get_cmd.feat_common.feature_version = feature_ver;
1055 get_cmd.feat_common.feature_id = feature_id;
1056
1057 ret = ena_com_execute_admin_command(admin_queue,
1058 (struct ena_admin_aq_entry *)
1059 &get_cmd,
1060 sizeof(get_cmd),
1061 (struct ena_admin_acq_entry *)
1062 get_resp,
1063 sizeof(*get_resp));
1064
1065 if (unlikely(ret))
1066 netdev_err(ena_dev->net_device,
1067 "Failed to submit get_feature command %d error: %d\n",
1068 feature_id, ret);
1069
1070 return ret;
1071}
1072
1073static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1074 struct ena_admin_get_feat_resp *get_resp,
1075 enum ena_admin_aq_feature_id feature_id,
1076 u8 feature_ver)
1077{
1078 return ena_com_get_feature_ex(ena_dev,
1079 get_resp,
1080 feature_id,
1081 0,
1082 0,
1083 feature_ver);
1084}
1085
1086int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1087{
1088 return ena_dev->rss.hash_func;
1089}
1090
1091static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1092{
1093 struct ena_admin_feature_rss_flow_hash_control *hash_key =
1094 (ena_dev->rss).hash_key;
1095
1096 netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1097 /* The key buffer is stored in the device in an array of
1098 * uint32 elements.
1099 */
1100 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1101}
1102
1103static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1104{
1105 struct ena_rss *rss = &ena_dev->rss;
1106
1107 if (!ena_com_check_supported_feature_id(ena_dev,
1108 ENA_ADMIN_RSS_HASH_FUNCTION))
1109 return -EOPNOTSUPP;
1110
1111 rss->hash_key =
1112 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1113 &rss->hash_key_dma_addr, GFP_KERNEL);
1114
1115 if (unlikely(!rss->hash_key))
1116 return -ENOMEM;
1117
1118 return 0;
1119}
1120
1121static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1122{
1123 struct ena_rss *rss = &ena_dev->rss;
1124
1125 if (rss->hash_key)
1126 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1127 rss->hash_key, rss->hash_key_dma_addr);
1128 rss->hash_key = NULL;
1129}
1130
1131static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1132{
1133 struct ena_rss *rss = &ena_dev->rss;
1134
1135 rss->hash_ctrl =
1136 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1137 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1138
1139 if (unlikely(!rss->hash_ctrl))
1140 return -ENOMEM;
1141
1142 return 0;
1143}
1144
1145static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1146{
1147 struct ena_rss *rss = &ena_dev->rss;
1148
1149 if (rss->hash_ctrl)
1150 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1151 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1152 rss->hash_ctrl = NULL;
1153}
1154
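/* Query the device for the supported indirection table size range, reject
 * a @log_size outside of it, and allocate both the DMA-coherent table
 * handed to the device and a host-side shadow table of 2^log_size entries.
 */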
1155static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1156 u16 log_size)
1157{
1158 struct ena_rss *rss = &ena_dev->rss;
1159 struct ena_admin_get_feat_resp get_resp;
1160 size_t tbl_size;
1161 int ret;
1162
1163 ret = ena_com_get_feature(ena_dev, &get_resp,
1164 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1165 if (unlikely(ret))
1166 return ret;
1167
1168 if ((get_resp.u.ind_table.min_size > log_size) ||
1169 (get_resp.u.ind_table.max_size < log_size)) {
1170 netdev_err(ena_dev->net_device,
1171 "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1172 1 << log_size, 1 << get_resp.u.ind_table.min_size,
1173 1 << get_resp.u.ind_table.max_size);
1174 return -EINVAL;
1175 }
1176
1177 tbl_size = (1ULL << log_size) *
1178 sizeof(struct ena_admin_rss_ind_table_entry);
1179
1180 rss->rss_ind_tbl =
1181 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1182 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1183 if (unlikely(!rss->rss_ind_tbl))
1184 goto mem_err1;
1185
1186 tbl_size = (1ULL << log_size) * sizeof(u16);
1187 rss->host_rss_ind_tbl =
1188 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1189 if (unlikely(!rss->host_rss_ind_tbl))
1190 goto mem_err2;
1191
1192 rss->tbl_log_size = log_size;
1193
1194 return 0;
1195
1196mem_err2:
1197 tbl_size = (1ULL << log_size) *
1198 sizeof(struct ena_admin_rss_ind_table_entry);
1199
1200 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1201 rss->rss_ind_tbl_dma_addr);
1202 rss->rss_ind_tbl = NULL;
1203mem_err1:
1204 rss->tbl_log_size = 0;
1205 return -ENOMEM;
1206}
1207
1208static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1209{
1210 struct ena_rss *rss = &ena_dev->rss;
1211 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1212 sizeof(struct ena_admin_rss_ind_table_entry);
1213
1214 if (rss->rss_ind_tbl)
1215 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1216 rss->rss_ind_tbl_dma_addr);
1217 rss->rss_ind_tbl = NULL;
1218
1219 if (rss->host_rss_ind_tbl)
1220 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1221 rss->host_rss_ind_tbl = NULL;
1222}
1223
1224static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1225 struct ena_com_io_sq *io_sq, u16 cq_idx)
1226{
1227 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1228 struct ena_admin_aq_create_sq_cmd create_cmd;
1229 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1230 u8 direction;
1231 int ret;
1232
1233 memset(&create_cmd, 0x0, sizeof(create_cmd));
1234
1235 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1236
1237 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1238 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1239 else
1240 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1241
1242 create_cmd.sq_identity |= (direction <<
1243 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1244 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1245
1246 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1247 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1248
1249 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1250 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1251 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1252
1253 create_cmd.sq_caps_3 |=
1254 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1255
1256 create_cmd.cq_idx = cq_idx;
1257 create_cmd.sq_depth = io_sq->q_depth;
1258
1259 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1260 ret = ena_com_mem_addr_set(ena_dev,
1261 &create_cmd.sq_ba,
1262 io_sq->desc_addr.phys_addr);
1263 if (unlikely(ret)) {
1264 netdev_err(ena_dev->net_device,
1265 "Memory address set failed\n");
1266 return ret;
1267 }
1268 }
1269
1270 ret = ena_com_execute_admin_command(admin_queue,
1271 (struct ena_admin_aq_entry *)&create_cmd,
1272 sizeof(create_cmd),
1273 (struct ena_admin_acq_entry *)&cmd_completion,
1274 sizeof(cmd_completion));
1275 if (unlikely(ret)) {
1276 netdev_err(ena_dev->net_device,
1277 "Failed to create IO SQ. error: %d\n", ret);
1278 return ret;
1279 }
1280
1281 io_sq->idx = cmd_completion.sq_idx;
1282
1283 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1284 (uintptr_t)cmd_completion.sq_doorbell_offset);
1285
1286 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1287 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1288 + cmd_completion.llq_headers_offset);
1289
1290 io_sq->desc_addr.pbuf_dev_addr =
1291 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1292 cmd_completion.llq_descriptors_offset);
1293 }
1294
1295 netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
1296 io_sq->idx, io_sq->q_depth);
1297
1298 return ret;
1299}
1300
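/* Translate the host indirection table into the device format: validate
 * that every entry refers to an Rx queue and store that queue's device
 * SQ index in the corresponding table entry.
 */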
1301static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1302{
1303 struct ena_rss *rss = &ena_dev->rss;
1304 struct ena_com_io_sq *io_sq;
1305 u16 qid;
1306 int i;
1307
1308 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1309 qid = rss->host_rss_ind_tbl[i];
1310 if (qid >= ENA_TOTAL_NUM_QUEUES)
1311 return -EINVAL;
1312
1313 io_sq = &ena_dev->io_sq_queues[qid];
1314
1315 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1316 return -EINVAL;
1317
1318 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1319 }
1320
1321 return 0;
1322}
1323
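/* Rescale the stored Rx/Tx interrupt moderation intervals from the
 * previous delay resolution to the one reported by the device (falling
 * back to the default 1 usec resolution if the device reports zero) and
 * record the new resolution.
 */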
1324static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1325 u16 intr_delay_resolution)
1326{
1327 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1328
1329 if (unlikely(!intr_delay_resolution)) {
1330 netdev_err(ena_dev->net_device,
1331 "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1332 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1333 }
1334
1335 /* update Rx */
1336 ena_dev->intr_moder_rx_interval =
1337 ena_dev->intr_moder_rx_interval *
1338 prev_intr_delay_resolution /
1339 intr_delay_resolution;
1340
1341 /* update Tx */
1342 ena_dev->intr_moder_tx_interval =
1343 ena_dev->intr_moder_tx_interval *
1344 prev_intr_delay_resolution /
1345 intr_delay_resolution;
1346
1347 ena_dev->intr_delay_resolution = intr_delay_resolution;
1348}
1349
1350/*****************************************************************************/
1351/******************************* API ******************************/
1352/*****************************************************************************/
1353
1354int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1355 struct ena_admin_aq_entry *cmd,
1356 size_t cmd_size,
1357 struct ena_admin_acq_entry *comp,
1358 size_t comp_size)
1359{
1360 struct ena_comp_ctx *comp_ctx;
1361 int ret;
1362
1363 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1364 comp, comp_size);
1365 if (IS_ERR(comp_ctx)) {
1366 ret = PTR_ERR(comp_ctx);
1367 if (ret == -ENODEV)
1368 netdev_dbg(admin_queue->ena_dev->net_device,
1369 "Failed to submit command [%d]\n", ret);
1370 else
1371 netdev_err(admin_queue->ena_dev->net_device,
1372 "Failed to submit command [%d]\n", ret);
1373
1374 return ret;
1375 }
1376
1377 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1378 if (unlikely(ret)) {
1379 if (admin_queue->running_state)
1380 netdev_err(admin_queue->ena_dev->net_device,
1381 "Failed to process command. ret = %d\n", ret);
1382 else
1383 netdev_dbg(admin_queue->ena_dev->net_device,
1384 "Failed to process command. ret = %d\n", ret);
1385 }
1386 return ret;
1387}
1388
1389int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1390 struct ena_com_io_cq *io_cq)
1391{
1392 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1393 struct ena_admin_aq_create_cq_cmd create_cmd;
1394 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1395 int ret;
1396
1397 memset(&create_cmd, 0x0, sizeof(create_cmd));
1398
1399 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1400
1401 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1402 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1403 create_cmd.cq_caps_1 |=
1404 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1405
1406 create_cmd.msix_vector = io_cq->msix_vector;
1407 create_cmd.cq_depth = io_cq->q_depth;
1408
1409 ret = ena_com_mem_addr_set(ena_dev,
1410 &create_cmd.cq_ba,
1411 io_cq->cdesc_addr.phys_addr);
1412 if (unlikely(ret)) {
1413 netdev_err(ena_dev->net_device, "Memory address set failed\n");
1414 return ret;
1415 }
1416
1417 ret = ena_com_execute_admin_command(admin_queue,
1418 (struct ena_admin_aq_entry *)&create_cmd,
1419 sizeof(create_cmd),
1420 (struct ena_admin_acq_entry *)&cmd_completion,
1421 sizeof(cmd_completion));
1422 if (unlikely(ret)) {
1423 netdev_err(ena_dev->net_device,
1424 "Failed to create IO CQ. error: %d\n", ret);
1425 return ret;
1426 }
1427
1428 io_cq->idx = cmd_completion.cq_idx;
1429
1430 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1431 cmd_completion.cq_interrupt_unmask_register_offset);
1432
1433 if (cmd_completion.cq_head_db_register_offset)
1434 io_cq->cq_head_db_reg =
1435 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1436 cmd_completion.cq_head_db_register_offset);
1437
1438 if (cmd_completion.numa_node_register_offset)
1439 io_cq->numa_node_cfg_reg =
1440 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1441 cmd_completion.numa_node_register_offset);
1442
1443 netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
1444 io_cq->idx, io_cq->q_depth);
1445
1446 return ret;
1447}
1448
1449int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1450 struct ena_com_io_sq **io_sq,
1451 struct ena_com_io_cq **io_cq)
1452{
1453 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1454 netdev_err(ena_dev->net_device,
1455 "Invalid queue number %d but the max is %d\n", qid,
1456 ENA_TOTAL_NUM_QUEUES);
1457 return -EINVAL;
1458 }
1459
1460 *io_sq = &ena_dev->io_sq_queues[qid];
1461 *io_cq = &ena_dev->io_cq_queues[qid];
1462
1463 return 0;
1464}
1465
1466void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1467{
1468 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1469 struct ena_comp_ctx *comp_ctx;
1470 u16 i;
1471
1472 if (!admin_queue->comp_ctx)
1473 return;
1474
1475 for (i = 0; i < admin_queue->q_depth; i++) {
1476 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1477 if (unlikely(!comp_ctx))
1478 break;
1479
1480 comp_ctx->status = ENA_CMD_ABORTED;
1481
1482 complete(&comp_ctx->wait_event);
1483 }
1484}
1485
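/* Busy-wait (with exponential backoff) until every outstanding admin
 * command has been released. The queue lock is held only around the
 * counter read so completions can make progress in between.
 */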
1486void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1487{
1488 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1489 unsigned long flags = 0;
1490 u32 exp = 0;
1491
1492 spin_lock_irqsave(&admin_queue->q_lock, flags);
1493 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1494 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1495 ena_delay_exponential_backoff_us(exp++,
1496 ena_dev->ena_min_poll_delay_us);
1497 spin_lock_irqsave(&admin_queue->q_lock, flags);
1498 }
1499 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1500}
1501
1502int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1503 struct ena_com_io_cq *io_cq)
1504{
1505 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1506 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1507 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1508 int ret;
1509
1510 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1511
1512 destroy_cmd.cq_idx = io_cq->idx;
1513 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1514
1515 ret = ena_com_execute_admin_command(admin_queue,
1516 (struct ena_admin_aq_entry *)&destroy_cmd,
1517 sizeof(destroy_cmd),
1518 (struct ena_admin_acq_entry *)&destroy_resp,
1519 sizeof(destroy_resp));
1520
1521 if (unlikely(ret && (ret != -ENODEV)))
1522 netdev_err(ena_dev->net_device,
1523 "Failed to destroy IO CQ. error: %d\n", ret);
1524
1525 return ret;
1526}
1527
1528bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1529{
1530 return ena_dev->admin_queue.running_state;
1531}
1532
1533void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1534{
1535 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1536 unsigned long flags = 0;
1537
1538 spin_lock_irqsave(&admin_queue->q_lock, flags);
1539 ena_dev->admin_queue.running_state = state;
1540 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1541}
1542
1543void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1544{
1545 u16 depth = ena_dev->aenq.q_depth;
1546
1547 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1548
1549 /* Init head_db to mark that all entries in the queue
1550 * are initially available
1551 */
1552 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1553}
1554
1555int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1556{
1557 struct ena_com_admin_queue *admin_queue;
1558 struct ena_admin_set_feat_cmd cmd;
1559 struct ena_admin_set_feat_resp resp;
1560 struct ena_admin_get_feat_resp get_resp;
1561 int ret;
1562
1563 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1564 if (ret) {
1565 dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
1566 return ret;
1567 }
1568
1569 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1570 netdev_warn(ena_dev->net_device,
1571 "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1572 get_resp.u.aenq.supported_groups, groups_flag);
1573 return -EOPNOTSUPP;
1574 }
1575
1576 memset(&cmd, 0x0, sizeof(cmd));
1577 admin_queue = &ena_dev->admin_queue;
1578
1579 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1580 cmd.aq_common_descriptor.flags = 0;
1581 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1582 cmd.u.aenq.enabled_groups = groups_flag;
1583
1584 ret = ena_com_execute_admin_command(admin_queue,
1585 (struct ena_admin_aq_entry *)&cmd,
1586 sizeof(cmd),
1587 (struct ena_admin_acq_entry *)&resp,
1588 sizeof(resp));
1589
1590 if (unlikely(ret))
1591 netdev_err(ena_dev->net_device,
1592 "Failed to config AENQ ret: %d\n", ret);
1593
1594 return ret;
1595}
1596
1597int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1598{
1599 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1600 u32 width;
1601
1602 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1603 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1604 return -ETIME;
1605 }
1606
1607 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1608 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1609
1610 netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
1611
1612 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1613 netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
1614 width);
1615 return -EINVAL;
1616 }
1617
1618 ena_dev->dma_addr_bits = width;
1619
1620 return width;
1621}
1622
1623int ena_com_validate_version(struct ena_com_dev *ena_dev)
1624{
1625 u32 ver;
1626 u32 ctrl_ver;
1627 u32 ctrl_ver_masked;
1628
1629 /* Make sure the ENA version and the controller version are at least
1630 * the versions the driver expects
1631 */
1632 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1633 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1634 ENA_REGS_CONTROLLER_VERSION_OFF);
1635
1636 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1637 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1638 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1639 return -ETIME;
1640 }
1641
1642 dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
1643 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1644 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1645 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1646
1647 dev_info(ena_dev->dmadev,
1648 "ENA controller version: %d.%d.%d implementation version %d\n",
1649 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1650 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1651 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1652 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1653 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1654 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1655 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1656
1657 ctrl_ver_masked =
1658 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1659 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1660 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1661
1662 /* Validate the ctrl version without the implementation ID */
1663 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1664 netdev_err(ena_dev->net_device,
1665 "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1666 return -1;
1667 }
1668
1669 return 0;
1670}
1671
1672static void
1673ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1674 struct ena_com_admin_queue *admin_queue)
1675
1676{
1677 if (!admin_queue->comp_ctx)
1678 return;
1679
1680 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1681
1682 admin_queue->comp_ctx = NULL;
1683}
1684
1685void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1686{
1687 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1688 struct ena_com_admin_cq *cq = &admin_queue->cq;
1689 struct ena_com_admin_sq *sq = &admin_queue->sq;
1690 struct ena_com_aenq *aenq = &ena_dev->aenq;
1691 u16 size;
1692
1693 ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1694
1695 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1696 if (sq->entries)
1697 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1698 sq->dma_addr);
1699 sq->entries = NULL;
1700
1701 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1702 if (cq->entries)
1703 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1704 cq->dma_addr);
1705 cq->entries = NULL;
1706
1707 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1708 if (ena_dev->aenq.entries)
1709 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1710 aenq->dma_addr);
1711 aenq->entries = NULL;
1712}
1713
1714void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1715{
1716 u32 mask_value = 0;
1717
1718 if (polling)
1719 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1720
1721 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1722 ena_dev->admin_queue.polling = polling;
1723}
1724
1725void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1726 bool polling)
1727{
1728 ena_dev->admin_queue.auto_polling = polling;
1729}
1730
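/* Allocate the DMA-coherent response buffer used for readless register
 * reads, program its address into the device and enable readless mode
 * by default.
 */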
1731int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1732{
1733 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1734
1735 spin_lock_init(&mmio_read->lock);
1736 mmio_read->read_resp =
1737 dma_alloc_coherent(ena_dev->dmadev,
1738 sizeof(*mmio_read->read_resp),
1739 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1740 if (unlikely(!mmio_read->read_resp))
1741 goto err;
1742
1743 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1744
1745 mmio_read->read_resp->req_id = 0x0;
1746 mmio_read->seq_num = 0x0;
1747 mmio_read->readless_supported = true;
1748
1749 return 0;
1750
1751err:
1752
1753 return -ENOMEM;
1754}
1755
1756void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1757{
1758 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1759
1760 mmio_read->readless_supported = readless_supported;
1761}
1762
1763void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1764{
1765 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1766
1767 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1768 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1769
1770 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1771 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1772
1773 mmio_read->read_resp = NULL;
1774}
1775
1776void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1777{
1778 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1779 u32 addr_low, addr_high;
1780
1781 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1782 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1783
1784 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1785 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1786}
1787
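/* Bring up the admin queue: verify the device is ready, allocate the
 * completion contexts and the SQ/CQ rings, program their base addresses
 * and capability registers, initialize the AENQ and finally mark the
 * queue as running.
 */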
1788int ena_com_admin_init(struct ena_com_dev *ena_dev,
1789 struct ena_aenq_handlers *aenq_handlers)
1790{
1791 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1792 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1793 int ret;
1794
1795 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1796
1797 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1798 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1799 return -ETIME;
1800 }
1801
1802 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1803 netdev_err(ena_dev->net_device,
1804 "Device isn't ready, abort com init\n");
1805 return -ENODEV;
1806 }
1807
1808 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1809
1810 admin_queue->q_dmadev = ena_dev->dmadev;
1811 admin_queue->polling = false;
1812 admin_queue->curr_cmd_id = 0;
1813
1814 atomic_set(&admin_queue->outstanding_cmds, 0);
1815
1816 spin_lock_init(&admin_queue->q_lock);
1817
1818 ret = ena_com_init_comp_ctxt(admin_queue);
1819 if (ret)
1820 goto error;
1821
1822 ret = ena_com_admin_init_sq(admin_queue);
1823 if (ret)
1824 goto error;
1825
1826 ret = ena_com_admin_init_cq(admin_queue);
1827 if (ret)
1828 goto error;
1829
1830 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1831 ENA_REGS_AQ_DB_OFF);
1832
1833 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1834 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1835
1836 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1837 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1838
1839 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1840 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1841
1842 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1843 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1844
1845 aq_caps = 0;
1846 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1847 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1848 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1849 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1850
1851 acq_caps = 0;
1852 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1853 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1854 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1855 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1856
1857 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1858 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1859 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1860 if (ret)
1861 goto error;
1862
1863 admin_queue->ena_dev = ena_dev;
1864 admin_queue->running_state = true;
1865
1866 return 0;
1867error:
1868 ena_com_admin_destroy(ena_dev);
1869
1870 return ret;
1871}
1872
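/* Create one IO SQ/CQ pair for the requested qid. The CQ is created first so
 * its index can be passed to the SQ creation command; if the SQ creation
 * fails, the CQ is destroyed and all allocated queue memory is freed.
 */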
1873int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1874 struct ena_com_create_io_ctx *ctx)
1875{
1876 struct ena_com_io_sq *io_sq;
1877 struct ena_com_io_cq *io_cq;
1878 int ret;
1879
1880 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1881 netdev_err(ena_dev->net_device,
1882 "Qid (%d) is bigger than max num of queues (%d)\n",
1883 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1884 return -EINVAL;
1885 }
1886
1887 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1888 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1889
1890 memset(io_sq, 0x0, sizeof(*io_sq));
1891 memset(io_cq, 0x0, sizeof(*io_cq));
1892
1893 /* Init CQ */
1894 io_cq->q_depth = ctx->queue_size;
1895 io_cq->direction = ctx->direction;
1896 io_cq->qid = ctx->qid;
1897
1898 io_cq->msix_vector = ctx->msix_vector;
1899
1900 io_sq->q_depth = ctx->queue_size;
1901 io_sq->direction = ctx->direction;
1902 io_sq->qid = ctx->qid;
1903
1904 io_sq->mem_queue_type = ctx->mem_queue_type;
1905
1906 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1907 /* header length is limited to 8 bits */
1908 io_sq->tx_max_header_size =
1909 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1910
1911 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1912 if (ret)
1913 goto error;
1914 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1915 if (ret)
1916 goto error;
1917
1918 ret = ena_com_create_io_cq(ena_dev, io_cq);
1919 if (ret)
1920 goto error;
1921
1922 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1923 if (ret)
1924 goto destroy_io_cq;
1925
1926 return 0;
1927
1928destroy_io_cq:
1929 ena_com_destroy_io_cq(ena_dev, io_cq);
1930error:
1931 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1932 return ret;
1933}
1934
1935void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1936{
1937 struct ena_com_io_sq *io_sq;
1938 struct ena_com_io_cq *io_cq;
1939
1940 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1941 netdev_err(ena_dev->net_device,
1942 "Qid (%d) is bigger than max num of queues (%d)\n",
1943 qid, ENA_TOTAL_NUM_QUEUES);
1944 return;
1945 }
1946
1947 io_sq = &ena_dev->io_sq_queues[qid];
1948 io_cq = &ena_dev->io_cq_queues[qid];
1949
1950 ena_com_destroy_io_sq(ena_dev, io_sq);
1951 ena_com_destroy_io_cq(ena_dev, io_cq);
1952
1953 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1954}
1955
1956int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1957 struct ena_admin_get_feat_resp *resp)
1958{
1959 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1960}
1961
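/* Collect the device feature set: device attributes, queue limits (extended
 * format when ENA_ADMIN_MAX_QUEUES_EXT is supported, legacy format
 * otherwise), AENQ configuration and stateless offloads. HW hints and LLQ
 * are optional; -EOPNOTSUPP simply zeroes them out.
 */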
1962int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1963 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1964{
1965 struct ena_admin_get_feat_resp get_resp;
1966 int rc;
1967
1968 rc = ena_com_get_feature(ena_dev, &get_resp,
1969 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1970 if (rc)
1971 return rc;
1972
1973 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1974 sizeof(get_resp.u.dev_attr));
1975
1976 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1977 ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
1978
1979 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1980 rc = ena_com_get_feature(ena_dev, &get_resp,
1981 ENA_ADMIN_MAX_QUEUES_EXT,
1982 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1983 if (rc)
1984 return rc;
1985
1986 if (get_resp.u.max_queue_ext.version !=
1987 ENA_FEATURE_MAX_QUEUE_EXT_VER)
1988 return -EINVAL;
1989
1990 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1991 sizeof(get_resp.u.max_queue_ext));
1992 ena_dev->tx_max_header_size =
1993 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1994 } else {
1995 		rc = ena_com_get_feature(ena_dev, &get_resp,
1996 					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1997 		if (rc)
1998 			return rc;
1999 
2000 		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2001 		       sizeof(get_resp.u.max_queue));
2002 		ena_dev->tx_max_header_size =
2003 			get_resp.u.max_queue.max_header_size;
2004 	}
2005
2006 rc = ena_com_get_feature(ena_dev, &get_resp,
2007 ENA_ADMIN_AENQ_CONFIG, 0);
2008 if (rc)
2009 return rc;
2010
2011 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2012 sizeof(get_resp.u.aenq));
2013
2014 rc = ena_com_get_feature(ena_dev, &get_resp,
2015 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2016 if (rc)
2017 return rc;
2018
2019 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2020 sizeof(get_resp.u.offload));
2021
2022 	/* The driver hints command isn't a mandatory admin command, so if
2023 	 * it isn't supported, the driver hints are simply zeroed out.
2024 	 */
2025 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2026
2027 if (!rc)
2028 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2029 sizeof(get_resp.u.hw_hints));
2030 else if (rc == -EOPNOTSUPP)
2031 memset(&get_feat_ctx->hw_hints, 0x0,
2032 sizeof(get_feat_ctx->hw_hints));
2033 else
2034 return rc;
2035
2036 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2037 if (!rc)
2038 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2039 sizeof(get_resp.u.llq));
2040 else if (rc == -EOPNOTSUPP)
2041 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2042 else
2043 return rc;
2044
2045 return 0;
2046}
2047
2048void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2049{
2050 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2051}
2052
2053/* ena_handle_specific_aenq_event:
2054 * return the handler that is relevant to the specific event group
2055 */
2056static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2057 u16 group)
2058{
2059 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2060
2061 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2062 return aenq_handlers->handlers[group];
2063
2064 return aenq_handlers->unimplemented_handler;
2065}
2066
2067/* ena_aenq_intr_handler:
2068 * handles the aenq incoming events.
2069 * pop events from the queue and apply the specific handler
2070 */
2071void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2072{
2073 struct ena_admin_aenq_entry *aenq_e;
2074 struct ena_admin_aenq_common_desc *aenq_common;
2075 struct ena_com_aenq *aenq = &ena_dev->aenq;
2076 u64 timestamp;
2077 ena_aenq_handler handler_cb;
2078 u16 masked_head, processed = 0;
2079 u8 phase;
2080
2081 masked_head = aenq->head & (aenq->q_depth - 1);
2082 phase = aenq->phase;
2083 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2084 aenq_common = &aenq_e->aenq_common_desc;
2085
2086 /* Go over all the events */
2087 while ((READ_ONCE(aenq_common->flags) &
2088 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2089 /* Make sure the phase bit (ownership) is as expected before
2090 * reading the rest of the descriptor.
2091 */
2092 dma_rmb();
2093
2094 timestamp = (u64)aenq_common->timestamp_low |
2095 ((u64)aenq_common->timestamp_high << 32);
2096
2097 netdev_dbg(ena_dev->net_device,
2098 "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2099 aenq_common->group, aenq_common->syndrome, timestamp);
2100
2101 		/* Handle the specific event */
2102 		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2103 							  aenq_common->group);
2104 		handler_cb(data, aenq_e); /* Call the actual event handler */
2105
2106 /* Get next event entry */
2107 masked_head++;
2108 processed++;
2109
2110 if (unlikely(masked_head == aenq->q_depth)) {
2111 masked_head = 0;
2112 phase = !phase;
2113 }
2114 aenq_e = &aenq->entries[masked_head];
2115 aenq_common = &aenq_e->aenq_common_desc;
2116 }
2117
2118 aenq->head += processed;
2119 aenq->phase = phase;
2120
2121 /* Don't update aenq doorbell if there weren't any processed events */
2122 if (!processed)
2123 return;
2124
2125 /* write the aenq doorbell after all AENQ descriptors were read */
2126 mb();
2127 writel_relaxed((u32)aenq->head,
2128 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2129}
2130
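/* Reset the device: derive the reset timeout from the caps register, trigger
 * the reset with the given reason, wait for the reset-in-progress bit to
 * assert and then clear, and re-derive the admin completion timeout (the caps
 * value is in 100 ms units, falling back to ADMIN_CMD_TIMEOUT_US when not
 * reported). The MMIO read address is re-written because the reset clears it
 * in the device.
 */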
2131int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2132 enum ena_regs_reset_reason_types reset_reason)
2133{
2134 u32 stat, timeout, cap, reset_val;
2135 int rc;
2136
2137 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2138 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2139
2140 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2141 (cap == ENA_MMIO_READ_TIMEOUT))) {
2142 netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2143 return -ETIME;
2144 }
2145
2146 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2147 netdev_err(ena_dev->net_device,
2148 "Device isn't ready, can't reset device\n");
2149 return -EINVAL;
2150 }
2151
2152 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2153 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2154 if (timeout == 0) {
2155 netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2156 return -EINVAL;
2157 }
2158
2159 /* start reset */
2160 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2161 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2162 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2163 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2164
2165 /* Write again the MMIO read request address */
2166 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2167
2168 rc = wait_for_reset_state(ena_dev, timeout,
2169 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2170 if (rc != 0) {
2171 netdev_err(ena_dev->net_device,
2172 "Reset indication didn't turn on\n");
2173 return rc;
2174 }
2175
2176 /* reset done */
2177 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2178 rc = wait_for_reset_state(ena_dev, timeout, 0);
2179 if (rc != 0) {
2180 netdev_err(ena_dev->net_device,
2181 "Reset indication didn't turn off\n");
2182 return rc;
2183 }
2184
2185 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2186 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2187 if (timeout)
2188 /* the resolution of timeout reg is 100ms */
2189 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2190 else
2191 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2192
2193 return 0;
2194}
2195
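/* Issue an ENA_ADMIN_GET_STATS admin command of the requested type and
 * return the raw response in ctx->get_resp.
 */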
2196static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2197 struct ena_com_stats_ctx *ctx,
2198 enum ena_admin_get_stats_type type)
2199{
2200 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2201 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2202 struct ena_com_admin_queue *admin_queue;
2203 int ret;
2204
2205 admin_queue = &ena_dev->admin_queue;
2206
2207 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2208 get_cmd->aq_common_descriptor.flags = 0;
2209 get_cmd->type = type;
2210
2211 ret = ena_com_execute_admin_command(admin_queue,
2212 (struct ena_admin_aq_entry *)get_cmd,
2213 sizeof(*get_cmd),
2214 (struct ena_admin_acq_entry *)get_resp,
2215 sizeof(*get_resp));
2216
2217 if (unlikely(ret))
2218 netdev_err(ena_dev->net_device,
2219 "Failed to get stats. error: %d\n", ret);
2220
2221 return ret;
2222}
2223
2224int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2225 struct ena_admin_eni_stats *stats)
2226{
2227 struct ena_com_stats_ctx ctx;
2228 int ret;
2229
2230 if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2231 netdev_err(ena_dev->net_device,
2232 "Capability %d isn't supported\n",
2233 ENA_ADMIN_ENI_STATS);
2234 return -EOPNOTSUPP;
2235 }
2236
2237 memset(&ctx, 0x0, sizeof(ctx));
2238 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2239 if (likely(ret == 0))
2240 memcpy(stats, &ctx.get_resp.u.eni_stats,
2241 sizeof(ctx.get_resp.u.eni_stats));
2242
2243 return ret;
2244}
2245
2246int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2247 struct ena_admin_basic_stats *stats)
2248{
2249 struct ena_com_stats_ctx ctx;
2250 int ret;
2251
2252 memset(&ctx, 0x0, sizeof(ctx));
2253 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2254 if (likely(ret == 0))
2255 memcpy(stats, &ctx.get_resp.u.basic_stats,
2256 sizeof(ctx.get_resp.u.basic_stats));
2257
2258 return ret;
2259}
2260
2261int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2262{
2263 struct ena_com_admin_queue *admin_queue;
2264 struct ena_admin_set_feat_cmd cmd;
2265 struct ena_admin_set_feat_resp resp;
2266 int ret;
2267
2268 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2269 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2270 ENA_ADMIN_MTU);
2271 return -EOPNOTSUPP;
2272 }
2273
2274 memset(&cmd, 0x0, sizeof(cmd));
2275 admin_queue = &ena_dev->admin_queue;
2276
2277 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2278 cmd.aq_common_descriptor.flags = 0;
2279 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2280 cmd.u.mtu.mtu = mtu;
2281
2282 ret = ena_com_execute_admin_command(admin_queue,
2283 (struct ena_admin_aq_entry *)&cmd,
2284 sizeof(cmd),
2285 (struct ena_admin_acq_entry *)&resp,
2286 sizeof(resp));
2287
2288 if (unlikely(ret))
2289 netdev_err(ena_dev->net_device,
2290 "Failed to set mtu %d. error: %d\n", mtu, ret);
2291
2292 return ret;
2293}
2294
2295int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2296 struct ena_admin_feature_offload_desc *offload)
2297{
2298 int ret;
2299 struct ena_admin_get_feat_resp resp;
2300
2301 ret = ena_com_get_feature(ena_dev, &resp,
2302 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2303 if (unlikely(ret)) {
2304 netdev_err(ena_dev->net_device,
2305 "Failed to get offload capabilities %d\n", ret);
2306 return ret;
2307 }
2308
2309 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2310
2311 return 0;
2312}
2313
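/* Push the currently selected RSS hash function and key to the device. The
 * selected function is validated against the supported-function bitmap
 * reported by the device, and the hash key is passed indirectly through its
 * DMA address in the command control buffer.
 */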
2314int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2315{
2316 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2317 struct ena_rss *rss = &ena_dev->rss;
2318 struct ena_admin_set_feat_cmd cmd;
2319 struct ena_admin_set_feat_resp resp;
2320 struct ena_admin_get_feat_resp get_resp;
2321 int ret;
2322
2323 if (!ena_com_check_supported_feature_id(ena_dev,
2324 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2325 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2326 ENA_ADMIN_RSS_HASH_FUNCTION);
2327 return -EOPNOTSUPP;
2328 }
2329
2330 /* Validate hash function is supported */
2331 ret = ena_com_get_feature(ena_dev, &get_resp,
2332 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2333 if (unlikely(ret))
2334 return ret;
2335
2336 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2337 netdev_err(ena_dev->net_device,
2338 "Func hash %d isn't supported by device, abort\n",
2339 rss->hash_func);
2340 return -EOPNOTSUPP;
2341 }
2342
2343 memset(&cmd, 0x0, sizeof(cmd));
2344
2345 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2346 cmd.aq_common_descriptor.flags =
2347 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2348 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2349 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2350 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2351
2352 ret = ena_com_mem_addr_set(ena_dev,
2353 &cmd.control_buffer.address,
2354 rss->hash_key_dma_addr);
2355 if (unlikely(ret)) {
2356 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2357 return ret;
2358 }
2359
2360 cmd.control_buffer.length = sizeof(*rss->hash_key);
2361
2362 ret = ena_com_execute_admin_command(admin_queue,
2363 (struct ena_admin_aq_entry *)&cmd,
2364 sizeof(cmd),
2365 (struct ena_admin_acq_entry *)&resp,
2366 sizeof(resp));
2367 if (unlikely(ret)) {
2368 netdev_err(ena_dev->net_device,
2369 "Failed to set hash function %d. error: %d\n",
2370 rss->hash_func, ret);
2371 return -EINVAL;
2372 }
2373
2374 return 0;
2375}
2376
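/* Configure the RSS hash function (and, for Toeplitz, an optional key).
 * key_len must be a whole number of 32-bit words and, when a key is given,
 * must match the device's key size exactly. On failure the previously
 * selected function is restored.
 */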
2377int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2378 enum ena_admin_hash_functions func,
2379 const u8 *key, u16 key_len, u32 init_val)
2380{
2381 struct ena_admin_feature_rss_flow_hash_control *hash_key;
2382 struct ena_admin_get_feat_resp get_resp;
2383 enum ena_admin_hash_functions old_func;
2384 struct ena_rss *rss = &ena_dev->rss;
2385 int rc;
2386
2387 hash_key = rss->hash_key;
2388
2389 /* Make sure size is a mult of DWs */
2390 	/* Make sure the key size is a multiple of 32-bit words */
2391 return -EINVAL;
2392
2393 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2394 ENA_ADMIN_RSS_HASH_FUNCTION,
2395 rss->hash_key_dma_addr,
2396 sizeof(*rss->hash_key), 0);
2397 if (unlikely(rc))
2398 return rc;
2399
2400 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2401 netdev_err(ena_dev->net_device,
2402 "Flow hash function %d isn't supported\n", func);
2403 return -EOPNOTSUPP;
2404 }
2405
2406 if ((func == ENA_ADMIN_TOEPLITZ) && key) {
2407 if (key_len != sizeof(hash_key->key)) {
2408 netdev_err(ena_dev->net_device,
2409 "key len (%u) doesn't equal the supported size (%zu)\n",
2410 key_len, sizeof(hash_key->key));
2411 return -EINVAL;
2412 }
2413 memcpy(hash_key->key, key, key_len);
2414 hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2415 }
2416
2417 rss->hash_init_val = init_val;
2418 old_func = rss->hash_func;
2419 rss->hash_func = func;
2420 rc = ena_com_set_hash_function(ena_dev);
2421
2422 /* Restore the old function */
2423 if (unlikely(rc))
2424 rss->hash_func = old_func;
2425
2426 return rc;
2427}
2428
2429int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2430 enum ena_admin_hash_functions *func)
2431{
2432 struct ena_rss *rss = &ena_dev->rss;
2433 struct ena_admin_get_feat_resp get_resp;
2434 int rc;
2435
2436 if (unlikely(!func))
2437 return -EINVAL;
2438
2439 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2440 ENA_ADMIN_RSS_HASH_FUNCTION,
2441 rss->hash_key_dma_addr,
2442 sizeof(*rss->hash_key), 0);
2443 if (unlikely(rc))
2444 return rc;
2445
2446 /* ffs() returns 1 in case the lsb is set */
2447 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2448 if (rss->hash_func)
2449 rss->hash_func--;
2450
2451 *func = rss->hash_func;
2452
2453 return 0;
2454}
2455
2456int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2457{
2458 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2459 ena_dev->rss.hash_key;
2460
2461 if (key)
2462 memcpy(key, hash_key->key,
2463 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2464
2465 return 0;
2466}
2467
2468int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2469 enum ena_admin_flow_hash_proto proto,
2470 u16 *fields)
2471{
2472 struct ena_rss *rss = &ena_dev->rss;
2473 struct ena_admin_get_feat_resp get_resp;
2474 int rc;
2475
2476 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2477 ENA_ADMIN_RSS_HASH_INPUT,
2478 rss->hash_ctrl_dma_addr,
2479 sizeof(*rss->hash_ctrl), 0);
2480 if (unlikely(rc))
2481 return rc;
2482
2483 if (fields)
2484 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2485
2486 return 0;
2487}
2488
2489int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2490{
2491 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2492 struct ena_rss *rss = &ena_dev->rss;
2493 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2494 struct ena_admin_set_feat_cmd cmd;
2495 struct ena_admin_set_feat_resp resp;
2496 int ret;
2497
2498 if (!ena_com_check_supported_feature_id(ena_dev,
2499 ENA_ADMIN_RSS_HASH_INPUT)) {
2500 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2501 ENA_ADMIN_RSS_HASH_INPUT);
2502 return -EOPNOTSUPP;
2503 }
2504
2505 memset(&cmd, 0x0, sizeof(cmd));
2506
2507 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2508 cmd.aq_common_descriptor.flags =
2509 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2510 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2511 cmd.u.flow_hash_input.enabled_input_sort =
2512 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2513 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2514
2515 ret = ena_com_mem_addr_set(ena_dev,
2516 &cmd.control_buffer.address,
2517 rss->hash_ctrl_dma_addr);
2518 if (unlikely(ret)) {
2519 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2520 return ret;
2521 }
2522 cmd.control_buffer.length = sizeof(*hash_ctrl);
2523
2524 ret = ena_com_execute_admin_command(admin_queue,
2525 (struct ena_admin_aq_entry *)&cmd,
2526 sizeof(cmd),
2527 (struct ena_admin_acq_entry *)&resp,
2528 sizeof(resp));
2529 if (unlikely(ret))
2530 netdev_err(ena_dev->net_device,
2531 "Failed to set hash input. error: %d\n", ret);
2532
2533 return ret;
2534}
2535
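/* Program a default flow-hash field selection: L3 + L4 fields for TCP/UDP
 * over IPv4/IPv6, L3 fields for plain and fragmented IP, and L2 addresses for
 * non-IP traffic. The selection is checked against the fields each protocol
 * actually supports before it is applied.
 */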
2536int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2537{
2538 struct ena_rss *rss = &ena_dev->rss;
2539 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2540 rss->hash_ctrl;
2541 u16 available_fields = 0;
2542 int rc, i;
2543
2544 /* Get the supported hash input */
2545 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2546 if (unlikely(rc))
2547 return rc;
2548
2549 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2550 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2551 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2552
2553 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2554 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2555 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2556
2557 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2558 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2559 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2560
2561 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2562 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2563 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2564
2565 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2566 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2567
2568 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2569 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2570
2571 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2572 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2573
2574 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2575 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2576
2577 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2578 available_fields = hash_ctrl->selected_fields[i].fields &
2579 hash_ctrl->supported_fields[i].fields;
2580 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2581 netdev_err(ena_dev->net_device,
2582 				   "Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2583 i, hash_ctrl->supported_fields[i].fields,
2584 hash_ctrl->selected_fields[i].fields);
2585 return -EOPNOTSUPP;
2586 }
2587 }
2588
2589 rc = ena_com_set_hash_ctrl(ena_dev);
2590
2591 /* In case of failure, restore the old hash ctrl */
2592 if (unlikely(rc))
2593 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2594
2595 return rc;
2596}
2597
2598int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2599 enum ena_admin_flow_hash_proto proto,
2600 u16 hash_fields)
2601{
2602 struct ena_rss *rss = &ena_dev->rss;
2603 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2604 u16 supported_fields;
2605 int rc;
2606
2607 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2608 netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
2609 proto);
2610 return -EINVAL;
2611 }
2612
2613 /* Get the ctrl table */
2614 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2615 if (unlikely(rc))
2616 return rc;
2617
2618 /* Make sure all the fields are supported */
2619 supported_fields = hash_ctrl->supported_fields[proto].fields;
2620 if ((hash_fields & supported_fields) != hash_fields) {
2621 netdev_err(ena_dev->net_device,
2622 "Proto %d doesn't support the required fields %x. supports only: %x\n",
2623 proto, hash_fields, supported_fields);
2624 }
2625
2626 hash_ctrl->selected_fields[proto].fields = hash_fields;
2627
2628 rc = ena_com_set_hash_ctrl(ena_dev);
2629
2630 /* In case of failure, restore the old hash ctrl */
2631 if (unlikely(rc))
2632 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2633
2634 	return rc;
2635}
2636
2637int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2638 u16 entry_idx, u16 entry_value)
2639{
2640 struct ena_rss *rss = &ena_dev->rss;
2641
2642 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2643 return -EINVAL;
2644
2645 	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2646 return -EINVAL;
2647
2648 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2649
2650 return 0;
2651}
2652
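/* Push the RSS indirection table to the device. The host table is first
 * converted to the device representation, then passed indirectly via its DMA
 * address with a length of (1 << tbl_log_size) entries.
 */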
2653int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2654{
2655 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2656 struct ena_rss *rss = &ena_dev->rss;
2657 struct ena_admin_set_feat_cmd cmd;
2658 struct ena_admin_set_feat_resp resp;
2659 int ret;
2660
2661 if (!ena_com_check_supported_feature_id(
2662 ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2663 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2664 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2665 return -EOPNOTSUPP;
2666 }
2667
2668 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2669 if (ret) {
2670 netdev_err(ena_dev->net_device,
2671 "Failed to convert host indirection table to device table\n");
2672 return ret;
2673 }
2674
2675 memset(&cmd, 0x0, sizeof(cmd));
2676
2677 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2678 cmd.aq_common_descriptor.flags =
2679 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2680 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2681 cmd.u.ind_table.size = rss->tbl_log_size;
2682 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2683
2684 ret = ena_com_mem_addr_set(ena_dev,
2685 &cmd.control_buffer.address,
2686 rss->rss_ind_tbl_dma_addr);
2687 if (unlikely(ret)) {
2688 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2689 return ret;
2690 }
2691
2692 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2693 sizeof(struct ena_admin_rss_ind_table_entry);
2694
2695 ret = ena_com_execute_admin_command(admin_queue,
2696 (struct ena_admin_aq_entry *)&cmd,
2697 sizeof(cmd),
2698 (struct ena_admin_acq_entry *)&resp,
2699 sizeof(resp));
2700
2701 if (unlikely(ret))
2702 netdev_err(ena_dev->net_device,
2703 "Failed to set indirect table. error: %d\n", ret);
2704
2705 return ret;
2706}
2707
2708int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2709{
2710 struct ena_rss *rss = &ena_dev->rss;
2711 struct ena_admin_get_feat_resp get_resp;
2712 u32 tbl_size;
2713 int i, rc;
2714
2715 tbl_size = (1ULL << rss->tbl_log_size) *
2716 sizeof(struct ena_admin_rss_ind_table_entry);
2717
2718 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2719 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2720 rss->rss_ind_tbl_dma_addr,
2721 tbl_size, 0);
2722 if (unlikely(rc))
2723 return rc;
2724
2725 if (!ind_tbl)
2726 return 0;
2727
2728 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2729 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2730
2731 return 0;
2732}
2733
2734int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2735{
2736 int rc;
2737
2738 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2739
2740 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2741 if (unlikely(rc))
2742 goto err_indr_tbl;
2743
2744 	/* The following function might return -EOPNOTSUPP if the device
2745 	 * doesn't support setting the hash key / function. That error can be
2746 	 * safely ignored, leaving indirection table support only.
2747 	 */
2748 rc = ena_com_hash_key_allocate(ena_dev);
2749 if (likely(!rc))
2750 ena_com_hash_key_fill_default_key(ena_dev);
2751 else if (rc != -EOPNOTSUPP)
2752 goto err_hash_key;
2753
2754 rc = ena_com_hash_ctrl_init(ena_dev);
2755 if (unlikely(rc))
2756 goto err_hash_ctrl;
2757
2758 return 0;
2759
2760err_hash_ctrl:
2761 ena_com_hash_key_destroy(ena_dev);
2762err_hash_key:
2763 ena_com_indirect_table_destroy(ena_dev);
2764err_indr_tbl:
2765
2766 return rc;
2767}
2768
2769void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2770{
2771 ena_com_indirect_table_destroy(ena_dev);
2772 ena_com_hash_key_destroy(ena_dev);
2773 ena_com_hash_ctrl_destroy(ena_dev);
2774
2775 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2776}
2777
2778int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2779{
2780 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2781
2782 host_attr->host_info =
2783 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2784 &host_attr->host_info_dma_addr, GFP_KERNEL);
2785 if (unlikely(!host_attr->host_info))
2786 return -ENOMEM;
2787
2788 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2789 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2790 (ENA_COMMON_SPEC_VERSION_MINOR));
2791
2792 return 0;
2793}
2794
2795int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2796 u32 debug_area_size)
2797{
2798 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2799
2800 host_attr->debug_area_virt_addr =
2801 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2802 &host_attr->debug_area_dma_addr, GFP_KERNEL);
2803 if (unlikely(!host_attr->debug_area_virt_addr)) {
2804 host_attr->debug_area_size = 0;
2805 return -ENOMEM;
2806 }
2807
2808 host_attr->debug_area_size = debug_area_size;
2809
2810 return 0;
2811}
2812
2813void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2814{
2815 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2816
2817 if (host_attr->host_info) {
2818 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2819 host_attr->host_info_dma_addr);
2820 host_attr->host_info = NULL;
2821 }
2822}
2823
2824void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2825{
2826 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2827
2828 if (host_attr->debug_area_virt_addr) {
2829 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2830 host_attr->debug_area_virt_addr,
2831 host_attr->debug_area_dma_addr);
2832 host_attr->debug_area_virt_addr = NULL;
2833 }
2834}
2835
2836int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2837{
2838 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2839 struct ena_com_admin_queue *admin_queue;
2840 struct ena_admin_set_feat_cmd cmd;
2841 struct ena_admin_set_feat_resp resp;
2842
2843 int ret;
2844
2845 /* Host attribute config is called before ena_com_get_dev_attr_feat
2846 * so ena_com can't check if the feature is supported.
2847 */
2848
2849 memset(&cmd, 0x0, sizeof(cmd));
2850 admin_queue = &ena_dev->admin_queue;
2851
2852 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2853 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2854
2855 ret = ena_com_mem_addr_set(ena_dev,
2856 &cmd.u.host_attr.debug_ba,
2857 host_attr->debug_area_dma_addr);
2858 if (unlikely(ret)) {
2859 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2860 return ret;
2861 }
2862
2863 ret = ena_com_mem_addr_set(ena_dev,
2864 &cmd.u.host_attr.os_info_ba,
2865 host_attr->host_info_dma_addr);
2866 if (unlikely(ret)) {
2867 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2868 return ret;
2869 }
2870
2871 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2872
2873 ret = ena_com_execute_admin_command(admin_queue,
2874 (struct ena_admin_aq_entry *)&cmd,
2875 sizeof(cmd),
2876 (struct ena_admin_acq_entry *)&resp,
2877 sizeof(resp));
2878
2879 if (unlikely(ret))
2880 netdev_err(ena_dev->net_device,
2881 "Failed to set host attributes: %d\n", ret);
2882
2883 return ret;
2884}
2885
2886/* Interrupt moderation */
2887bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2888{
2889 return ena_com_check_supported_feature_id(ena_dev,
2890 ENA_ADMIN_INTERRUPT_MODERATION);
2891}
2892
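/* Convert a coalescing time in usec into the device's interval units. The
 * device reports its delay resolution in usec, so, for example, a 64 usec
 * request with a 2 usec resolution becomes an interval value of 32.
 */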
2893static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
2894 u32 coalesce_usecs,
2895 u32 intr_delay_resolution,
2896 u32 *intr_moder_interval)
2897{
2898 if (!intr_delay_resolution) {
2899 netdev_err(ena_dev->net_device,
2900 "Illegal interrupt delay granularity value\n");
2901 return -EFAULT;
2902 }
2903
2904 *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2905
2906 return 0;
2907}
2908
2909int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2910 u32 tx_coalesce_usecs)
2911{
2912 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2913 tx_coalesce_usecs,
2914 ena_dev->intr_delay_resolution,
2915 &ena_dev->intr_moder_tx_interval);
2916}
2917
2918int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2919 u32 rx_coalesce_usecs)
2920{
2921 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2922 rx_coalesce_usecs,
2923 ena_dev->intr_delay_resolution,
2924 &ena_dev->intr_moder_rx_interval);
2925}
2926
2927int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2928{
2929 struct ena_admin_get_feat_resp get_resp;
2930 u16 delay_resolution;
2931 int rc;
2932
2933 rc = ena_com_get_feature(ena_dev, &get_resp,
2934 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2935
2936 if (rc) {
2937 if (rc == -EOPNOTSUPP) {
2938 netdev_dbg(ena_dev->net_device,
2939 "Feature %d isn't supported\n",
2940 ENA_ADMIN_INTERRUPT_MODERATION);
2941 rc = 0;
2942 } else {
2943 netdev_err(ena_dev->net_device,
2944 "Failed to get interrupt moderation admin cmd. rc: %d\n",
2945 rc);
2946 }
2947
2948 /* no moderation supported, disable adaptive support */
2949 ena_com_disable_adaptive_moderation(ena_dev);
2950 return rc;
2951 }
2952
2953 	/* Moderation is supported by the device; record its delay resolution */
2954 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2955 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2956
2957 /* Disable adaptive moderation by default - can be enabled later */
2958 ena_com_disable_adaptive_moderation(ena_dev);
2959
2960 return 0;
2961}
2962
2963unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2964{
2965 return ena_dev->intr_moder_tx_interval;
2966}
2967
2968unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2969{
2970 return ena_dev->intr_moder_rx_interval;
2971}
2972
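/* Select the TX placement policy. If the device exposes no LLQs, plain host
 * memory is used. Otherwise the LLQ parameters are negotiated and the maximum
 * pushed header size becomes whatever remains of an LLQ entry after the
 * descriptors that precede the header.
 */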
2973int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2974 struct ena_admin_feature_llq_desc *llq_features,
2975 struct ena_llq_configurations *llq_default_cfg)
2976{
2977 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2978 int rc;
2979
2980 if (!llq_features->max_llq_num) {
2981 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2982 return 0;
2983 }
2984
2985 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2986 if (rc)
2987 return rc;
2988
2989 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2990 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2991
2992 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2993 netdev_err(ena_dev->net_device,
2994 "The size of the LLQ entry is smaller than needed\n");
2995 return -EINVAL;
2996 }
2997
2998 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2999
3000 return 0;
3001}
1/*
2 * Copyright 2015 Amazon.com, Inc. or its affiliates.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "ena_com.h"
34
35/*****************************************************************************/
36/*****************************************************************************/
37
38/* Timeout in micro-sec */
39#define ADMIN_CMD_TIMEOUT_US (3000000)
40
41#define ENA_ASYNC_QUEUE_DEPTH 16
42#define ENA_ADMIN_QUEUE_DEPTH 32
43
44
45#define ENA_CTRL_MAJOR 0
46#define ENA_CTRL_MINOR 0
47#define ENA_CTRL_SUB_MINOR 1
48
49#define MIN_ENA_CTRL_VER \
50 (((ENA_CTRL_MAJOR) << \
51 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
52 ((ENA_CTRL_MINOR) << \
53 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
54 (ENA_CTRL_SUB_MINOR))
55
56#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
57#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
58
59#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
60
61#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
62
63#define ENA_REGS_ADMIN_INTR_MASK 1
64
65#define ENA_POLL_MS 5
66
67/*****************************************************************************/
68/*****************************************************************************/
69/*****************************************************************************/
70
71enum ena_cmd_status {
72 ENA_CMD_SUBMITTED,
73 ENA_CMD_COMPLETED,
74 /* Abort - canceled by the driver */
75 ENA_CMD_ABORTED,
76};
77
78struct ena_comp_ctx {
79 struct completion wait_event;
80 struct ena_admin_acq_entry *user_cqe;
81 u32 comp_size;
82 enum ena_cmd_status status;
83 /* status from the device */
84 u8 comp_status;
85 u8 cmd_opcode;
86 bool occupied;
87};
88
89struct ena_com_stats_ctx {
90 struct ena_admin_aq_get_stats_cmd get_cmd;
91 struct ena_admin_acq_get_stats_resp get_resp;
92};
93
94static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
95 struct ena_common_mem_addr *ena_addr,
96 dma_addr_t addr)
97{
98 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
99 pr_err("dma address has more bits that the device supports\n");
100 return -EINVAL;
101 }
102
103 ena_addr->mem_addr_low = lower_32_bits(addr);
104 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
105
106 return 0;
107}
108
109static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
110{
111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113
114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 GFP_KERNEL);
116
117 if (!sq->entries) {
118 pr_err("memory allocation failed\n");
119 return -ENOMEM;
120 }
121
122 sq->head = 0;
123 sq->tail = 0;
124 sq->phase = 1;
125
126 sq->db_addr = NULL;
127
128 return 0;
129}
130
131static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
132{
133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135
136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 GFP_KERNEL);
138
139 if (!cq->entries) {
140 pr_err("memory allocation failed\n");
141 return -ENOMEM;
142 }
143
144 cq->head = 0;
145 cq->phase = 1;
146
147 return 0;
148}
149
150static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
151 struct ena_aenq_handlers *aenq_handlers)
152{
153 struct ena_com_aenq *aenq = &dev->aenq;
154 u32 addr_low, addr_high, aenq_caps;
155 u16 size;
156
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 GFP_KERNEL);
161
162 if (!aenq->entries) {
163 pr_err("memory allocation failed\n");
164 return -ENOMEM;
165 }
166
167 aenq->head = aenq->q_depth;
168 aenq->phase = 1;
169
170 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
171 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
172
173 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
174 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
175
176 aenq_caps = 0;
177 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
178 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
179 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
180 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
181 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
182
183 if (unlikely(!aenq_handlers)) {
184 pr_err("aenq handlers pointer is NULL\n");
185 return -EINVAL;
186 }
187
188 aenq->aenq_handlers = aenq_handlers;
189
190 return 0;
191}
192
193static void comp_ctxt_release(struct ena_com_admin_queue *queue,
194 struct ena_comp_ctx *comp_ctx)
195{
196 comp_ctx->occupied = false;
197 atomic_dec(&queue->outstanding_cmds);
198}
199
200static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
201 u16 command_id, bool capture)
202{
203 if (unlikely(command_id >= queue->q_depth)) {
204 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
205 command_id, queue->q_depth);
206 return NULL;
207 }
208
209 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
210 pr_err("Completion context is occupied\n");
211 return NULL;
212 }
213
214 if (capture) {
215 atomic_inc(&queue->outstanding_cmds);
216 queue->comp_ctx[command_id].occupied = true;
217 }
218
219 return &queue->comp_ctx[command_id];
220}
221
222static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
223 struct ena_admin_aq_entry *cmd,
224 size_t cmd_size_in_bytes,
225 struct ena_admin_acq_entry *comp,
226 size_t comp_size_in_bytes)
227{
228 struct ena_comp_ctx *comp_ctx;
229 u16 tail_masked, cmd_id;
230 u16 queue_size_mask;
231 u16 cnt;
232
233 queue_size_mask = admin_queue->q_depth - 1;
234
235 tail_masked = admin_queue->sq.tail & queue_size_mask;
236
237 /* In case of queue FULL */
238 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
239 if (cnt >= admin_queue->q_depth) {
240 pr_debug("admin queue is full.\n");
241 admin_queue->stats.out_of_space++;
242 return ERR_PTR(-ENOSPC);
243 }
244
245 cmd_id = admin_queue->curr_cmd_id;
246
247 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
248 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
249
250 cmd->aq_common_descriptor.command_id |= cmd_id &
251 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
252
253 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
254 if (unlikely(!comp_ctx))
255 return ERR_PTR(-EINVAL);
256
257 comp_ctx->status = ENA_CMD_SUBMITTED;
258 comp_ctx->comp_size = (u32)comp_size_in_bytes;
259 comp_ctx->user_cqe = comp;
260 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
261
262 reinit_completion(&comp_ctx->wait_event);
263
264 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
265
266 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
267 queue_size_mask;
268
269 admin_queue->sq.tail++;
270 admin_queue->stats.submitted_cmd++;
271
272 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
273 admin_queue->sq.phase = !admin_queue->sq.phase;
274
275 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
276
277 return comp_ctx;
278}
279
280static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
281{
282 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
283 struct ena_comp_ctx *comp_ctx;
284 u16 i;
285
286 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
287 if (unlikely(!queue->comp_ctx)) {
288 pr_err("memory allocation failed\n");
289 return -ENOMEM;
290 }
291
292 for (i = 0; i < queue->q_depth; i++) {
293 comp_ctx = get_comp_ctxt(queue, i, false);
294 if (comp_ctx)
295 init_completion(&comp_ctx->wait_event);
296 }
297
298 return 0;
299}
300
301static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
302 struct ena_admin_aq_entry *cmd,
303 size_t cmd_size_in_bytes,
304 struct ena_admin_acq_entry *comp,
305 size_t comp_size_in_bytes)
306{
307 unsigned long flags = 0;
308 struct ena_comp_ctx *comp_ctx;
309
310 spin_lock_irqsave(&admin_queue->q_lock, flags);
311 if (unlikely(!admin_queue->running_state)) {
312 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
313 return ERR_PTR(-ENODEV);
314 }
315 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
316 cmd_size_in_bytes,
317 comp,
318 comp_size_in_bytes);
319 if (IS_ERR(comp_ctx))
320 admin_queue->running_state = false;
321 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
322
323 return comp_ctx;
324}
325
326static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
327 struct ena_com_create_io_ctx *ctx,
328 struct ena_com_io_sq *io_sq)
329{
330 size_t size;
331 int dev_node = 0;
332
333 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
334
335 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
336 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) :
339 sizeof(struct ena_eth_io_rx_desc);
340
341 size = io_sq->desc_entry_size * io_sq->q_depth;
342
343 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr =
347 dma_alloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr,
349 GFP_KERNEL);
350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr =
353 dma_alloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL);
356 }
357
358 if (!io_sq->desc_addr.virt_addr) {
359 pr_err("memory allocation failed\n");
360 return -ENOMEM;
361 }
362 }
363
364 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
365 /* Allocate bounce buffers */
366 io_sq->bounce_buf_ctrl.buffer_size =
367 ena_dev->llq_info.desc_list_entry_size;
368 io_sq->bounce_buf_ctrl.buffers_num =
369 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
370 io_sq->bounce_buf_ctrl.next_to_use = 0;
371
372 size = io_sq->bounce_buf_ctrl.buffer_size *
373 io_sq->bounce_buf_ctrl.buffers_num;
374
375 dev_node = dev_to_node(ena_dev->dmadev);
376 set_dev_node(ena_dev->dmadev, ctx->numa_node);
377 io_sq->bounce_buf_ctrl.base_buffer =
378 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
379 set_dev_node(ena_dev->dmadev, dev_node);
380 if (!io_sq->bounce_buf_ctrl.base_buffer)
381 io_sq->bounce_buf_ctrl.base_buffer =
382 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
383
384 if (!io_sq->bounce_buf_ctrl.base_buffer) {
385 pr_err("bounce buffer memory allocation failed\n");
386 return -ENOMEM;
387 }
388
389 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
390 sizeof(io_sq->llq_info));
391
392 /* Initiate the first bounce buffer */
393 io_sq->llq_buf_ctrl.curr_bounce_buf =
394 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
395 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
396 0x0, io_sq->llq_info.desc_list_entry_size);
397 io_sq->llq_buf_ctrl.descs_left_in_line =
398 io_sq->llq_info.descs_num_before_header;
399
400 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
401 io_sq->entries_in_tx_burst_left =
402 io_sq->llq_info.max_entries_in_tx_burst;
403 }
404
405 io_sq->tail = 0;
406 io_sq->next_to_comp = 0;
407 io_sq->phase = 1;
408
409 return 0;
410}
411
412static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
413 struct ena_com_create_io_ctx *ctx,
414 struct ena_com_io_cq *io_cq)
415{
416 size_t size;
417 int prev_node = 0;
418
419 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
420
421 /* Use the basic completion descriptor for Rx */
422 io_cq->cdesc_entry_size_in_bytes =
423 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
424 sizeof(struct ena_eth_io_tx_cdesc) :
425 sizeof(struct ena_eth_io_rx_cdesc_base);
426
427 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
428
429 prev_node = dev_to_node(ena_dev->dmadev);
430 set_dev_node(ena_dev->dmadev, ctx->numa_node);
431 io_cq->cdesc_addr.virt_addr =
432 dma_alloc_coherent(ena_dev->dmadev, size,
433 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
434 set_dev_node(ena_dev->dmadev, prev_node);
435 if (!io_cq->cdesc_addr.virt_addr) {
436 io_cq->cdesc_addr.virt_addr =
437 dma_alloc_coherent(ena_dev->dmadev, size,
438 &io_cq->cdesc_addr.phys_addr,
439 GFP_KERNEL);
440 }
441
442 if (!io_cq->cdesc_addr.virt_addr) {
443 pr_err("memory allocation failed\n");
444 return -ENOMEM;
445 }
446
447 io_cq->phase = 1;
448 io_cq->head = 0;
449
450 return 0;
451}
452
453static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
454 struct ena_admin_acq_entry *cqe)
455{
456 struct ena_comp_ctx *comp_ctx;
457 u16 cmd_id;
458
459 cmd_id = cqe->acq_common_descriptor.command &
460 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
461
462 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
463 if (unlikely(!comp_ctx)) {
464 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
465 admin_queue->running_state = false;
466 return;
467 }
468
469 comp_ctx->status = ENA_CMD_COMPLETED;
470 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
471
472 if (comp_ctx->user_cqe)
473 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
474
475 if (!admin_queue->polling)
476 complete(&comp_ctx->wait_event);
477}
478
479static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
480{
481 struct ena_admin_acq_entry *cqe = NULL;
482 u16 comp_num = 0;
483 u16 head_masked;
484 u8 phase;
485
486 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
487 phase = admin_queue->cq.phase;
488
489 cqe = &admin_queue->cq.entries[head_masked];
490
491 /* Go over all the completions */
492 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
493 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
494 /* Do not read the rest of the completion entry before the
495 * phase bit was validated
496 */
497 dma_rmb();
498 ena_com_handle_single_admin_completion(admin_queue, cqe);
499
500 head_masked++;
501 comp_num++;
502 if (unlikely(head_masked == admin_queue->q_depth)) {
503 head_masked = 0;
504 phase = !phase;
505 }
506
507 cqe = &admin_queue->cq.entries[head_masked];
508 }
509
510 admin_queue->cq.head += comp_num;
511 admin_queue->cq.phase = phase;
512 admin_queue->sq.head += comp_num;
513 admin_queue->stats.completed_cmd += comp_num;
514}
515
516static int ena_com_comp_status_to_errno(u8 comp_status)
517{
518 if (unlikely(comp_status != 0))
519 pr_err("admin command failed[%u]\n", comp_status);
520
521 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
522 return -EINVAL;
523
524 switch (comp_status) {
525 case ENA_ADMIN_SUCCESS:
526 return 0;
527 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
528 return -ENOMEM;
529 case ENA_ADMIN_UNSUPPORTED_OPCODE:
530 return -EOPNOTSUPP;
531 case ENA_ADMIN_BAD_OPCODE:
532 case ENA_ADMIN_MALFORMED_REQUEST:
533 case ENA_ADMIN_ILLEGAL_PARAMETER:
534 case ENA_ADMIN_UNKNOWN_ERROR:
535 return -EINVAL;
536 }
537
538 return 0;
539}
540
541static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
542 struct ena_com_admin_queue *admin_queue)
543{
544 unsigned long flags = 0;
545 unsigned long timeout;
546 int ret;
547
548 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
549
550 while (1) {
551 spin_lock_irqsave(&admin_queue->q_lock, flags);
552 ena_com_handle_admin_completion(admin_queue);
553 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
554
555 if (comp_ctx->status != ENA_CMD_SUBMITTED)
556 break;
557
558 if (time_is_before_jiffies(timeout)) {
559 pr_err("Wait for completion (polling) timeout\n");
560 /* ENA didn't have any completion */
561 spin_lock_irqsave(&admin_queue->q_lock, flags);
562 admin_queue->stats.no_completion++;
563 admin_queue->running_state = false;
564 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
565
566 ret = -ETIME;
567 goto err;
568 }
569
570 msleep(ENA_POLL_MS);
571 }
572
573 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
574 pr_err("Command was aborted\n");
575 spin_lock_irqsave(&admin_queue->q_lock, flags);
576 admin_queue->stats.aborted_cmd++;
577 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
578 ret = -ENODEV;
579 goto err;
580 }
581
582 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
583 comp_ctx->status);
584
585 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
586err:
587 comp_ctxt_release(admin_queue, comp_ctx);
588 return ret;
589}
590
591/**
592 * Set the LLQ configurations of the firmware
593 *
594 * The driver provides only the enabled feature values to the device,
595 * which in turn, checks if they are supported.
596 */
597static int ena_com_set_llq(struct ena_com_dev *ena_dev)
598{
599 struct ena_com_admin_queue *admin_queue;
600 struct ena_admin_set_feat_cmd cmd;
601 struct ena_admin_set_feat_resp resp;
602 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
603 int ret;
604
605 memset(&cmd, 0x0, sizeof(cmd));
606 admin_queue = &ena_dev->admin_queue;
607
608 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
609 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
610
611 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
612 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
613 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
614 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
615
616 ret = ena_com_execute_admin_command(admin_queue,
617 (struct ena_admin_aq_entry *)&cmd,
618 sizeof(cmd),
619 (struct ena_admin_acq_entry *)&resp,
620 sizeof(resp));
621
622 if (unlikely(ret))
623 pr_err("Failed to set LLQ configurations: %d\n", ret);
624
625 return ret;
626}
627
628static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
629 struct ena_admin_feature_llq_desc *llq_features,
630 struct ena_llq_configurations *llq_default_cfg)
631{
632 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
633 u16 supported_feat;
634 int rc;
635
636 memset(llq_info, 0, sizeof(*llq_info));
637
638 supported_feat = llq_features->header_location_ctrl_supported;
639
640 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
641 llq_info->header_location_ctrl =
642 llq_default_cfg->llq_header_location;
643 } else {
644 pr_err("Invalid header location control, supported: 0x%x\n",
645 supported_feat);
646 return -EINVAL;
647 }
648
649 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
650 supported_feat = llq_features->descriptors_stride_ctrl_supported;
651 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
652 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
653 } else {
654 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
655 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
656 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
657 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
658 } else {
659 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
660 supported_feat);
661 return -EINVAL;
662 }
663
664 pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
665 llq_default_cfg->llq_stride_ctrl, supported_feat,
666 llq_info->desc_stride_ctrl);
667 }
668 } else {
669 llq_info->desc_stride_ctrl = 0;
670 }
671
672 supported_feat = llq_features->entry_size_ctrl_supported;
673 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
674 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
675 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
676 } else {
677 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
678 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
679 llq_info->desc_list_entry_size = 128;
680 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
681 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
682 llq_info->desc_list_entry_size = 192;
683 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
684 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
685 llq_info->desc_list_entry_size = 256;
686 } else {
687 pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
688 supported_feat);
689 return -EINVAL;
690 }
691
692 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
693 llq_default_cfg->llq_ring_entry_size, supported_feat,
694 llq_info->desc_list_entry_size);
695 }
696 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
702 return -EINVAL;
703 }
704
705 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
706 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
707 sizeof(struct ena_eth_io_tx_desc);
708 else
709 llq_info->descs_per_entry = 1;
710
711 supported_feat = llq_features->desc_num_before_header_supported;
712 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
713 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
714 } else {
715 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
716 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
717 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
718 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
719 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
720 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
721 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
722 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
723 } else {
724 pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
725 supported_feat);
726 return -EINVAL;
727 }
728
729 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
730 llq_default_cfg->llq_num_decs_before_header,
731 supported_feat, llq_info->descs_num_before_header);
732 }
733
734 llq_info->max_entries_in_tx_burst =
735 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
736
737 rc = ena_com_set_llq(ena_dev);
738 if (rc)
739 pr_err("Cannot set LLQ configuration: %d\n", rc);
740
741 return rc;
742}
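/*
 * Illustrative sketch (not compiled into the driver): how a caller might fill
 * a default LLQ configuration and negotiate it against the descriptor the
 * device reports, using ena_com_config_llq_info() above. The specific values
 * are only an example; the function falls back to whatever the device
 * supports.
 */
#if 0
static int example_configure_llq(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_llq_desc *llq_features)
{
	struct ena_llq_configurations llq_cfg;

	memset(&llq_cfg, 0x0, sizeof(llq_cfg));
	llq_cfg.llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_cfg.llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_cfg.llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
	llq_cfg.llq_ring_entry_size_value = 128;
	llq_cfg.llq_num_decs_before_header =
		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;

	/* Negotiates the defaults against llq_features and programs the device */
	return ena_com_config_llq_info(ena_dev, llq_features, &llq_cfg);
}
#endif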
743
744static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
745 struct ena_com_admin_queue *admin_queue)
746{
747 unsigned long flags = 0;
748 int ret;
749
750 wait_for_completion_timeout(&comp_ctx->wait_event,
751 usecs_to_jiffies(
752 admin_queue->completion_timeout));
753
	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get any MSI-X interrupt.
	 */
759 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
760 spin_lock_irqsave(&admin_queue->q_lock, flags);
761 ena_com_handle_admin_completion(admin_queue);
762 admin_queue->stats.no_completion++;
763 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
764
765 if (comp_ctx->status == ENA_CMD_COMPLETED) {
766 pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
767 comp_ctx->cmd_opcode,
768 admin_queue->auto_polling ? "ON" : "OFF");
769 /* Check if fallback to polling is enabled */
770 if (admin_queue->auto_polling)
771 admin_queue->polling = true;
772 } else {
			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
774 comp_ctx->cmd_opcode, comp_ctx->status);
775 }
		/* Check if we shifted to polling mode.
		 * This happens if there is a completion without an interrupt
		 * and autopolling mode is enabled. In that case, continue normal execution.
		 */
780 if (!admin_queue->polling) {
781 admin_queue->running_state = false;
782 ret = -ETIME;
783 goto err;
784 }
785 }
786
787 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
788err:
789 comp_ctxt_release(admin_queue, comp_ctx);
790 return ret;
791}
792
/* This method reads a hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
797static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
798{
799 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
800 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
801 mmio_read->read_resp;
802 u32 mmio_read_reg, ret, i;
803 unsigned long flags = 0;
804 u32 timeout = mmio_read->reg_read_to;
805
806 might_sleep();
807
808 if (timeout == 0)
809 timeout = ENA_REG_READ_TIMEOUT;
810
811 /* If readless is disabled, perform regular read */
812 if (!mmio_read->readless_supported)
813 return readl(ena_dev->reg_bar + offset);
814
815 spin_lock_irqsave(&mmio_read->lock, flags);
816 mmio_read->seq_num++;
817
818 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
819 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
820 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
821 mmio_read_reg |= mmio_read->seq_num &
822 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
823
824 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
825
826 for (i = 0; i < timeout; i++) {
827 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
828 break;
829
830 udelay(1);
831 }
832
833 if (unlikely(i == timeout)) {
		pr_err("Reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
835 mmio_read->seq_num, offset, read_resp->req_id,
836 read_resp->reg_off);
837 ret = ENA_MMIO_READ_TIMEOUT;
838 goto err;
839 }
840
841 if (read_resp->reg_off != offset) {
842 pr_err("Read failure: wrong offset provided\n");
843 ret = ENA_MMIO_READ_TIMEOUT;
844 } else {
845 ret = read_resp->reg_val;
846 }
847err:
848 spin_unlock_irqrestore(&mmio_read->lock, flags);
849
850 return ret;
851}
852
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * In async mode it is expected that the IRQ handler calls
 * ena_com_handle_admin_completion() to mark the completions.
 */
860static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
861 struct ena_com_admin_queue *admin_queue)
862{
863 if (admin_queue->polling)
864 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
865 admin_queue);
866
867 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
868 admin_queue);
869}
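/*
 * Illustrative sketch (not compiled into the driver): before MSI-X vectors
 * exist, the admin queue can be forced into polling mode so the wait above
 * takes the polling path; once interrupts are wired up, the IRQ handler is
 * expected to invoke ena_com_admin_q_comp_intr_handler(), and auto-polling
 * can be left on as a fallback.
 */
#if 0
static void example_admin_completion_modes(struct ena_com_dev *ena_dev)
{
	/* No interrupts yet: complete admin commands by polling */
	ena_com_set_admin_polling_mode(ena_dev, true);

	/* ... issue commands through ena_com_execute_admin_command() ... */

	/* MSI-X is up: use the interrupt path with automatic polling fallback */
	ena_com_set_admin_polling_mode(ena_dev, false);
	ena_com_set_admin_auto_polling_mode(ena_dev, true);
}
#endif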
870
871static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
872 struct ena_com_io_sq *io_sq)
873{
874 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
875 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
876 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
877 u8 direction;
878 int ret;
879
880 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
881
882 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
883 direction = ENA_ADMIN_SQ_DIRECTION_TX;
884 else
885 direction = ENA_ADMIN_SQ_DIRECTION_RX;
886
887 destroy_cmd.sq.sq_identity |= (direction <<
888 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
889 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
890
891 destroy_cmd.sq.sq_idx = io_sq->idx;
892 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
893
894 ret = ena_com_execute_admin_command(admin_queue,
895 (struct ena_admin_aq_entry *)&destroy_cmd,
896 sizeof(destroy_cmd),
897 (struct ena_admin_acq_entry *)&destroy_resp,
898 sizeof(destroy_resp));
899
900 if (unlikely(ret && (ret != -ENODEV)))
901 pr_err("failed to destroy io sq error: %d\n", ret);
902
903 return ret;
904}
905
906static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
907 struct ena_com_io_sq *io_sq,
908 struct ena_com_io_cq *io_cq)
909{
910 size_t size;
911
912 if (io_cq->cdesc_addr.virt_addr) {
913 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
914
915 dma_free_coherent(ena_dev->dmadev, size,
916 io_cq->cdesc_addr.virt_addr,
917 io_cq->cdesc_addr.phys_addr);
918
919 io_cq->cdesc_addr.virt_addr = NULL;
920 }
921
922 if (io_sq->desc_addr.virt_addr) {
923 size = io_sq->desc_entry_size * io_sq->q_depth;
924
925 dma_free_coherent(ena_dev->dmadev, size,
926 io_sq->desc_addr.virt_addr,
927 io_sq->desc_addr.phys_addr);
928
929 io_sq->desc_addr.virt_addr = NULL;
930 }
931
932 if (io_sq->bounce_buf_ctrl.base_buffer) {
933 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
934 io_sq->bounce_buf_ctrl.base_buffer = NULL;
935 }
936}
937
938static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
939 u16 exp_state)
940{
941 u32 val, i;
942
943 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
944 timeout = (timeout * 100) / ENA_POLL_MS;
945
946 for (i = 0; i < timeout; i++) {
947 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
948
949 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
950 pr_err("Reg read timeout occurred\n");
951 return -ETIME;
952 }
953
954 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
955 exp_state)
956 return 0;
957
958 msleep(ENA_POLL_MS);
959 }
960
961 return -ETIME;
962}
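/*
 * Worked example (assumed numbers): the reset timeout in the capability
 * register is expressed in 100 ms units, so a value of 5 means 500 ms; with
 * ENA_POLL_MS assumed to be 100 ms, wait_for_reset_state() above would poll
 * the status register at most (5 * 100) / 100 = 5 times before giving up.
 */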
963
964static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
965 enum ena_admin_aq_feature_id feature_id)
966{
967 u32 feature_mask = 1 << feature_id;
968
	/* Device attributes are always supported */
970 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
971 !(ena_dev->supported_features & feature_mask))
972 return false;
973
974 return true;
975}
976
977static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
978 struct ena_admin_get_feat_resp *get_resp,
979 enum ena_admin_aq_feature_id feature_id,
980 dma_addr_t control_buf_dma_addr,
981 u32 control_buff_size,
982 u8 feature_ver)
983{
984 struct ena_com_admin_queue *admin_queue;
985 struct ena_admin_get_feat_cmd get_cmd;
986 int ret;
987
988 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
989 pr_debug("Feature %d isn't supported\n", feature_id);
990 return -EOPNOTSUPP;
991 }
992
993 memset(&get_cmd, 0x0, sizeof(get_cmd));
994 admin_queue = &ena_dev->admin_queue;
995
996 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
997
998 if (control_buff_size)
999 get_cmd.aq_common_descriptor.flags =
1000 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1001 else
1002 get_cmd.aq_common_descriptor.flags = 0;
1003
1004 ret = ena_com_mem_addr_set(ena_dev,
1005 &get_cmd.control_buffer.address,
1006 control_buf_dma_addr);
1007 if (unlikely(ret)) {
1008 pr_err("memory address set failed\n");
1009 return ret;
1010 }
1011
1012 get_cmd.control_buffer.length = control_buff_size;
1013 get_cmd.feat_common.feature_version = feature_ver;
1014 get_cmd.feat_common.feature_id = feature_id;
1015
1016 ret = ena_com_execute_admin_command(admin_queue,
1017 (struct ena_admin_aq_entry *)
1018 &get_cmd,
1019 sizeof(get_cmd),
1020 (struct ena_admin_acq_entry *)
1021 get_resp,
1022 sizeof(*get_resp));
1023
1024 if (unlikely(ret))
1025 pr_err("Failed to submit get_feature command %d error: %d\n",
1026 feature_id, ret);
1027
1028 return ret;
1029}
1030
1031static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1032 struct ena_admin_get_feat_resp *get_resp,
1033 enum ena_admin_aq_feature_id feature_id,
1034 u8 feature_ver)
1035{
1036 return ena_com_get_feature_ex(ena_dev,
1037 get_resp,
1038 feature_id,
1039 0,
1040 0,
1041 feature_ver);
1042}
1043
1044static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1045{
1046 struct ena_rss *rss = &ena_dev->rss;
1047
1048 rss->hash_key =
1049 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1050 &rss->hash_key_dma_addr, GFP_KERNEL);
1051
1052 if (unlikely(!rss->hash_key))
1053 return -ENOMEM;
1054
1055 return 0;
1056}
1057
1058static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1059{
1060 struct ena_rss *rss = &ena_dev->rss;
1061
1062 if (rss->hash_key)
1063 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1064 rss->hash_key, rss->hash_key_dma_addr);
1065 rss->hash_key = NULL;
1066}
1067
1068static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1069{
1070 struct ena_rss *rss = &ena_dev->rss;
1071
1072 rss->hash_ctrl =
1073 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1074 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1075
1076 if (unlikely(!rss->hash_ctrl))
1077 return -ENOMEM;
1078
1079 return 0;
1080}
1081
1082static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1083{
1084 struct ena_rss *rss = &ena_dev->rss;
1085
1086 if (rss->hash_ctrl)
1087 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1088 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1089 rss->hash_ctrl = NULL;
1090}
1091
1092static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1093 u16 log_size)
1094{
1095 struct ena_rss *rss = &ena_dev->rss;
1096 struct ena_admin_get_feat_resp get_resp;
1097 size_t tbl_size;
1098 int ret;
1099
1100 ret = ena_com_get_feature(ena_dev, &get_resp,
1101 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1102 if (unlikely(ret))
1103 return ret;
1104
1105 if ((get_resp.u.ind_table.min_size > log_size) ||
1106 (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("Indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
1108 1 << log_size, 1 << get_resp.u.ind_table.min_size,
1109 1 << get_resp.u.ind_table.max_size);
1110 return -EINVAL;
1111 }
1112
1113 tbl_size = (1ULL << log_size) *
1114 sizeof(struct ena_admin_rss_ind_table_entry);
1115
1116 rss->rss_ind_tbl =
1117 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1118 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1119 if (unlikely(!rss->rss_ind_tbl))
1120 goto mem_err1;
1121
1122 tbl_size = (1ULL << log_size) * sizeof(u16);
1123 rss->host_rss_ind_tbl =
1124 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1125 if (unlikely(!rss->host_rss_ind_tbl))
1126 goto mem_err2;
1127
1128 rss->tbl_log_size = log_size;
1129
1130 return 0;
1131
1132mem_err2:
1133 tbl_size = (1ULL << log_size) *
1134 sizeof(struct ena_admin_rss_ind_table_entry);
1135
1136 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1137 rss->rss_ind_tbl_dma_addr);
1138 rss->rss_ind_tbl = NULL;
1139mem_err1:
1140 rss->tbl_log_size = 0;
1141 return -ENOMEM;
1142}
1143
1144static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1145{
1146 struct ena_rss *rss = &ena_dev->rss;
1147 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1148 sizeof(struct ena_admin_rss_ind_table_entry);
1149
1150 if (rss->rss_ind_tbl)
1151 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1152 rss->rss_ind_tbl_dma_addr);
1153 rss->rss_ind_tbl = NULL;
1154
1155 if (rss->host_rss_ind_tbl)
1156 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1157 rss->host_rss_ind_tbl = NULL;
1158}
1159
1160static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1161 struct ena_com_io_sq *io_sq, u16 cq_idx)
1162{
1163 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1164 struct ena_admin_aq_create_sq_cmd create_cmd;
1165 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1166 u8 direction;
1167 int ret;
1168
1169 memset(&create_cmd, 0x0, sizeof(create_cmd));
1170
1171 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1172
1173 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1174 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1175 else
1176 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1177
1178 create_cmd.sq_identity |= (direction <<
1179 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1180 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1181
1182 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1183 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1184
1185 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1186 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1187 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1188
1189 create_cmd.sq_caps_3 |=
1190 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1191
1192 create_cmd.cq_idx = cq_idx;
1193 create_cmd.sq_depth = io_sq->q_depth;
1194
1195 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1196 ret = ena_com_mem_addr_set(ena_dev,
1197 &create_cmd.sq_ba,
1198 io_sq->desc_addr.phys_addr);
1199 if (unlikely(ret)) {
1200 pr_err("memory address set failed\n");
1201 return ret;
1202 }
1203 }
1204
1205 ret = ena_com_execute_admin_command(admin_queue,
1206 (struct ena_admin_aq_entry *)&create_cmd,
1207 sizeof(create_cmd),
1208 (struct ena_admin_acq_entry *)&cmd_completion,
1209 sizeof(cmd_completion));
1210 if (unlikely(ret)) {
1211 pr_err("Failed to create IO SQ. error: %d\n", ret);
1212 return ret;
1213 }
1214
1215 io_sq->idx = cmd_completion.sq_idx;
1216
1217 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1218 (uintptr_t)cmd_completion.sq_doorbell_offset);
1219
1220 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1221 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1222 + cmd_completion.llq_headers_offset);
1223
1224 io_sq->desc_addr.pbuf_dev_addr =
1225 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1226 cmd_completion.llq_descriptors_offset);
1227 }
1228
1229 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1230
1231 return ret;
1232}
1233
1234static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1235{
1236 struct ena_rss *rss = &ena_dev->rss;
1237 struct ena_com_io_sq *io_sq;
1238 u16 qid;
1239 int i;
1240
1241 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1242 qid = rss->host_rss_ind_tbl[i];
1243 if (qid >= ENA_TOTAL_NUM_QUEUES)
1244 return -EINVAL;
1245
1246 io_sq = &ena_dev->io_sq_queues[qid];
1247
1248 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1249 return -EINVAL;
1250
1251 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1252 }
1253
1254 return 0;
1255}
1256
1257static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1258{
1259 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1260 struct ena_rss *rss = &ena_dev->rss;
1261 u8 idx;
1262 u16 i;
1263
1264 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1265 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1266
1267 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1268 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1269 return -EINVAL;
1270 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1271
1272 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1273 return -EINVAL;
1274
1275 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1276 }
1277
1278 return 0;
1279}
1280
1281static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1282 u16 intr_delay_resolution)
1283{
1284 /* Initial value of intr_delay_resolution might be 0 */
1285 u16 prev_intr_delay_resolution =
1286 ena_dev->intr_delay_resolution ?
1287 ena_dev->intr_delay_resolution :
1288 ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1289
1290 if (!intr_delay_resolution) {
1291 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1292 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1293 }
1294
1295 /* update Rx */
1296 ena_dev->intr_moder_rx_interval =
1297 ena_dev->intr_moder_rx_interval *
1298 prev_intr_delay_resolution /
1299 intr_delay_resolution;
1300
1301 /* update Tx */
1302 ena_dev->intr_moder_tx_interval =
1303 ena_dev->intr_moder_tx_interval *
1304 prev_intr_delay_resolution /
1305 intr_delay_resolution;
1306
1307 ena_dev->intr_delay_resolution = intr_delay_resolution;
1308}
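/*
 * Worked example (assumed numbers): if the Rx moderation interval was set to
 * 64 units while the resolution was the default 1 usec, and the device later
 * reports a 4 usec resolution, the interval is rescaled to 64 * 1 / 4 = 16
 * units so that it still represents roughly the same 64 usec delay.
 */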
1309
1310/*****************************************************************************/
1311/******************************* API ******************************/
1312/*****************************************************************************/
1313
1314int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1315 struct ena_admin_aq_entry *cmd,
1316 size_t cmd_size,
1317 struct ena_admin_acq_entry *comp,
1318 size_t comp_size)
1319{
1320 struct ena_comp_ctx *comp_ctx;
1321 int ret;
1322
1323 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1324 comp, comp_size);
1325 if (IS_ERR(comp_ctx)) {
1326 if (comp_ctx == ERR_PTR(-ENODEV))
1327 pr_debug("Failed to submit command [%ld]\n",
1328 PTR_ERR(comp_ctx));
1329 else
1330 pr_err("Failed to submit command [%ld]\n",
1331 PTR_ERR(comp_ctx));
1332
1333 return PTR_ERR(comp_ctx);
1334 }
1335
1336 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1337 if (unlikely(ret)) {
1338 if (admin_queue->running_state)
1339 pr_err("Failed to process command. ret = %d\n", ret);
1340 else
1341 pr_debug("Failed to process command. ret = %d\n", ret);
1342 }
1343 return ret;
1344}
1345
1346int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1347 struct ena_com_io_cq *io_cq)
1348{
1349 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1350 struct ena_admin_aq_create_cq_cmd create_cmd;
1351 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1352 int ret;
1353
1354 memset(&create_cmd, 0x0, sizeof(create_cmd));
1355
1356 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1357
1358 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1359 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1360 create_cmd.cq_caps_1 |=
1361 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1362
1363 create_cmd.msix_vector = io_cq->msix_vector;
1364 create_cmd.cq_depth = io_cq->q_depth;
1365
1366 ret = ena_com_mem_addr_set(ena_dev,
1367 &create_cmd.cq_ba,
1368 io_cq->cdesc_addr.phys_addr);
1369 if (unlikely(ret)) {
1370 pr_err("memory address set failed\n");
1371 return ret;
1372 }
1373
1374 ret = ena_com_execute_admin_command(admin_queue,
1375 (struct ena_admin_aq_entry *)&create_cmd,
1376 sizeof(create_cmd),
1377 (struct ena_admin_acq_entry *)&cmd_completion,
1378 sizeof(cmd_completion));
1379 if (unlikely(ret)) {
1380 pr_err("Failed to create IO CQ. error: %d\n", ret);
1381 return ret;
1382 }
1383
1384 io_cq->idx = cmd_completion.cq_idx;
1385
1386 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1387 cmd_completion.cq_interrupt_unmask_register_offset);
1388
1389 if (cmd_completion.cq_head_db_register_offset)
1390 io_cq->cq_head_db_reg =
1391 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1392 cmd_completion.cq_head_db_register_offset);
1393
1394 if (cmd_completion.numa_node_register_offset)
1395 io_cq->numa_node_cfg_reg =
1396 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1397 cmd_completion.numa_node_register_offset);
1398
1399 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1400
1401 return ret;
1402}
1403
1404int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1405 struct ena_com_io_sq **io_sq,
1406 struct ena_com_io_cq **io_cq)
1407{
1408 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1409 pr_err("Invalid queue number %d but the max is %d\n", qid,
1410 ENA_TOTAL_NUM_QUEUES);
1411 return -EINVAL;
1412 }
1413
1414 *io_sq = &ena_dev->io_sq_queues[qid];
1415 *io_cq = &ena_dev->io_cq_queues[qid];
1416
1417 return 0;
1418}
1419
1420void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1421{
1422 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1423 struct ena_comp_ctx *comp_ctx;
1424 u16 i;
1425
1426 if (!admin_queue->comp_ctx)
1427 return;
1428
1429 for (i = 0; i < admin_queue->q_depth; i++) {
1430 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1431 if (unlikely(!comp_ctx))
1432 break;
1433
1434 comp_ctx->status = ENA_CMD_ABORTED;
1435
1436 complete(&comp_ctx->wait_event);
1437 }
1438}
1439
1440void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1441{
1442 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1443 unsigned long flags = 0;
1444
1445 spin_lock_irqsave(&admin_queue->q_lock, flags);
1446 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1447 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1448 msleep(ENA_POLL_MS);
1449 spin_lock_irqsave(&admin_queue->q_lock, flags);
1450 }
1451 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1452}
1453
1454int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1455 struct ena_com_io_cq *io_cq)
1456{
1457 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1458 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1459 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1460 int ret;
1461
1462 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1463
1464 destroy_cmd.cq_idx = io_cq->idx;
1465 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1466
1467 ret = ena_com_execute_admin_command(admin_queue,
1468 (struct ena_admin_aq_entry *)&destroy_cmd,
1469 sizeof(destroy_cmd),
1470 (struct ena_admin_acq_entry *)&destroy_resp,
1471 sizeof(destroy_resp));
1472
1473 if (unlikely(ret && (ret != -ENODEV)))
1474 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1475
1476 return ret;
1477}
1478
1479bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1480{
1481 return ena_dev->admin_queue.running_state;
1482}
1483
1484void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1485{
1486 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1487 unsigned long flags = 0;
1488
1489 spin_lock_irqsave(&admin_queue->q_lock, flags);
1490 ena_dev->admin_queue.running_state = state;
1491 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1492}
1493
1494void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1495{
1496 u16 depth = ena_dev->aenq.q_depth;
1497
1498 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1499
1500 /* Init head_db to mark that all entries in the queue
1501 * are initially available
1502 */
1503 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1504}
1505
1506int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1507{
1508 struct ena_com_admin_queue *admin_queue;
1509 struct ena_admin_set_feat_cmd cmd;
1510 struct ena_admin_set_feat_resp resp;
1511 struct ena_admin_get_feat_resp get_resp;
1512 int ret;
1513
1514 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1515 if (ret) {
1516 pr_info("Can't get aenq configuration\n");
1517 return ret;
1518 }
1519
1520 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1521 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1522 get_resp.u.aenq.supported_groups, groups_flag);
1523 return -EOPNOTSUPP;
1524 }
1525
1526 memset(&cmd, 0x0, sizeof(cmd));
1527 admin_queue = &ena_dev->admin_queue;
1528
1529 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1530 cmd.aq_common_descriptor.flags = 0;
1531 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1532 cmd.u.aenq.enabled_groups = groups_flag;
1533
1534 ret = ena_com_execute_admin_command(admin_queue,
1535 (struct ena_admin_aq_entry *)&cmd,
1536 sizeof(cmd),
1537 (struct ena_admin_acq_entry *)&resp,
1538 sizeof(resp));
1539
1540 if (unlikely(ret))
1541 pr_err("Failed to config AENQ ret: %d\n", ret);
1542
1543 return ret;
1544}
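/*
 * Illustrative sketch (not compiled into the driver): enabling only the AENQ
 * groups the driver actually handles. ENA_ADMIN_LINK_CHANGE and
 * ENA_ADMIN_KEEP_ALIVE are assumed to be group ids from the admin
 * definitions; any unsupported bit is rejected by the check above.
 */
#if 0
static int example_enable_aenq_groups(struct ena_com_dev *ena_dev)
{
	u32 groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_KEEP_ALIVE);

	return ena_com_set_aenq_config(ena_dev, groups);
}
#endif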
1545
1546int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1547{
1548 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1549 int width;
1550
1551 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1552 pr_err("Reg read timeout occurred\n");
1553 return -ETIME;
1554 }
1555
1556 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1557 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1558
1559 pr_debug("ENA dma width: %d\n", width);
1560
1561 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1562 pr_err("DMA width illegal value: %d\n", width);
1563 return -EINVAL;
1564 }
1565
1566 ena_dev->dma_addr_bits = width;
1567
1568 return width;
1569}
1570
1571int ena_com_validate_version(struct ena_com_dev *ena_dev)
1572{
1573 u32 ver;
1574 u32 ctrl_ver;
1575 u32 ctrl_ver_masked;
1576
	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
1580 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1581 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1582 ENA_REGS_CONTROLLER_VERSION_OFF);
1583
1584 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1585 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1586 pr_err("Reg read timeout occurred\n");
1587 return -ETIME;
1588 }
1589
1590 pr_info("ena device version: %d.%d\n",
1591 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1592 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1593 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1594
1595 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1596 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1597 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1598 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1599 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1600 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1601 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1602 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1603
1604 ctrl_ver_masked =
1605 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1606 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1607 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1608
1609 /* Validate the ctrl version without the implementation ID */
1610 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1611 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1612 return -1;
1613 }
1614
1615 return 0;
1616}
1617
1618void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1619{
1620 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1621 struct ena_com_admin_cq *cq = &admin_queue->cq;
1622 struct ena_com_admin_sq *sq = &admin_queue->sq;
1623 struct ena_com_aenq *aenq = &ena_dev->aenq;
1624 u16 size;
1625
1626 if (admin_queue->comp_ctx)
1627 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1628 admin_queue->comp_ctx = NULL;
1629 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1630 if (sq->entries)
1631 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1632 sq->dma_addr);
1633 sq->entries = NULL;
1634
1635 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1636 if (cq->entries)
1637 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1638 cq->dma_addr);
1639 cq->entries = NULL;
1640
1641 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1642 if (ena_dev->aenq.entries)
1643 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1644 aenq->dma_addr);
1645 aenq->entries = NULL;
1646}
1647
1648void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1649{
1650 u32 mask_value = 0;
1651
1652 if (polling)
1653 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1654
1655 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1656 ena_dev->admin_queue.polling = polling;
1657}
1658
1659void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1660 bool polling)
1661{
1662 ena_dev->admin_queue.auto_polling = polling;
1663}
1664
1665int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1666{
1667 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1668
1669 spin_lock_init(&mmio_read->lock);
1670 mmio_read->read_resp =
1671 dma_alloc_coherent(ena_dev->dmadev,
1672 sizeof(*mmio_read->read_resp),
1673 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1674 if (unlikely(!mmio_read->read_resp))
1675 goto err;
1676
1677 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1678
1679 mmio_read->read_resp->req_id = 0x0;
1680 mmio_read->seq_num = 0x0;
1681 mmio_read->readless_supported = true;
1682
1683 return 0;
1684
1685err:
1686
1687 return -ENOMEM;
1688}
1689
1690void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1691{
1692 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1693
1694 mmio_read->readless_supported = readless_supported;
1695}
1696
1697void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1698{
1699 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1700
1701 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1702 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1703
1704 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1705 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1706
1707 mmio_read->read_resp = NULL;
1708}
1709
1710void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1711{
1712 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1713 u32 addr_low, addr_high;
1714
1715 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1716 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1717
1718 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1719 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1720}
1721
1722int ena_com_admin_init(struct ena_com_dev *ena_dev,
1723 struct ena_aenq_handlers *aenq_handlers)
1724{
1725 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1726 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1727 int ret;
1728
1729 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1730
1731 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1732 pr_err("Reg read timeout occurred\n");
1733 return -ETIME;
1734 }
1735
1736 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1737 pr_err("Device isn't ready, abort com init\n");
1738 return -ENODEV;
1739 }
1740
1741 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1742
1743 admin_queue->q_dmadev = ena_dev->dmadev;
1744 admin_queue->polling = false;
1745 admin_queue->curr_cmd_id = 0;
1746
1747 atomic_set(&admin_queue->outstanding_cmds, 0);
1748
1749 spin_lock_init(&admin_queue->q_lock);
1750
1751 ret = ena_com_init_comp_ctxt(admin_queue);
1752 if (ret)
1753 goto error;
1754
1755 ret = ena_com_admin_init_sq(admin_queue);
1756 if (ret)
1757 goto error;
1758
1759 ret = ena_com_admin_init_cq(admin_queue);
1760 if (ret)
1761 goto error;
1762
1763 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1764 ENA_REGS_AQ_DB_OFF);
1765
1766 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1767 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1768
1769 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1770 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1771
1772 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1773 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1774
1775 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1776 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1777
1778 aq_caps = 0;
1779 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1780 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1781 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1782 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1783
1784 acq_caps = 0;
1785 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1786 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1787 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1788 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1789
1790 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1791 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1792 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1793 if (ret)
1794 goto error;
1795
1796 admin_queue->running_state = true;
1797
1798 return 0;
1799error:
1800 ena_com_admin_destroy(ena_dev);
1801
1802 return ret;
1803}
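/*
 * Illustrative sketch (not compiled into the driver): the typical bring-up
 * order for the pieces initialized in this file. Error handling is trimmed
 * down and "my_aenq_handlers" is a hypothetical handlers table supplied by
 * the caller.
 */
#if 0
static int example_device_bringup(struct ena_com_dev *ena_dev,
				  struct ena_aenq_handlers *my_aenq_handlers)
{
	int rc;

	/* 1) Enable readless register reads before touching other registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc)
		return rc;

	/* 2) Make sure the device/controller versions are new enough */
	rc = ena_com_validate_version(ena_dev);
	if (rc)
		goto err_mmio;

	/* 3) Learn the DMA width, then bring up the admin queue and AENQ */
	rc = ena_com_get_dma_width(ena_dev);
	if (rc < 0)
		goto err_mmio;

	rc = ena_com_admin_init(ena_dev, my_aenq_handlers);
	if (rc)
		goto err_mmio;

	return 0;

err_mmio:
	ena_com_mmio_reg_read_request_destroy(ena_dev);
	return rc;
}
#endif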
1804
1805int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1806 struct ena_com_create_io_ctx *ctx)
1807{
1808 struct ena_com_io_sq *io_sq;
1809 struct ena_com_io_cq *io_cq;
1810 int ret;
1811
1812 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1813 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1814 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1815 return -EINVAL;
1816 }
1817
1818 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1819 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1820
1821 memset(io_sq, 0x0, sizeof(*io_sq));
1822 memset(io_cq, 0x0, sizeof(*io_cq));
1823
1824 /* Init CQ */
1825 io_cq->q_depth = ctx->queue_size;
1826 io_cq->direction = ctx->direction;
1827 io_cq->qid = ctx->qid;
1828
1829 io_cq->msix_vector = ctx->msix_vector;
1830
1831 io_sq->q_depth = ctx->queue_size;
1832 io_sq->direction = ctx->direction;
1833 io_sq->qid = ctx->qid;
1834
1835 io_sq->mem_queue_type = ctx->mem_queue_type;
1836
1837 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1838 /* header length is limited to 8 bits */
1839 io_sq->tx_max_header_size =
1840 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1841
1842 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1843 if (ret)
1844 goto error;
1845 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1846 if (ret)
1847 goto error;
1848
1849 ret = ena_com_create_io_cq(ena_dev, io_cq);
1850 if (ret)
1851 goto error;
1852
1853 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1854 if (ret)
1855 goto destroy_io_cq;
1856
1857 return 0;
1858
1859destroy_io_cq:
1860 ena_com_destroy_io_cq(ena_dev, io_cq);
1861error:
1862 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1863 return ret;
1864}
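/*
 * Illustrative sketch (not compiled into the driver): describing one Tx
 * queue and creating/destroying it. The qid, depth and MSI-X vector values
 * are placeholders.
 */
#if 0
static int example_create_tx_queue(struct ena_com_dev *ena_dev)
{
	struct ena_com_create_io_ctx ctx;
	int rc;

	memset(&ctx, 0x0, sizeof(ctx));
	ctx.qid = 0;
	ctx.queue_size = 1024;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = 1;

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc)
		return rc;

	/* ... fetch the rings with ena_com_get_io_handlers() and use them ... */

	ena_com_destroy_io_queue(ena_dev, 0);
	return 0;
}
#endif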
1865
1866void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1867{
1868 struct ena_com_io_sq *io_sq;
1869 struct ena_com_io_cq *io_cq;
1870
1871 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1872 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1873 ENA_TOTAL_NUM_QUEUES);
1874 return;
1875 }
1876
1877 io_sq = &ena_dev->io_sq_queues[qid];
1878 io_cq = &ena_dev->io_cq_queues[qid];
1879
1880 ena_com_destroy_io_sq(ena_dev, io_sq);
1881 ena_com_destroy_io_cq(ena_dev, io_cq);
1882
1883 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1884}
1885
1886int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1887 struct ena_admin_get_feat_resp *resp)
1888{
1889 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1890}
1891
1892int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1893 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1894{
1895 struct ena_admin_get_feat_resp get_resp;
1896 int rc;
1897
1898 rc = ena_com_get_feature(ena_dev, &get_resp,
1899 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1900 if (rc)
1901 return rc;
1902
1903 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1904 sizeof(get_resp.u.dev_attr));
1905 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1906
1907 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1908 rc = ena_com_get_feature(ena_dev, &get_resp,
1909 ENA_ADMIN_MAX_QUEUES_EXT,
1910 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1911 if (rc)
1912 return rc;
1913
1914 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1915 return -EINVAL;
1916
1917 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1918 sizeof(get_resp.u.max_queue_ext));
1919 ena_dev->tx_max_header_size =
1920 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1921 } else {
1922 rc = ena_com_get_feature(ena_dev, &get_resp,
1923 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1924 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1925 sizeof(get_resp.u.max_queue));
1926 ena_dev->tx_max_header_size =
1927 get_resp.u.max_queue.max_header_size;
1928
1929 if (rc)
1930 return rc;
1931 }
1932
1933 rc = ena_com_get_feature(ena_dev, &get_resp,
1934 ENA_ADMIN_AENQ_CONFIG, 0);
1935 if (rc)
1936 return rc;
1937
1938 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1939 sizeof(get_resp.u.aenq));
1940
1941 rc = ena_com_get_feature(ena_dev, &get_resp,
1942 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1943 if (rc)
1944 return rc;
1945
1946 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1947 sizeof(get_resp.u.offload));
1948
	/* Driver hints isn't a mandatory admin command, so if the
	 * command isn't supported, set the driver hints to 0.
	 */
1952 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1953
1954 if (!rc)
1955 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1956 sizeof(get_resp.u.hw_hints));
1957 else if (rc == -EOPNOTSUPP)
1958 memset(&get_feat_ctx->hw_hints, 0x0,
1959 sizeof(get_feat_ctx->hw_hints));
1960 else
1961 return rc;
1962
1963 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1964 if (!rc)
1965 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1966 sizeof(get_resp.u.llq));
1967 else if (rc == -EOPNOTSUPP)
1968 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1969 else
1970 return rc;
1971
1972 return 0;
1973}
1974
1975void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1976{
1977 ena_com_handle_admin_completion(&ena_dev->admin_queue);
1978}
1979
/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
1983static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1984 u16 group)
1985{
1986 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1987
1988 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1989 return aenq_handlers->handlers[group];
1990
1991 return aenq_handlers->unimplemented_handler;
1992}
1993
/* ena_com_aenq_intr_handler:
 * handles the incoming AENQ events:
 * pop events from the queue and apply the relevant handler
 */
1998void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1999{
2000 struct ena_admin_aenq_entry *aenq_e;
2001 struct ena_admin_aenq_common_desc *aenq_common;
2002 struct ena_com_aenq *aenq = &dev->aenq;
2003 unsigned long long timestamp;
2004 ena_aenq_handler handler_cb;
2005 u16 masked_head, processed = 0;
2006 u8 phase;
2007
2008 masked_head = aenq->head & (aenq->q_depth - 1);
2009 phase = aenq->phase;
2010 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2011 aenq_common = &aenq_e->aenq_common_desc;
2012
2013 /* Go over all the events */
2014 while ((READ_ONCE(aenq_common->flags) &
2015 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2016 /* Make sure the phase bit (ownership) is as expected before
2017 * reading the rest of the descriptor.
2018 */
2019 dma_rmb();
2020
2021 timestamp =
2022 (unsigned long long)aenq_common->timestamp_low |
2023 ((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2025 aenq_common->group, aenq_common->syndrom, timestamp);
2026
		/* Handle specific event */
2028 handler_cb = ena_com_get_specific_aenq_cb(dev,
2029 aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */
2031
2032 /* Get next event entry */
2033 masked_head++;
2034 processed++;
2035
2036 if (unlikely(masked_head == aenq->q_depth)) {
2037 masked_head = 0;
2038 phase = !phase;
2039 }
2040 aenq_e = &aenq->entries[masked_head];
2041 aenq_common = &aenq_e->aenq_common_desc;
2042 }
2043
2044 aenq->head += processed;
2045 aenq->phase = phase;
2046
2047 /* Don't update aenq doorbell if there weren't any processed events */
2048 if (!processed)
2049 return;
2050
2051 /* write the aenq doorbell after all AENQ descriptors were read */
2052 mb();
2053 writel_relaxed((u32)aenq->head,
2054 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2055}
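/*
 * Illustrative sketch (not compiled into the driver): a minimal handlers
 * table as consumed by ena_com_get_specific_aenq_cb() above.
 * ENA_ADMIN_LINK_CHANGE is assumed to be one of the AENQ group ids from the
 * admin definitions; any group without a dedicated callback falls through to
 * the unimplemented handler.
 */
#if 0
static void example_link_change_handler(void *data,
					struct ena_admin_aenq_entry *aenq_e)
{
	/* React to the link state carried in the event descriptor */
}

static void example_unimplemented_handler(void *data,
					  struct ena_admin_aenq_entry *aenq_e)
{
	/* Events from groups the driver didn't register a callback for */
}

static struct ena_aenq_handlers example_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = example_link_change_handler,
	},
	.unimplemented_handler = example_unimplemented_handler,
};
#endif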
2056
2057int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2058 enum ena_regs_reset_reason_types reset_reason)
2059{
2060 u32 stat, timeout, cap, reset_val;
2061 int rc;
2062
2063 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2064 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2065
2066 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2067 (cap == ENA_MMIO_READ_TIMEOUT))) {
2068 pr_err("Reg read32 timeout occurred\n");
2069 return -ETIME;
2070 }
2071
2072 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2073 pr_err("Device isn't ready, can't reset device\n");
2074 return -EINVAL;
2075 }
2076
2077 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2078 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2079 if (timeout == 0) {
2080 pr_err("Invalid timeout value\n");
2081 return -EINVAL;
2082 }
2083
2084 /* start reset */
2085 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2086 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2087 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2088 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2089
2090 /* Write again the MMIO read request address */
2091 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2092
2093 rc = wait_for_reset_state(ena_dev, timeout,
2094 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2095 if (rc != 0) {
2096 pr_err("Reset indication didn't turn on\n");
2097 return rc;
2098 }
2099
2100 /* reset done */
2101 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2102 rc = wait_for_reset_state(ena_dev, timeout, 0);
2103 if (rc != 0) {
2104 pr_err("Reset indication didn't turn off\n");
2105 return rc;
2106 }
2107
2108 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2109 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2110 if (timeout)
2111 /* the resolution of timeout reg is 100ms */
2112 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2113 else
2114 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2115
2116 return 0;
2117}
2118
2119static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2120 struct ena_com_stats_ctx *ctx,
2121 enum ena_admin_get_stats_type type)
2122{
2123 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2124 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2125 struct ena_com_admin_queue *admin_queue;
2126 int ret;
2127
2128 admin_queue = &ena_dev->admin_queue;
2129
2130 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2131 get_cmd->aq_common_descriptor.flags = 0;
2132 get_cmd->type = type;
2133
2134 ret = ena_com_execute_admin_command(admin_queue,
2135 (struct ena_admin_aq_entry *)get_cmd,
2136 sizeof(*get_cmd),
2137 (struct ena_admin_acq_entry *)get_resp,
2138 sizeof(*get_resp));
2139
2140 if (unlikely(ret))
2141 pr_err("Failed to get stats. error: %d\n", ret);
2142
2143 return ret;
2144}
2145
2146int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2147 struct ena_admin_basic_stats *stats)
2148{
2149 struct ena_com_stats_ctx ctx;
2150 int ret;
2151
2152 memset(&ctx, 0x0, sizeof(ctx));
2153 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2154 if (likely(ret == 0))
2155 memcpy(stats, &ctx.get_resp.basic_stats,
2156 sizeof(ctx.get_resp.basic_stats));
2157
2158 return ret;
2159}
2160
2161int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2162{
2163 struct ena_com_admin_queue *admin_queue;
2164 struct ena_admin_set_feat_cmd cmd;
2165 struct ena_admin_set_feat_resp resp;
2166 int ret;
2167
2168 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2169 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2170 return -EOPNOTSUPP;
2171 }
2172
2173 memset(&cmd, 0x0, sizeof(cmd));
2174 admin_queue = &ena_dev->admin_queue;
2175
2176 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2177 cmd.aq_common_descriptor.flags = 0;
2178 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2179 cmd.u.mtu.mtu = mtu;
2180
2181 ret = ena_com_execute_admin_command(admin_queue,
2182 (struct ena_admin_aq_entry *)&cmd,
2183 sizeof(cmd),
2184 (struct ena_admin_acq_entry *)&resp,
2185 sizeof(resp));
2186
2187 if (unlikely(ret))
2188 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2189
2190 return ret;
2191}
2192
2193int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2194 struct ena_admin_feature_offload_desc *offload)
2195{
2196 int ret;
2197 struct ena_admin_get_feat_resp resp;
2198
2199 ret = ena_com_get_feature(ena_dev, &resp,
2200 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2201 if (unlikely(ret)) {
2202 pr_err("Failed to get offload capabilities %d\n", ret);
2203 return ret;
2204 }
2205
2206 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2207
2208 return 0;
2209}
2210
2211int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2212{
2213 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2214 struct ena_rss *rss = &ena_dev->rss;
2215 struct ena_admin_set_feat_cmd cmd;
2216 struct ena_admin_set_feat_resp resp;
2217 struct ena_admin_get_feat_resp get_resp;
2218 int ret;
2219
2220 if (!ena_com_check_supported_feature_id(ena_dev,
2221 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2222 pr_debug("Feature %d isn't supported\n",
2223 ENA_ADMIN_RSS_HASH_FUNCTION);
2224 return -EOPNOTSUPP;
2225 }
2226
2227 /* Validate hash function is supported */
2228 ret = ena_com_get_feature(ena_dev, &get_resp,
2229 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2230 if (unlikely(ret))
2231 return ret;
2232
2233 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2234 pr_err("Func hash %d isn't supported by device, abort\n",
2235 rss->hash_func);
2236 return -EOPNOTSUPP;
2237 }
2238
2239 memset(&cmd, 0x0, sizeof(cmd));
2240
2241 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2242 cmd.aq_common_descriptor.flags =
2243 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2244 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2245 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2246 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2247
2248 ret = ena_com_mem_addr_set(ena_dev,
2249 &cmd.control_buffer.address,
2250 rss->hash_key_dma_addr);
2251 if (unlikely(ret)) {
2252 pr_err("memory address set failed\n");
2253 return ret;
2254 }
2255
2256 cmd.control_buffer.length = sizeof(*rss->hash_key);
2257
2258 ret = ena_com_execute_admin_command(admin_queue,
2259 (struct ena_admin_aq_entry *)&cmd,
2260 sizeof(cmd),
2261 (struct ena_admin_acq_entry *)&resp,
2262 sizeof(resp));
2263 if (unlikely(ret)) {
2264 pr_err("Failed to set hash function %d. error: %d\n",
2265 rss->hash_func, ret);
2266 return -EINVAL;
2267 }
2268
2269 return 0;
2270}
2271
2272int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2273 enum ena_admin_hash_functions func,
2274 const u8 *key, u16 key_len, u32 init_val)
2275{
2276 struct ena_rss *rss = &ena_dev->rss;
2277 struct ena_admin_get_feat_resp get_resp;
2278 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2279 rss->hash_key;
2280 int rc;
2281
	/* Make sure the key size is a multiple of DWORDs */
2283 if (unlikely(key_len & 0x3))
2284 return -EINVAL;
2285
2286 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2287 ENA_ADMIN_RSS_HASH_FUNCTION,
2288 rss->hash_key_dma_addr,
2289 sizeof(*rss->hash_key), 0);
2290 if (unlikely(rc))
2291 return rc;
2292
2293 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2294 pr_err("Flow hash function %d isn't supported\n", func);
2295 return -EOPNOTSUPP;
2296 }
2297
2298 switch (func) {
2299 case ENA_ADMIN_TOEPLITZ:
2300 if (key_len > sizeof(hash_key->key)) {
2301 pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2302 key_len, sizeof(hash_key->key));
2303 return -EINVAL;
2304 }
2305
2306 memcpy(hash_key->key, key, key_len);
2307 rss->hash_init_val = init_val;
2308 hash_key->keys_num = key_len >> 2;
2309 break;
2310 case ENA_ADMIN_CRC32:
2311 rss->hash_init_val = init_val;
2312 break;
2313 default:
2314 pr_err("Invalid hash function (%d)\n", func);
2315 return -EINVAL;
2316 }
2317
2318 rss->hash_func = func;
2319 rc = ena_com_set_hash_function(ena_dev);
2320
2321 /* Restore the old function */
2322 if (unlikely(rc))
2323 ena_com_get_hash_function(ena_dev, NULL, NULL);
2324
2325 return rc;
2326}
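/*
 * Illustrative sketch (not compiled into the driver): configuring Toeplitz
 * hashing with a caller-provided key. A 40-byte key is only an example; the
 * key length must be a multiple of 4 and must fit the device key field, as
 * checked above.
 */
#if 0
static int example_set_toeplitz(struct ena_com_dev *ena_dev,
				const u8 *key, u16 key_len)
{
	/* e.g. key_len == 40 for a classic Toeplitz key, initial value 0 */
	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  key, key_len, 0);
}
#endif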
2327
2328int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2329 enum ena_admin_hash_functions *func,
2330 u8 *key)
2331{
2332 struct ena_rss *rss = &ena_dev->rss;
2333 struct ena_admin_get_feat_resp get_resp;
2334 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2335 rss->hash_key;
2336 int rc;
2337
2338 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2339 ENA_ADMIN_RSS_HASH_FUNCTION,
2340 rss->hash_key_dma_addr,
2341 sizeof(*rss->hash_key), 0);
2342 if (unlikely(rc))
2343 return rc;
2344
2345 rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2346 if (func)
2347 *func = rss->hash_func;
2348
2349 if (key)
2350 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2351
2352 return 0;
2353}
2354
2355int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2356 enum ena_admin_flow_hash_proto proto,
2357 u16 *fields)
2358{
2359 struct ena_rss *rss = &ena_dev->rss;
2360 struct ena_admin_get_feat_resp get_resp;
2361 int rc;
2362
2363 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2364 ENA_ADMIN_RSS_HASH_INPUT,
2365 rss->hash_ctrl_dma_addr,
2366 sizeof(*rss->hash_ctrl), 0);
2367 if (unlikely(rc))
2368 return rc;
2369
2370 if (fields)
2371 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2372
2373 return 0;
2374}
2375
2376int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2377{
2378 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2379 struct ena_rss *rss = &ena_dev->rss;
2380 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2381 struct ena_admin_set_feat_cmd cmd;
2382 struct ena_admin_set_feat_resp resp;
2383 int ret;
2384
2385 if (!ena_com_check_supported_feature_id(ena_dev,
2386 ENA_ADMIN_RSS_HASH_INPUT)) {
2387 pr_debug("Feature %d isn't supported\n",
2388 ENA_ADMIN_RSS_HASH_INPUT);
2389 return -EOPNOTSUPP;
2390 }
2391
2392 memset(&cmd, 0x0, sizeof(cmd));
2393
2394 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2395 cmd.aq_common_descriptor.flags =
2396 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2397 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2398 cmd.u.flow_hash_input.enabled_input_sort =
2399 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2400 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2401
2402 ret = ena_com_mem_addr_set(ena_dev,
2403 &cmd.control_buffer.address,
2404 rss->hash_ctrl_dma_addr);
2405 if (unlikely(ret)) {
2406 pr_err("memory address set failed\n");
2407 return ret;
2408 }
2409 cmd.control_buffer.length = sizeof(*hash_ctrl);
2410
2411 ret = ena_com_execute_admin_command(admin_queue,
2412 (struct ena_admin_aq_entry *)&cmd,
2413 sizeof(cmd),
2414 (struct ena_admin_acq_entry *)&resp,
2415 sizeof(resp));
2416 if (unlikely(ret))
2417 pr_err("Failed to set hash input. error: %d\n", ret);
2418
2419 return ret;
2420}
2421
2422int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2423{
2424 struct ena_rss *rss = &ena_dev->rss;
2425 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2426 rss->hash_ctrl;
2427 u16 available_fields = 0;
2428 int rc, i;
2429
2430 /* Get the supported hash input */
2431 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2432 if (unlikely(rc))
2433 return rc;
2434
2435 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2436 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2437 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2438
2439 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2440 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2441 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2442
2443 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2444 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2445 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2446
2447 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2448 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2449 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2450
2451 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2452 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2453
2454 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2455 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2456
2457 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2458 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2459
2460 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2461 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2462
2463 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2464 available_fields = hash_ctrl->selected_fields[i].fields &
2465 hash_ctrl->supported_fields[i].fields;
2466 if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2468 i, hash_ctrl->supported_fields[i].fields,
2469 hash_ctrl->selected_fields[i].fields);
2470 return -EOPNOTSUPP;
2471 }
2472 }
2473
2474 rc = ena_com_set_hash_ctrl(ena_dev);
2475
2476 /* In case of failure, restore the old hash ctrl */
2477 if (unlikely(rc))
2478 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2479
2480 return rc;
2481}
2482
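/* Override the hash input fields of a single flow protocol. Note that an
 * unsupported field combination is only warned about, not rejected; the
 * value is installed as requested and the device presumably applies what
 * it can.
 */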
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
			   proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		netdev_err(ena_dev->net_device,
			   "Proto %d doesn't support the required fields %x. supports only: %x\n",
			   proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

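/* Stage a single redirection-table entry in host memory; nothing reaches
 * the device until ena_com_indirect_table_set() is called. An illustrative
 * fill loop (num_queues is a hypothetical driver-side queue count):
 *
 *	for (i = 0; i < (1 << ena_dev->rss.tbl_log_size); i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i, i % num_queues);
 */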
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

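/* Flush the staged host redirection table to the device: convert the
 * entries to the device layout, then pass the table by DMA address in an
 * indirect SET_FEATURE command. inline_index is set to 0xFFFFFFFF,
 * apparently meaning "whole table via control buffer" rather than a single
 * inline entry update.
 */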
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
			   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		netdev_err(ena_dev->net_device,
			   "Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set indirect table. error: %d\n", ret);

	return ret;
}

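/* Read the redirection table back from the device into the DMA buffer and,
 * if ind_tbl is non-NULL, convert it to host queue numbers and copy it out.
 * With ind_tbl == NULL only the device-format buffer is refreshed.
 */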
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

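/* Allocate all RSS state (indirection table, hash key, hash control),
 * unwinding on partial failure. A sketch of the expected bring-up order,
 * with ENA_DEFAULT_RSS_TABLE_LOG_SIZE as a hypothetical placeholder:
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_DEFAULT_RSS_TABLE_LOG_SIZE);
 *	if (!rc)
 *		rc = ena_com_set_default_hash_ctrl(ena_dev);
 *	if (!rc)
 *		rc = ena_com_indirect_table_set(ena_dev);
 */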
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

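/* The host info page is a fixed 4 KiB DMA-coherent buffer that describes
 * the driver and OS to the device. Only the ENA spec version is filled in
 * here; the remaining fields are left for the caller to populate before
 * ena_com_set_host_attributes() is issued.
 */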
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
				   &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	host_attr->host_info->ena_spec_version =
		((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		 (ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

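/* Hand both host-attribute buffers (debug area and host info page) to the
 * device in a single SET_FEATURE command. Unlike the setters above, support
 * cannot be checked first because this runs before the device attributes
 * have been fetched.
 */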
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

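/* Convert a microsecond coalescing value into device interval units. The
 * device counts in units of intr_delay_resolution microseconds, so with a
 * resolution of 100 us, for example, a 640 us request becomes an interval
 * of 6 (integer division drops the remainder).
 */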
static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
							   u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		netdev_err(ena_dev->net_device,
			   "Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

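/* Fetch the device's interrupt-delay resolution. A device that lacks the
 * feature is not treated as an error: rc is reset to 0 and adaptive
 * moderation is simply left disabled.
 */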
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			netdev_dbg(ena_dev->net_device,
				   "Feature %d isn't supported\n",
				   ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			netdev_err(ena_dev->net_device,
				   "Failed to get interrupt moderation admin cmd. rc: %d\n",
				   rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

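/* Select host-memory or device-memory (LLQ) TX descriptor placement. With
 * LLQ, the inline header budget is what remains of a descriptor list entry
 * after the leading descriptors: e.g. a 128 B entry with 2 descriptors of
 * 16 B (sizeof(struct ena_eth_io_tx_desc)) before the header leaves 96 B
 * for packet headers. (The numbers are illustrative, not device values.)
 */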
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		netdev_err(ena_dev->net_device,
			   "The size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}