   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
   4 */
   5
   6#include "ena_com.h"
   7
   8/*****************************************************************************/
   9/*****************************************************************************/
  10
  11/* Timeout in micro-sec */
  12#define ADMIN_CMD_TIMEOUT_US (3000000)
  13
  14#define ENA_ASYNC_QUEUE_DEPTH 16
  15#define ENA_ADMIN_QUEUE_DEPTH 32
  16
  17
  18#define ENA_CTRL_MAJOR		0
  19#define ENA_CTRL_MINOR		0
  20#define ENA_CTRL_SUB_MINOR	1
  21
  22#define MIN_ENA_CTRL_VER \
  23	(((ENA_CTRL_MAJOR) << \
  24	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
  25	((ENA_CTRL_MINOR) << \
  26	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
  27	(ENA_CTRL_SUB_MINOR))
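/* MIN_ENA_CTRL_VER packs the minimal supported controller version (0.0.1)
 * into the same bit layout as the CONTROLLER_VERSION register, so
 * ena_com_validate_version() below can compare the masked register value
 * against it with a plain integer comparison.
 */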
  28
  29#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
  30#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
  31
  32#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
  33
  34#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
  35
  36#define ENA_REGS_ADMIN_INTR_MASK 1
  37
  38#define ENA_MAX_BACKOFF_DELAY_EXP 16U
  39
  40#define ENA_MIN_ADMIN_POLL_US 100
  41
  42#define ENA_MAX_ADMIN_POLL_US 5000
  43
  44/*****************************************************************************/
  45/*****************************************************************************/
  46/*****************************************************************************/
  47
  48enum ena_cmd_status {
  49	ENA_CMD_SUBMITTED,
  50	ENA_CMD_COMPLETED,
  51	/* Abort - canceled by the driver */
  52	ENA_CMD_ABORTED,
  53};
  54
  55struct ena_comp_ctx {
  56	struct completion wait_event;
  57	struct ena_admin_acq_entry *user_cqe;
  58	u32 comp_size;
  59	enum ena_cmd_status status;
  60	/* status from the device */
  61	u8 comp_status;
  62	u8 cmd_opcode;
  63	bool occupied;
  64};
  65
  66struct ena_com_stats_ctx {
  67	struct ena_admin_aq_get_stats_cmd get_cmd;
  68	struct ena_admin_acq_get_stats_resp get_resp;
  69};
  70
  71static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
  72				       struct ena_common_mem_addr *ena_addr,
  73				       dma_addr_t addr)
  74{
  75	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
  76		netdev_err(ena_dev->net_device,
   77			   "DMA address has more bits than the device supports\n");
  78		return -EINVAL;
  79	}
  80
  81	ena_addr->mem_addr_low = lower_32_bits(addr);
  82	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
  83
  84	return 0;
  85}
  86
  87static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
  88{
  89	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
  90	struct ena_com_admin_sq *sq = &admin_queue->sq;
  91	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
  92
  93	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
  94
  95	if (!sq->entries) {
  96		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
  97		return -ENOMEM;
  98	}
  99
 100	sq->head = 0;
 101	sq->tail = 0;
 102	sq->phase = 1;
 103
 104	sq->db_addr = NULL;
 105
 106	return 0;
 107}
 108
 109static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
 110{
 111	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 112	struct ena_com_admin_cq *cq = &admin_queue->cq;
 113	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 114
 115	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
 116
 117	if (!cq->entries) {
 118		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 119		return -ENOMEM;
 120	}
 121
 122	cq->head = 0;
 123	cq->phase = 1;
 124
 125	return 0;
 126}
 127
 128static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
 129				   struct ena_aenq_handlers *aenq_handlers)
 130{
 131	struct ena_com_aenq *aenq = &ena_dev->aenq;
 132	u32 addr_low, addr_high, aenq_caps;
 133	u16 size;
 134
 135	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 136	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
 137	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
 138
 139	if (!aenq->entries) {
 140		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 141		return -ENOMEM;
 142	}
 143
 144	aenq->head = aenq->q_depth;
 145	aenq->phase = 1;
 146
 147	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 148	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 149
 150	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 151	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 152
 153	aenq_caps = 0;
 154	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
 155	aenq_caps |=
 156		(sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
 157		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
 158	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 159
 160	if (unlikely(!aenq_handlers)) {
 161		netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
 162		return -EINVAL;
 163	}
 164
 165	aenq->aenq_handlers = aenq_handlers;
 166
 167	return 0;
 168}
 169
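/* Admin commands are tracked through per-slot completion contexts:
 * get_comp_ctxt() returns the context whose index matches the command id,
 * marking it occupied and bumping outstanding_cmds when capturing, and
 * comp_ctxt_release() hands it back once the caller has consumed the
 * completion.
 */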
 170static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 171				     struct ena_comp_ctx *comp_ctx)
 172{
 173	comp_ctx->occupied = false;
 174	atomic_dec(&queue->outstanding_cmds);
 175}
 176
 177static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
 178					  u16 command_id, bool capture)
 179{
 180	if (unlikely(command_id >= admin_queue->q_depth)) {
 181		netdev_err(admin_queue->ena_dev->net_device,
 182			   "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
 183			   command_id, admin_queue->q_depth);
 184		return NULL;
 185	}
 186
 187	if (unlikely(!admin_queue->comp_ctx)) {
 188		netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
 189		return NULL;
 190	}
 191
 192	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
 193		netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
 194		return NULL;
 195	}
 196
 197	if (capture) {
 198		atomic_inc(&admin_queue->outstanding_cmds);
 199		admin_queue->comp_ctx[command_id].occupied = true;
 200	}
 201
 202	return &admin_queue->comp_ctx[command_id];
 203}
 204
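/* Submission path (called with the admin queue lock held): reject the command
 * if outstanding_cmds already fills the queue, stamp the descriptor with the
 * current phase bit and command id, copy it into the SQ slot at tail, advance
 * curr_cmd_id and tail (flipping the phase on wrap-around) and ring the SQ
 * doorbell.
 */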
 205static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 206						       struct ena_admin_aq_entry *cmd,
 207						       size_t cmd_size_in_bytes,
 208						       struct ena_admin_acq_entry *comp,
 209						       size_t comp_size_in_bytes)
 210{
 211	struct ena_comp_ctx *comp_ctx;
 212	u16 tail_masked, cmd_id;
 213	u16 queue_size_mask;
 214	u16 cnt;
 215
 216	queue_size_mask = admin_queue->q_depth - 1;
 217
 218	tail_masked = admin_queue->sq.tail & queue_size_mask;
 219
 220	/* In case of queue FULL */
 221	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
 222	if (cnt >= admin_queue->q_depth) {
 223		netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
 224		admin_queue->stats.out_of_space++;
 225		return ERR_PTR(-ENOSPC);
 226	}
 227
 228	cmd_id = admin_queue->curr_cmd_id;
 229
 230	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
 231		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
 232
 233	cmd->aq_common_descriptor.command_id |= cmd_id &
 234		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
 235
 236	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
 237	if (unlikely(!comp_ctx))
 238		return ERR_PTR(-EINVAL);
 239
 240	comp_ctx->status = ENA_CMD_SUBMITTED;
 241	comp_ctx->comp_size = (u32)comp_size_in_bytes;
 242	comp_ctx->user_cqe = comp;
 243	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
 244
 245	reinit_completion(&comp_ctx->wait_event);
 246
 247	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
 248
 249	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
 250		queue_size_mask;
 251
 252	admin_queue->sq.tail++;
 253	admin_queue->stats.submitted_cmd++;
 254
 255	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
 256		admin_queue->sq.phase = !admin_queue->sq.phase;
 257
 258	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
 259
 260	return comp_ctx;
 261}
 262
 263static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
 264{
 265	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 266	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
 267	struct ena_comp_ctx *comp_ctx;
 268	u16 i;
 269
 270	admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
 271	if (unlikely(!admin_queue->comp_ctx)) {
 272		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 273		return -ENOMEM;
 274	}
 275
 276	for (i = 0; i < admin_queue->q_depth; i++) {
 277		comp_ctx = get_comp_ctxt(admin_queue, i, false);
 278		if (comp_ctx)
 279			init_completion(&comp_ctx->wait_event);
 280	}
 281
 282	return 0;
 283}
 284
 285static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 286						     struct ena_admin_aq_entry *cmd,
 287						     size_t cmd_size_in_bytes,
 288						     struct ena_admin_acq_entry *comp,
 289						     size_t comp_size_in_bytes)
 290{
 291	unsigned long flags = 0;
 292	struct ena_comp_ctx *comp_ctx;
 293
 294	spin_lock_irqsave(&admin_queue->q_lock, flags);
 295	if (unlikely(!admin_queue->running_state)) {
 296		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 297		return ERR_PTR(-ENODEV);
 298	}
 299	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
 300					      cmd_size_in_bytes,
 301					      comp,
 302					      comp_size_in_bytes);
 303	if (IS_ERR(comp_ctx))
 304		admin_queue->running_state = false;
 305	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 306
 307	return comp_ctx;
 308}
 309
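/* The IO SQ backing depends on the placement policy: with
 * ENA_ADMIN_PLACEMENT_POLICY_HOST the descriptor ring is allocated in
 * DMA-coherent host memory, while with ENA_ADMIN_PLACEMENT_POLICY_DEV (LLQ)
 * only host bounce buffers are allocated here and the descriptors are later
 * copied into the device memory BAR.
 */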
 310static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 311			      struct ena_com_create_io_ctx *ctx,
 312			      struct ena_com_io_sq *io_sq)
 313{
 314	size_t size;
 315
 316	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 317
 318	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
 319	io_sq->desc_entry_size =
 320		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 321		sizeof(struct ena_eth_io_tx_desc) :
 322		sizeof(struct ena_eth_io_rx_desc);
 323
 324	size = io_sq->desc_entry_size * io_sq->q_depth;
 325
 326	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
 327		io_sq->desc_addr.virt_addr =
 328			dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
 329					   GFP_KERNEL);
 330		if (!io_sq->desc_addr.virt_addr) {
 331			io_sq->desc_addr.virt_addr =
 332				dma_alloc_coherent(ena_dev->dmadev, size,
 333						   &io_sq->desc_addr.phys_addr, GFP_KERNEL);
 334		}
 335
 336		if (!io_sq->desc_addr.virt_addr) {
 337			netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 338			return -ENOMEM;
 339		}
 340	}
 341
 342	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 343		/* Allocate bounce buffers */
 344		io_sq->bounce_buf_ctrl.buffer_size =
 345			ena_dev->llq_info.desc_list_entry_size;
 346		io_sq->bounce_buf_ctrl.buffers_num =
 347			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 348		io_sq->bounce_buf_ctrl.next_to_use = 0;
 349
 350		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
 351			io_sq->bounce_buf_ctrl.buffers_num;
 352
 353		io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 354		if (!io_sq->bounce_buf_ctrl.base_buffer)
 355			io_sq->bounce_buf_ctrl.base_buffer =
 356				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 357
 358		if (!io_sq->bounce_buf_ctrl.base_buffer) {
 359			netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
 360			return -ENOMEM;
 361		}
 362
 363		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
 364		       sizeof(io_sq->llq_info));
 365
  366		/* Initialize the first bounce buffer */
 367		io_sq->llq_buf_ctrl.curr_bounce_buf =
 368			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
 369		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
 370		       0x0, io_sq->llq_info.desc_list_entry_size);
 371		io_sq->llq_buf_ctrl.descs_left_in_line =
 372			io_sq->llq_info.descs_num_before_header;
 373		io_sq->disable_meta_caching =
 374			io_sq->llq_info.disable_meta_caching;
 375
 376		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 377			io_sq->entries_in_tx_burst_left =
 378				io_sq->llq_info.max_entries_in_tx_burst;
 379	}
 380
 381	io_sq->tail = 0;
 382	io_sq->next_to_comp = 0;
 383	io_sq->phase = 1;
 384
 385	return 0;
 386}
 387
 388static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 389			      struct ena_com_create_io_ctx *ctx,
 390			      struct ena_com_io_cq *io_cq)
 391{
 392	size_t size;
 393
 394	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
 395
 396	/* Use the basic completion descriptor for Rx */
 397	io_cq->cdesc_entry_size_in_bytes =
 398		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 399		sizeof(struct ena_eth_io_tx_cdesc) :
 400		sizeof(struct ena_eth_io_rx_cdesc_base);
 401
 402	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 403
 404	io_cq->cdesc_addr.virt_addr =
 405		dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
 406	if (!io_cq->cdesc_addr.virt_addr) {
 407		io_cq->cdesc_addr.virt_addr =
 408			dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
 409					   GFP_KERNEL);
 410	}
 411
 412	if (!io_cq->cdesc_addr.virt_addr) {
 413		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 414		return -ENOMEM;
 415	}
 416
 417	io_cq->phase = 1;
 418	io_cq->head = 0;
 419
 420	return 0;
 421}
 422
 423static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
 424						   struct ena_admin_acq_entry *cqe)
 425{
 426	struct ena_comp_ctx *comp_ctx;
 427	u16 cmd_id;
 428
 429	cmd_id = cqe->acq_common_descriptor.command &
 430		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
 431
 432	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
 433	if (unlikely(!comp_ctx)) {
 434		netdev_err(admin_queue->ena_dev->net_device,
 435			   "comp_ctx is NULL. Changing the admin queue running state\n");
 436		admin_queue->running_state = false;
 437		return;
 438	}
 439
 440	comp_ctx->status = ENA_CMD_COMPLETED;
 441	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
 442
 443	if (comp_ctx->user_cqe)
 444		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
 445
 446	if (!admin_queue->polling)
 447		complete(&comp_ctx->wait_event);
 448}
 449
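/* Walk the admin CQ from the current head, consuming entries whose phase bit
 * matches the expected phase; the phase is toggled whenever the head wraps, so
 * stale entries from the previous pass are never mistaken for new completions.
 */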
 450static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
 451{
 452	struct ena_admin_acq_entry *cqe = NULL;
 453	u16 comp_num = 0;
 454	u16 head_masked;
 455	u8 phase;
 456
 457	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
 458	phase = admin_queue->cq.phase;
 459
 460	cqe = &admin_queue->cq.entries[head_masked];
 461
 462	/* Go over all the completions */
 463	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
 464		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
  465		/* Do not read the rest of the completion entry until the
  466		 * phase bit has been validated
 467		 */
 468		dma_rmb();
 469		ena_com_handle_single_admin_completion(admin_queue, cqe);
 470
 471		head_masked++;
 472		comp_num++;
 473		if (unlikely(head_masked == admin_queue->q_depth)) {
 474			head_masked = 0;
 475			phase = !phase;
 476		}
 477
 478		cqe = &admin_queue->cq.entries[head_masked];
 479	}
 480
 481	admin_queue->cq.head += comp_num;
 482	admin_queue->cq.phase = phase;
 483	admin_queue->sq.head += comp_num;
 484	admin_queue->stats.completed_cmd += comp_num;
 485}
 486
 487static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
 488					u8 comp_status)
 489{
 490	if (unlikely(comp_status != 0))
 491		netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
 492			   comp_status);
 493
 494	switch (comp_status) {
 495	case ENA_ADMIN_SUCCESS:
 496		return 0;
 497	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
 498		return -ENOMEM;
 499	case ENA_ADMIN_UNSUPPORTED_OPCODE:
 500		return -EOPNOTSUPP;
 501	case ENA_ADMIN_BAD_OPCODE:
 502	case ENA_ADMIN_MALFORMED_REQUEST:
 503	case ENA_ADMIN_ILLEGAL_PARAMETER:
 504	case ENA_ADMIN_UNKNOWN_ERROR:
 505		return -EINVAL;
 506	case ENA_ADMIN_RESOURCE_BUSY:
 507		return -EAGAIN;
 508	}
 509
 510	return -EINVAL;
 511}
 512
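/* Exponential backoff used by the polling loops below: the delay starts at
 * delay_us (at least ENA_MIN_ADMIN_POLL_US) and doubles on every retry, capped
 * at ENA_MAX_ADMIN_POLL_US. For example, with delay_us = 100 the successive
 * sleeps are roughly 100, 200, 400, 800, 1600, 3200 and then 5000 usec
 * (usleep_range() is given the window [delay_us, 2 * delay_us]).
 */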
 513static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
 514{
 515	exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
 516	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
 517	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
 518	usleep_range(delay_us, 2 * delay_us);
 519}
 520
 521static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 522						     struct ena_com_admin_queue *admin_queue)
 523{
 524	unsigned long flags = 0;
 525	unsigned long timeout;
 526	int ret;
 527	u32 exp = 0;
 528
 529	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
 530
 531	while (1) {
 532		spin_lock_irqsave(&admin_queue->q_lock, flags);
 533		ena_com_handle_admin_completion(admin_queue);
 534		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 535
 536		if (comp_ctx->status != ENA_CMD_SUBMITTED)
 537			break;
 538
 539		if (time_is_before_jiffies(timeout)) {
 540			netdev_err(admin_queue->ena_dev->net_device,
 541				   "Wait for completion (polling) timeout\n");
 542			/* ENA didn't have any completion */
 543			spin_lock_irqsave(&admin_queue->q_lock, flags);
 544			admin_queue->stats.no_completion++;
 545			admin_queue->running_state = false;
 546			spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 547
 548			ret = -ETIME;
 549			goto err;
 550		}
 551
 552		ena_delay_exponential_backoff_us(exp++,
 553						 admin_queue->ena_dev->ena_min_poll_delay_us);
 554	}
 555
 556	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
 557		netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
 558		spin_lock_irqsave(&admin_queue->q_lock, flags);
 559		admin_queue->stats.aborted_cmd++;
 560		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 561		ret = -ENODEV;
 562		goto err;
 563	}
 564
 565	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
 566
 567	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 568err:
 569	comp_ctxt_release(admin_queue, comp_ctx);
 570	return ret;
 571}
 572
 573/*
 574 * Set the LLQ configurations of the firmware
 575 *
 576 * The driver provides only the enabled feature values to the device,
  577 * which, in turn, checks whether they are supported.
 578 */
 579static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 580{
 581	struct ena_com_admin_queue *admin_queue;
 582	struct ena_admin_set_feat_cmd cmd;
 583	struct ena_admin_set_feat_resp resp;
 584	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 585	int ret;
 586
 587	memset(&cmd, 0x0, sizeof(cmd));
 588	admin_queue = &ena_dev->admin_queue;
 589
 590	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 591	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
 592
 593	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
 594	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
 595	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 596	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 597
 598	cmd.u.llq.accel_mode.u.set.enabled_flags =
 599		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
 600		BIT(ENA_ADMIN_LIMIT_TX_BURST);
 601
 602	ret = ena_com_execute_admin_command(admin_queue,
 603					    (struct ena_admin_aq_entry *)&cmd,
 604					    sizeof(cmd),
 605					    (struct ena_admin_acq_entry *)&resp,
 606					    sizeof(resp));
 607
 608	if (unlikely(ret))
 609		netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
 610
 611	return ret;
 612}
 613
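/* Negotiate the LLQ parameters: for each knob (header location, descriptor
 * stride, ring entry size, descriptors before header) the driver's default
 * from llq_default_cfg is used when the device reports it as supported,
 * otherwise a supported value is picked as fallback and the substitution is
 * logged; the function fails only when none of the acceptable options is
 * supported. The result is then pushed to the device via ena_com_set_llq().
 */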
 614static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 615				   struct ena_admin_feature_llq_desc *llq_features,
 616				   struct ena_llq_configurations *llq_default_cfg)
 617{
 618	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 619	struct ena_admin_accel_mode_get llq_accel_mode_get;
 620	u16 supported_feat;
 621	int rc;
 622
 623	memset(llq_info, 0, sizeof(*llq_info));
 624
 625	supported_feat = llq_features->header_location_ctrl_supported;
 626
 627	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
 628		llq_info->header_location_ctrl =
 629			llq_default_cfg->llq_header_location;
 630	} else {
 631		netdev_err(ena_dev->net_device,
 632			   "Invalid header location control, supported: 0x%x\n", supported_feat);
 633		return -EINVAL;
 634	}
 635
 636	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
 637		supported_feat = llq_features->descriptors_stride_ctrl_supported;
 638		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
 639			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
 640		} else	{
 641			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
 642				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 643			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
 644				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
 645			} else {
 646				netdev_err(ena_dev->net_device,
 647					   "Invalid desc_stride_ctrl, supported: 0x%x\n",
 648					   supported_feat);
 649				return -EINVAL;
 650			}
 651
 652			netdev_err(ena_dev->net_device,
 653				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 654				   llq_default_cfg->llq_stride_ctrl, supported_feat,
 655				   llq_info->desc_stride_ctrl);
 656		}
 657	} else {
 658		llq_info->desc_stride_ctrl = 0;
 659	}
 660
 661	supported_feat = llq_features->entry_size_ctrl_supported;
 662	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
 663		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
 664		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
 665	} else {
 666		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
 667			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 668			llq_info->desc_list_entry_size = 128;
 669		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
 670			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
 671			llq_info->desc_list_entry_size = 192;
 672		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
 673			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
 674			llq_info->desc_list_entry_size = 256;
 675		} else {
 676			netdev_err(ena_dev->net_device,
 677				   "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
 678			return -EINVAL;
 679		}
 680
 681		netdev_err(ena_dev->net_device,
 682			   "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 683			   llq_default_cfg->llq_ring_entry_size, supported_feat,
 684			   llq_info->desc_list_entry_size);
 685	}
 686	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
  687		/* The desc list entry size should be a whole multiple of 8.
 688		 * This requirement comes from __iowrite64_copy()
 689		 */
 690		netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
 691			   llq_info->desc_list_entry_size);
 692		return -EINVAL;
 693	}
 694
 695	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
 696		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
 697			sizeof(struct ena_eth_io_tx_desc);
 698	else
 699		llq_info->descs_per_entry = 1;
 700
 701	supported_feat = llq_features->desc_num_before_header_supported;
 702	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
 703		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
 704	} else {
 705		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
 706			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
 707		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
 708			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
 709		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
 710			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
 711		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
 712			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
 713		} else {
 714			netdev_err(ena_dev->net_device,
 715				   "Invalid descs_num_before_header, supported: 0x%x\n",
 716				   supported_feat);
 717			return -EINVAL;
 718		}
 719
 720		netdev_err(ena_dev->net_device,
 721			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 722			   llq_default_cfg->llq_num_decs_before_header, supported_feat,
 723			   llq_info->descs_num_before_header);
 724	}
  725	/* Check whether the accelerated queue feature is supported */
 726	llq_accel_mode_get = llq_features->accel_mode.u.get;
 727
 728	llq_info->disable_meta_caching =
 729		!!(llq_accel_mode_get.supported_flags &
 730		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
 731
 732	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
 733		llq_info->max_entries_in_tx_burst =
 734			llq_accel_mode_get.max_tx_burst_size /
 735			llq_default_cfg->llq_ring_entry_size_value;
 736
 737	rc = ena_com_set_llq(ena_dev);
 738	if (rc)
 739		netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
 740
 741	return rc;
 742}
 743
 744static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 745							struct ena_com_admin_queue *admin_queue)
 746{
 747	unsigned long flags = 0;
 748	int ret;
 749
 750	wait_for_completion_timeout(&comp_ctx->wait_event,
 751				    usecs_to_jiffies(admin_queue->completion_timeout));
 752
  753	/* In case the command wasn't completed, find out the root cause.
  754	 * There might be two kinds of errors:
  755	 * 1) No completion (timeout reached)
  756	 * 2) There is a completion, but the driver didn't receive an MSI-X interrupt.
 757	 */
 758	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
 759		spin_lock_irqsave(&admin_queue->q_lock, flags);
 760		ena_com_handle_admin_completion(admin_queue);
 761		admin_queue->stats.no_completion++;
 762		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 763
 764		if (comp_ctx->status == ENA_CMD_COMPLETED) {
 765			netdev_err(admin_queue->ena_dev->net_device,
 766				   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
 767				   comp_ctx->cmd_opcode);
 768		} else {
 769			netdev_err(admin_queue->ena_dev->net_device,
 770				   "The ena device didn't send a completion for the admin cmd %d status %d\n",
 771				   comp_ctx->cmd_opcode, comp_ctx->status);
 772		}
 773		admin_queue->running_state = false;
 774		ret = -ETIME;
 775		goto err;
 776	}
 777
 778	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 779err:
 780	comp_ctxt_release(admin_queue, comp_ctx);
 781	return ret;
 782}
 783
  784/* This method reads a hardware device register by posting a write request
  785 * and waiting for the response.
  786 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 787 */
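/* Readless flow: the driver bumps a sequence number, writes the register
 * offset and the sequence number to ENA_REGS_MMIO_REG_READ_OFF, and polls the
 * DMA-coherent read_resp buffer (whose address was programmed through
 * ena_com_mmio_reg_read_request_write_dev_addr()) until the device echoes the
 * sequence number back in req_id; reg_val then holds the register value.
 */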
 788static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 789{
 790	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 791	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
 792		mmio_read->read_resp;
 793	u32 mmio_read_reg, ret, i;
 794	unsigned long flags = 0;
 795	u32 timeout = mmio_read->reg_read_to;
 796
 797	might_sleep();
 798
 799	if (timeout == 0)
 800		timeout = ENA_REG_READ_TIMEOUT;
 801
 802	/* If readless is disabled, perform regular read */
 803	if (!mmio_read->readless_supported)
 804		return readl(ena_dev->reg_bar + offset);
 805
 806	spin_lock_irqsave(&mmio_read->lock, flags);
 807	mmio_read->seq_num++;
 808
 809	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
 810	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
 811			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
 812	mmio_read_reg |= mmio_read->seq_num &
 813			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 814
 815	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 816
 817	for (i = 0; i < timeout; i++) {
 818		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
 819			break;
 820
 821		udelay(1);
 822	}
 823
 824	if (unlikely(i == timeout)) {
 825		netdev_err(ena_dev->net_device,
 826			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
 827			   mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
 828		ret = ENA_MMIO_READ_TIMEOUT;
 829		goto err;
 830	}
 831
 832	if (read_resp->reg_off != offset) {
 833		netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
 834		ret = ENA_MMIO_READ_TIMEOUT;
 835	} else {
 836		ret = read_resp->reg_val;
 837	}
 838err:
 839	spin_unlock_irqrestore(&mmio_read->lock, flags);
 840
 841	return ret;
 842}
 843
  844/* There are two ways to wait for a completion.
  845 * Polling mode - wait until the completion is available.
  846 * Async mode - wait on a wait queue until the completion is ready
  847 * (or the timeout expires).
  848 * In async mode it is expected that the IRQ handler calls
  849 * ena_com_handle_admin_completion to mark the completions.
 850 */
 851static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
 852					     struct ena_com_admin_queue *admin_queue)
 853{
 854	if (admin_queue->polling)
 855		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
 856								 admin_queue);
 857
 858	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
 859							    admin_queue);
 860}
 861
 862static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
 863				 struct ena_com_io_sq *io_sq)
 864{
 865	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 866	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
 867	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
 868	u8 direction;
 869	int ret;
 870
 871	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
 872
 873	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 874		direction = ENA_ADMIN_SQ_DIRECTION_TX;
 875	else
 876		direction = ENA_ADMIN_SQ_DIRECTION_RX;
 877
 878	destroy_cmd.sq.sq_identity |= (direction <<
 879		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
 880		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
 881
 882	destroy_cmd.sq.sq_idx = io_sq->idx;
 883	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
 884
 885	ret = ena_com_execute_admin_command(admin_queue,
 886					    (struct ena_admin_aq_entry *)&destroy_cmd,
 887					    sizeof(destroy_cmd),
 888					    (struct ena_admin_acq_entry *)&destroy_resp,
 889					    sizeof(destroy_resp));
 890
 891	if (unlikely(ret && (ret != -ENODEV)))
 892		netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
 893
 894	return ret;
 895}
 896
 897static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 898				  struct ena_com_io_sq *io_sq,
 899				  struct ena_com_io_cq *io_cq)
 900{
 901	size_t size;
 902
 903	if (io_cq->cdesc_addr.virt_addr) {
 904		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 905
 906		dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
 907				  io_cq->cdesc_addr.phys_addr);
 908
 909		io_cq->cdesc_addr.virt_addr = NULL;
 910	}
 911
 912	if (io_sq->desc_addr.virt_addr) {
 913		size = io_sq->desc_entry_size * io_sq->q_depth;
 914
 915		dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
 916				  io_sq->desc_addr.phys_addr);
 917
 918		io_sq->desc_addr.virt_addr = NULL;
 919	}
 920
 921	if (io_sq->bounce_buf_ctrl.base_buffer) {
 922		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
 923		io_sq->bounce_buf_ctrl.base_buffer = NULL;
 924	}
 925}
 926
 927static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 928				u16 exp_state)
 929{
 930	u32 val, exp = 0;
 931	unsigned long timeout_stamp;
 932
 933	/* Convert timeout from resolution of 100ms to us resolution. */
 934	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
 935
 936	while (1) {
 937		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 938
 939		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
 940			netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
 941			return -ETIME;
 942		}
 943
 944		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
 945			exp_state)
 946			return 0;
 947
 948		if (time_is_before_jiffies(timeout_stamp))
 949			return -ETIME;
 950
 951		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
 952	}
 953}
 954
 955static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
 956					       enum ena_admin_aq_feature_id feature_id)
 957{
 958	u32 feature_mask = 1 << feature_id;
 959
  960	/* Device attributes are always supported */
 961	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
 962	    !(ena_dev->supported_features & feature_mask))
 963		return false;
 964
 965	return true;
 966}
 967
 968static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 969				  struct ena_admin_get_feat_resp *get_resp,
 970				  enum ena_admin_aq_feature_id feature_id,
 971				  dma_addr_t control_buf_dma_addr,
 972				  u32 control_buff_size,
 973				  u8 feature_ver)
 974{
 975	struct ena_com_admin_queue *admin_queue;
 976	struct ena_admin_get_feat_cmd get_cmd;
 977	int ret;
 978
 979	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
 980		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
 981		return -EOPNOTSUPP;
 982	}
 983
 984	memset(&get_cmd, 0x0, sizeof(get_cmd));
 985	admin_queue = &ena_dev->admin_queue;
 986
 987	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
 988
 989	if (control_buff_size)
 990		get_cmd.aq_common_descriptor.flags =
 991			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
 992	else
 993		get_cmd.aq_common_descriptor.flags = 0;
 994
 995	ret = ena_com_mem_addr_set(ena_dev,
 996				   &get_cmd.control_buffer.address,
 997				   control_buf_dma_addr);
 998	if (unlikely(ret)) {
 999		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1000		return ret;
1001	}
1002
1003	get_cmd.control_buffer.length = control_buff_size;
1004	get_cmd.feat_common.feature_version = feature_ver;
1005	get_cmd.feat_common.feature_id = feature_id;
1006
1007	ret = ena_com_execute_admin_command(admin_queue,
1008					    (struct ena_admin_aq_entry *)
1009					    &get_cmd,
1010					    sizeof(get_cmd),
1011					    (struct ena_admin_acq_entry *)
1012					    get_resp,
1013					    sizeof(*get_resp));
1014
1015	if (unlikely(ret))
1016		netdev_err(ena_dev->net_device,
1017			   "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
1018
1019	return ret;
1020}
1021
1022static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1023			       struct ena_admin_get_feat_resp *get_resp,
1024			       enum ena_admin_aq_feature_id feature_id,
1025			       u8 feature_ver)
1026{
1027	return ena_com_get_feature_ex(ena_dev,
1028				      get_resp,
1029				      feature_id,
1030				      0,
1031				      0,
1032				      feature_ver);
1033}
1034
1035int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1036{
1037	return ena_dev->rss.hash_func;
1038}
1039
1040static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1041{
1042	struct ena_admin_feature_rss_flow_hash_control *hash_key =
1043		(ena_dev->rss).hash_key;
1044
1045	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1046	/* The key buffer is stored in the device in an array of
1047	 * uint32 elements.
1048	 */
1049	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1050}
1051
1052static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1053{
1054	struct ena_rss *rss = &ena_dev->rss;
1055
1056	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1057		return -EOPNOTSUPP;
1058
1059	rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1060					   &rss->hash_key_dma_addr, GFP_KERNEL);
1061
1062	if (unlikely(!rss->hash_key))
1063		return -ENOMEM;
1064
1065	return 0;
1066}
1067
1068static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1069{
1070	struct ena_rss *rss = &ena_dev->rss;
1071
1072	if (rss->hash_key)
1073		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
1074				  rss->hash_key_dma_addr);
1075	rss->hash_key = NULL;
1076}
1077
1078static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1079{
1080	struct ena_rss *rss = &ena_dev->rss;
1081
1082	rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1083					    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1084
1085	if (unlikely(!rss->hash_ctrl))
1086		return -ENOMEM;
1087
1088	return 0;
1089}
1090
1091static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1092{
1093	struct ena_rss *rss = &ena_dev->rss;
1094
1095	if (rss->hash_ctrl)
1096		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
1097				  rss->hash_ctrl_dma_addr);
1098	rss->hash_ctrl = NULL;
1099}
1100
1101static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1102					   u16 log_size)
1103{
1104	struct ena_rss *rss = &ena_dev->rss;
1105	struct ena_admin_get_feat_resp get_resp;
1106	size_t tbl_size;
1107	int ret;
1108
1109	ret = ena_com_get_feature(ena_dev, &get_resp,
1110				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1111	if (unlikely(ret))
1112		return ret;
1113
1114	if ((get_resp.u.ind_table.min_size > log_size) ||
1115	    (get_resp.u.ind_table.max_size < log_size)) {
1116		netdev_err(ena_dev->net_device,
1117			   "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1118			   1 << log_size, 1 << get_resp.u.ind_table.min_size,
1119			   1 << get_resp.u.ind_table.max_size);
1120		return -EINVAL;
1121	}
1122
1123	tbl_size = (1ULL << log_size) *
1124		sizeof(struct ena_admin_rss_ind_table_entry);
1125
1126	rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
1127					      GFP_KERNEL);
1128	if (unlikely(!rss->rss_ind_tbl))
1129		goto mem_err1;
1130
1131	tbl_size = (1ULL << log_size) * sizeof(u16);
1132	rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1133	if (unlikely(!rss->host_rss_ind_tbl))
1134		goto mem_err2;
1135
1136	rss->tbl_log_size = log_size;
1137
1138	return 0;
1139
1140mem_err2:
1141	tbl_size = (1ULL << log_size) *
1142		sizeof(struct ena_admin_rss_ind_table_entry);
1143
1144	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
1145	rss->rss_ind_tbl = NULL;
1146mem_err1:
1147	rss->tbl_log_size = 0;
1148	return -ENOMEM;
1149}
1150
1151static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1152{
1153	struct ena_rss *rss = &ena_dev->rss;
1154	size_t tbl_size = (1ULL << rss->tbl_log_size) *
1155		sizeof(struct ena_admin_rss_ind_table_entry);
1156
1157	if (rss->rss_ind_tbl)
1158		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1159				  rss->rss_ind_tbl_dma_addr);
1160	rss->rss_ind_tbl = NULL;
1161
1162	if (rss->host_rss_ind_tbl)
1163		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1164	rss->host_rss_ind_tbl = NULL;
1165}
1166
1167static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1168				struct ena_com_io_sq *io_sq, u16 cq_idx)
1169{
1170	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1171	struct ena_admin_aq_create_sq_cmd create_cmd;
1172	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1173	u8 direction;
1174	int ret;
1175
1176	memset(&create_cmd, 0x0, sizeof(create_cmd));
1177
1178	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1179
1180	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1181		direction = ENA_ADMIN_SQ_DIRECTION_TX;
1182	else
1183		direction = ENA_ADMIN_SQ_DIRECTION_RX;
1184
1185	create_cmd.sq_identity |= (direction <<
1186		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1187		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1188
1189	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1190		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1191
1192	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1193		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1194		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1195
1196	create_cmd.sq_caps_3 |=
1197		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1198
1199	create_cmd.cq_idx = cq_idx;
1200	create_cmd.sq_depth = io_sq->q_depth;
1201
1202	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1203		ret = ena_com_mem_addr_set(ena_dev,
1204					   &create_cmd.sq_ba,
1205					   io_sq->desc_addr.phys_addr);
1206		if (unlikely(ret)) {
1207			netdev_err(ena_dev->net_device, "Memory address set failed\n");
1208			return ret;
1209		}
1210	}
1211
1212	ret = ena_com_execute_admin_command(admin_queue,
1213					    (struct ena_admin_aq_entry *)&create_cmd,
1214					    sizeof(create_cmd),
1215					    (struct ena_admin_acq_entry *)&cmd_completion,
1216					    sizeof(cmd_completion));
1217	if (unlikely(ret)) {
1218		netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
1219		return ret;
1220	}
1221
1222	io_sq->idx = cmd_completion.sq_idx;
1223
1224	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1225		(uintptr_t)cmd_completion.sq_doorbell_offset);
1226
1227	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1228		io_sq->desc_addr.pbuf_dev_addr =
1229			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1230			cmd_completion.llq_descriptors_offset);
1231	}
1232
1233	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1234
1235	return ret;
1236}
1237
1238static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1239{
1240	struct ena_rss *rss = &ena_dev->rss;
1241	struct ena_com_io_sq *io_sq;
1242	u16 qid;
1243	int i;
1244
1245	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1246		qid = rss->host_rss_ind_tbl[i];
1247		if (qid >= ENA_TOTAL_NUM_QUEUES)
1248			return -EINVAL;
1249
1250		io_sq = &ena_dev->io_sq_queues[qid];
1251
1252		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1253			return -EINVAL;
1254
1255		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1256	}
1257
1258	return 0;
1259}
1260
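/* Interrupt moderation intervals are stored in units of the device's
 * intr_delay_resolution, so a resolution change rescales the stored Rx/Tx
 * intervals to preserve the same absolute time. For example (illustrative
 * values only), an Rx interval of 64 stored at a 1 usec resolution becomes
 * 64 * 1 / 4 = 16 when the device reports a 4 usec resolution.
 */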
1261static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1262						 u16 intr_delay_resolution)
1263{
1264	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1265
1266	if (unlikely(!intr_delay_resolution)) {
1267		netdev_err(ena_dev->net_device,
1268			   "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1269		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1270	}
1271
1272	/* update Rx */
1273	ena_dev->intr_moder_rx_interval =
1274		ena_dev->intr_moder_rx_interval *
1275		prev_intr_delay_resolution /
1276		intr_delay_resolution;
1277
1278	/* update Tx */
1279	ena_dev->intr_moder_tx_interval =
1280		ena_dev->intr_moder_tx_interval *
1281		prev_intr_delay_resolution /
1282		intr_delay_resolution;
1283
1284	ena_dev->intr_delay_resolution = intr_delay_resolution;
1285}
1286
1287/*****************************************************************************/
1288/*******************************      API       ******************************/
1289/*****************************************************************************/
1290
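/* Execute an admin command synchronously: submit it under the queue lock via
 * ena_com_submit_admin_cmd() and then block in
 * ena_com_wait_and_process_admin_cq(), which either polls the CQ or waits for
 * the interrupt handler to signal the completion, depending on
 * admin_queue->polling.
 */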
1291int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1292				  struct ena_admin_aq_entry *cmd,
1293				  size_t cmd_size,
1294				  struct ena_admin_acq_entry *comp,
1295				  size_t comp_size)
1296{
1297	struct ena_comp_ctx *comp_ctx;
1298	int ret;
1299
1300	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1301					    comp, comp_size);
1302	if (IS_ERR(comp_ctx)) {
1303		ret = PTR_ERR(comp_ctx);
1304		if (ret == -ENODEV)
1305			netdev_dbg(admin_queue->ena_dev->net_device,
1306				   "Failed to submit command [%d]\n", ret);
1307		else
1308			netdev_err(admin_queue->ena_dev->net_device,
1309				   "Failed to submit command [%d]\n", ret);
1310
1311		return ret;
1312	}
1313
1314	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1315	if (unlikely(ret)) {
1316		if (admin_queue->running_state)
1317			netdev_err(admin_queue->ena_dev->net_device,
1318				   "Failed to process command. ret = %d\n", ret);
1319		else
1320			netdev_dbg(admin_queue->ena_dev->net_device,
1321				   "Failed to process command. ret = %d\n", ret);
1322	}
1323	return ret;
1324}
1325
1326int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1327			 struct ena_com_io_cq *io_cq)
1328{
1329	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1330	struct ena_admin_aq_create_cq_cmd create_cmd;
1331	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1332	int ret;
1333
1334	memset(&create_cmd, 0x0, sizeof(create_cmd));
1335
1336	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1337
1338	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1339		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1340	create_cmd.cq_caps_1 |=
1341		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1342
1343	create_cmd.msix_vector = io_cq->msix_vector;
1344	create_cmd.cq_depth = io_cq->q_depth;
1345
1346	ret = ena_com_mem_addr_set(ena_dev,
1347				   &create_cmd.cq_ba,
1348				   io_cq->cdesc_addr.phys_addr);
1349	if (unlikely(ret)) {
1350		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1351		return ret;
1352	}
1353
1354	ret = ena_com_execute_admin_command(admin_queue,
1355					    (struct ena_admin_aq_entry *)&create_cmd,
1356					    sizeof(create_cmd),
1357					    (struct ena_admin_acq_entry *)&cmd_completion,
1358					    sizeof(cmd_completion));
1359	if (unlikely(ret)) {
1360		netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
1361		return ret;
1362	}
1363
1364	io_cq->idx = cmd_completion.cq_idx;
1365
1366	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1367		cmd_completion.cq_interrupt_unmask_register_offset);
1368
1369	if (cmd_completion.numa_node_register_offset)
1370		io_cq->numa_node_cfg_reg =
1371			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1372			cmd_completion.numa_node_register_offset);
1373
1374	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1375
1376	return ret;
1377}
1378
1379int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1380			    struct ena_com_io_sq **io_sq,
1381			    struct ena_com_io_cq **io_cq)
1382{
1383	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1384		netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
1385			   ENA_TOTAL_NUM_QUEUES);
1386		return -EINVAL;
1387	}
1388
1389	*io_sq = &ena_dev->io_sq_queues[qid];
1390	*io_cq = &ena_dev->io_cq_queues[qid];
1391
1392	return 0;
1393}
1394
1395void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1396{
1397	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1398	struct ena_comp_ctx *comp_ctx;
1399	u16 i;
1400
1401	if (!admin_queue->comp_ctx)
1402		return;
1403
1404	for (i = 0; i < admin_queue->q_depth; i++) {
1405		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1406		if (unlikely(!comp_ctx))
1407			break;
1408
1409		comp_ctx->status = ENA_CMD_ABORTED;
1410
1411		complete(&comp_ctx->wait_event);
1412	}
1413}
1414
1415void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1416{
1417	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1418	unsigned long flags = 0;
1419	u32 exp = 0;
1420
1421	spin_lock_irqsave(&admin_queue->q_lock, flags);
1422	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1423		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1424		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1425		spin_lock_irqsave(&admin_queue->q_lock, flags);
1426	}
1427	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1428}
1429
1430int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1431			  struct ena_com_io_cq *io_cq)
1432{
1433	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1434	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1435	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1436	int ret;
1437
1438	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1439
1440	destroy_cmd.cq_idx = io_cq->idx;
1441	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1442
1443	ret = ena_com_execute_admin_command(admin_queue,
1444					    (struct ena_admin_aq_entry *)&destroy_cmd,
1445					    sizeof(destroy_cmd),
1446					    (struct ena_admin_acq_entry *)&destroy_resp,
1447					    sizeof(destroy_resp));
1448
1449	if (unlikely(ret && (ret != -ENODEV)))
1450		netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
1451
1452	return ret;
1453}
1454
1455bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1456{
1457	return ena_dev->admin_queue.running_state;
1458}
1459
1460void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1461{
1462	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1463	unsigned long flags = 0;
1464
1465	spin_lock_irqsave(&admin_queue->q_lock, flags);
1466	ena_dev->admin_queue.running_state = state;
1467	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1468}
1469
1470void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1471{
1472	u16 depth = ena_dev->aenq.q_depth;
1473
1474	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1475
1476	/* Init head_db to mark that all entries in the queue
1477	 * are initially available
1478	 */
1479	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1480}
1481
1482int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1483{
1484	struct ena_com_admin_queue *admin_queue;
1485	struct ena_admin_set_feat_cmd cmd;
1486	struct ena_admin_set_feat_resp resp;
1487	struct ena_admin_get_feat_resp get_resp;
1488	int ret;
1489
1490	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1491	if (ret) {
1492		dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
1493		return ret;
1494	}
1495
1496	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1497		netdev_warn(ena_dev->net_device,
1498			    "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1499			    get_resp.u.aenq.supported_groups, groups_flag);
1500		return -EOPNOTSUPP;
1501	}
1502
1503	memset(&cmd, 0x0, sizeof(cmd));
1504	admin_queue = &ena_dev->admin_queue;
1505
1506	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1507	cmd.aq_common_descriptor.flags = 0;
1508	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1509	cmd.u.aenq.enabled_groups = groups_flag;
1510
1511	ret = ena_com_execute_admin_command(admin_queue,
1512					    (struct ena_admin_aq_entry *)&cmd,
1513					    sizeof(cmd),
1514					    (struct ena_admin_acq_entry *)&resp,
1515					    sizeof(resp));
1516
1517	if (unlikely(ret))
1518		netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
1519
1520	return ret;
1521}
1522
1523int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1524{
1525	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1526	u32 width;
1527
1528	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1529		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1530		return -ETIME;
1531	}
1532
1533	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1534		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1535
1536	netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
1537
1538	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1539		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
1540		return -EINVAL;
1541	}
1542
1543	ena_dev->dma_addr_bits = width;
1544
1545	return width;
1546}
1547
1548int ena_com_validate_version(struct ena_com_dev *ena_dev)
1549{
1550	u32 ver;
1551	u32 ctrl_ver;
1552	u32 ctrl_ver_masked;
1553
1554	/* Make sure the ENA version and the controller version are at least
 1555	 * the versions the driver expects
1556	 */
1557	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1558	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1559					  ENA_REGS_CONTROLLER_VERSION_OFF);
1560
1561	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1562		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1563		return -ETIME;
1564	}
1565
1566	dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
1567		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1568		 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1569
1570	dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
1571		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1572			 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1573		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1574			 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1575		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1576		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1577			 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1578
1579	ctrl_ver_masked =
1580		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1581		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1582		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1583
1584	/* Validate the ctrl version without the implementation ID */
1585	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1586		netdev_err(ena_dev->net_device,
1587			   "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1588		return -1;
1589	}
1590
1591	return 0;
1592}
1593
1594static void
1595ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1596				      struct ena_com_admin_queue *admin_queue)
1597
1598{
1599	if (!admin_queue->comp_ctx)
1600		return;
1601
1602	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1603
1604	admin_queue->comp_ctx = NULL;
1605}
1606
1607void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1608{
1609	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1610	struct ena_com_admin_cq *cq = &admin_queue->cq;
1611	struct ena_com_admin_sq *sq = &admin_queue->sq;
1612	struct ena_com_aenq *aenq = &ena_dev->aenq;
1613	u16 size;
1614
1615	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1616
1617	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1618	if (sq->entries)
1619		dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
1620	sq->entries = NULL;
1621
1622	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1623	if (cq->entries)
1624		dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
1625	cq->entries = NULL;
1626
1627	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1628	if (ena_dev->aenq.entries)
1629		dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
1630	aenq->entries = NULL;
1631}
1632
1633void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1634{
1635	u32 mask_value = 0;
1636
1637	if (polling)
1638		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1639
1640	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1641	ena_dev->admin_queue.polling = polling;
1642}
1643
1644int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1645{
1646	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1647
1648	spin_lock_init(&mmio_read->lock);
1649	mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1650						  &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1651	if (unlikely(!mmio_read->read_resp))
1652		goto err;
1653
1654	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1655
1656	mmio_read->read_resp->req_id = 0x0;
1657	mmio_read->seq_num = 0x0;
1658	mmio_read->readless_supported = true;
1659
1660	return 0;
1661
1662err:
1663
1664	return -ENOMEM;
1665}
1666
1667void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1668{
1669	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1670
1671	mmio_read->readless_supported = readless_supported;
1672}
1673
1674void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1675{
1676	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1677
1678	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1679	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1680
1681	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
1682			  mmio_read->read_resp_dma_addr);
1683
1684	mmio_read->read_resp = NULL;
1685}
1686
1687void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1688{
1689	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1690	u32 addr_low, addr_high;
1691
1692	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1693	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1694
1695	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1696	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1697}
1698
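/* Bring up the admin queue: verify the device reports ready, allocate the
 * completion contexts and the admin SQ/CQ, program their base addresses and
 * capability registers, and finally initialize the AENQ.
 */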
1699int ena_com_admin_init(struct ena_com_dev *ena_dev,
1700		       struct ena_aenq_handlers *aenq_handlers)
1701{
1702	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1703	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1704	int ret;
1705
1706	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1707
1708	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1709		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1710		return -ETIME;
1711	}
1712
1713	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1714		netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
1715		return -ENODEV;
1716	}
1717
1718	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1719
1720	admin_queue->q_dmadev = ena_dev->dmadev;
1721	admin_queue->polling = false;
1722	admin_queue->curr_cmd_id = 0;
1723
1724	atomic_set(&admin_queue->outstanding_cmds, 0);
1725
1726	spin_lock_init(&admin_queue->q_lock);
1727
1728	ret = ena_com_init_comp_ctxt(admin_queue);
1729	if (ret)
1730		goto error;
1731
1732	ret = ena_com_admin_init_sq(admin_queue);
1733	if (ret)
1734		goto error;
1735
1736	ret = ena_com_admin_init_cq(admin_queue);
1737	if (ret)
1738		goto error;
1739
1740	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1741		ENA_REGS_AQ_DB_OFF);
1742
1743	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1744	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1745
1746	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1747	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1748
1749	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1750	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1751
1752	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1753	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1754
1755	aq_caps = 0;
1756	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1757	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1758			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1759			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1760
1761	acq_caps = 0;
1762	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1763	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1764		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1765		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1766
1767	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1768	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1769	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1770	if (ret)
1771		goto error;
1772
1773	admin_queue->ena_dev = ena_dev;
1774	admin_queue->running_state = true;
1775
1776	return 0;
1777error:
1778	ena_com_admin_destroy(ena_dev);
1779
1780	return ret;
1781}
1782
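/* Create an IO submission/completion queue pair for the requested qid:
 * initialize both rings, create the CQ on the device first and then the SQ
 * that is bound to it.
 */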
1783int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1784			    struct ena_com_create_io_ctx *ctx)
1785{
1786	struct ena_com_io_sq *io_sq;
1787	struct ena_com_io_cq *io_cq;
1788	int ret;
1789
1790	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1791		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
1792			   ctx->qid, ENA_TOTAL_NUM_QUEUES);
1793		return -EINVAL;
1794	}
1795
1796	io_sq = &ena_dev->io_sq_queues[ctx->qid];
1797	io_cq = &ena_dev->io_cq_queues[ctx->qid];
1798
1799	memset(io_sq, 0x0, sizeof(*io_sq));
1800	memset(io_cq, 0x0, sizeof(*io_cq));
1801
1802	/* Init CQ */
1803	io_cq->q_depth = ctx->queue_size;
1804	io_cq->direction = ctx->direction;
1805	io_cq->qid = ctx->qid;
1806
1807	io_cq->msix_vector = ctx->msix_vector;
1808
1809	io_sq->q_depth = ctx->queue_size;
1810	io_sq->direction = ctx->direction;
1811	io_sq->qid = ctx->qid;
1812
1813	io_sq->mem_queue_type = ctx->mem_queue_type;
1814
1815	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1816		/* header length is limited to 8 bits */
1817		io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1818
1819	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1820	if (ret)
1821		goto error;
1822	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1823	if (ret)
1824		goto error;
1825
1826	ret = ena_com_create_io_cq(ena_dev, io_cq);
1827	if (ret)
1828		goto error;
1829
1830	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1831	if (ret)
1832		goto destroy_io_cq;
1833
1834	return 0;
1835
1836destroy_io_cq:
1837	ena_com_destroy_io_cq(ena_dev, io_cq);
1838error:
1839	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1840	return ret;
1841}
1842
1843void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1844{
1845	struct ena_com_io_sq *io_sq;
1846	struct ena_com_io_cq *io_cq;
1847
1848	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1849		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
1850			   qid, ENA_TOTAL_NUM_QUEUES);
1851		return;
1852	}
1853
1854	io_sq = &ena_dev->io_sq_queues[qid];
1855	io_cq = &ena_dev->io_cq_queues[qid];
1856
1857	ena_com_destroy_io_sq(ena_dev, io_sq);
1858	ena_com_destroy_io_cq(ena_dev, io_cq);
1859
1860	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1861}
1862
1863int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1864			    struct ena_admin_get_feat_resp *resp)
1865{
1866	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1867}
1868
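/* Issue an ENA_ADMIN_GET_STATS admin command of the requested type and return
 * the device's response in ctx->get_resp.
 */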
1869static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1870			     struct ena_com_stats_ctx *ctx,
1871			     enum ena_admin_get_stats_type type)
1872{
1873	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1874	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1875	struct ena_com_admin_queue *admin_queue;
1876	int ret;
1877
1878	admin_queue = &ena_dev->admin_queue;
1879
1880	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1881	get_cmd->aq_common_descriptor.flags = 0;
1882	get_cmd->type = type;
1883
1884	ret = ena_com_execute_admin_command(admin_queue,
1885					    (struct ena_admin_aq_entry *)get_cmd,
1886					    sizeof(*get_cmd),
1887					    (struct ena_admin_acq_entry *)get_resp,
1888					    sizeof(*get_resp));
1889
1890	if (unlikely(ret))
1891		netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
1892
1893	return ret;
1894}
1895
1896static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
1897{
1898	struct ena_customer_metrics *customer_metrics;
1899	struct ena_com_stats_ctx ctx;
1900	int ret;
1901
1902	customer_metrics = &ena_dev->customer_metrics;
1903	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
1904		customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
1905		return;
1906	}
1907
1908	memset(&ctx, 0x0, sizeof(ctx));
1909	ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
1910	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
1911	if (likely(ret == 0))
1912		customer_metrics->supported_metrics =
1913			ctx.get_resp.u.customer_metrics.reported_metrics;
1914	else
1915		netdev_err(ena_dev->net_device,
1916			   "Failed to query customer metrics support. error: %d\n", ret);
1917}
1918
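/* Query the device feature set: device attributes, maximum queue limits
 * (extended or legacy format), AENQ configuration, stateless offloads, and
 * the optional HW hints and LLQ descriptors.
 */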
1919int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1920			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
1921{
1922	struct ena_admin_get_feat_resp get_resp;
1923	int rc;
1924
1925	rc = ena_com_get_feature(ena_dev, &get_resp,
1926				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1927	if (rc)
1928		return rc;
1929
1930	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1931	       sizeof(get_resp.u.dev_attr));
1932
1933	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1934	ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
1935
1936	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1937		rc = ena_com_get_feature(ena_dev, &get_resp,
1938					 ENA_ADMIN_MAX_QUEUES_EXT,
1939					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1940		if (rc)
1941			return rc;
1942
1943		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1944			return -EINVAL;
1945
1946		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1947		       sizeof(get_resp.u.max_queue_ext));
1948		ena_dev->tx_max_header_size =
1949			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1950	} else {
1951		rc = ena_com_get_feature(ena_dev, &get_resp,
1952					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1953		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1954		       sizeof(get_resp.u.max_queue));
1955		ena_dev->tx_max_header_size =
1956			get_resp.u.max_queue.max_header_size;
1957
1958		if (rc)
1959			return rc;
1960	}
1961
1962	rc = ena_com_get_feature(ena_dev, &get_resp,
1963				 ENA_ADMIN_AENQ_CONFIG, 0);
1964	if (rc)
1965		return rc;
1966
1967	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1968	       sizeof(get_resp.u.aenq));
1969
1970	rc = ena_com_get_feature(ena_dev, &get_resp,
1971				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1972	if (rc)
1973		return rc;
1974
1975	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1976	       sizeof(get_resp.u.offload));
1977
1978	/* Driver hints isn't a mandatory admin command. So in case the
1979	 * command isn't supported, set driver hints to 0
1980	 */
1981	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1982
1983	if (!rc)
1984		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
1985	else if (rc == -EOPNOTSUPP)
1986		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
1987	else
1988		return rc;
1989
1990	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1991	if (!rc)
1992		memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
1993	else if (rc == -EOPNOTSUPP)
1994		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1995	else
1996		return rc;
1997
1998	ena_com_set_supported_customer_metrics(ena_dev);
1999
2000	return 0;
2001}
2002
2003void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2004{
2005	ena_com_handle_admin_completion(&ena_dev->admin_queue);
2006}
2007
2008/* ena_com_get_specific_aenq_cb:
2009 * return the handler that is relevant to the specific event group
2010 */
2011static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2012						     u16 group)
2013{
2014	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2015
2016	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2017		return aenq_handlers->handlers[group];
2018
2019	return aenq_handlers->unimplemented_handler;
2020}
2021
2022/* ena_com_aenq_intr_handler:
2023 * handles the aenq incoming events.
2024 * pop events from the queue and apply the specific handler
2025 */
2026void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2027{
2028	struct ena_admin_aenq_entry *aenq_e;
2029	struct ena_admin_aenq_common_desc *aenq_common;
2030	struct ena_com_aenq *aenq  = &ena_dev->aenq;
2031	u64 timestamp;
2032	ena_aenq_handler handler_cb;
2033	u16 masked_head, processed = 0;
2034	u8 phase;
2035
2036	masked_head = aenq->head & (aenq->q_depth - 1);
2037	phase = aenq->phase;
2038	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2039	aenq_common = &aenq_e->aenq_common_desc;
2040
2041	/* Go over all the events */
2042	while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2043		/* Make sure the phase bit (ownership) is as expected before
2044		 * reading the rest of the descriptor.
2045		 */
2046		dma_rmb();
2047
2048		timestamp = (u64)aenq_common->timestamp_low |
2049			((u64)aenq_common->timestamp_high << 32);
2050
2051		netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2052			   aenq_common->group, aenq_common->syndrome, timestamp);
2053
2054		/* Handle specific event*/
2055		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2056							  aenq_common->group);
2057		handler_cb(data, aenq_e); /* call the actual event handler*/
2058
2059		/* Get next event entry */
2060		masked_head++;
2061		processed++;
2062
2063		if (unlikely(masked_head == aenq->q_depth)) {
2064			masked_head = 0;
2065			phase = !phase;
2066		}
2067		aenq_e = &aenq->entries[masked_head];
2068		aenq_common = &aenq_e->aenq_common_desc;
2069	}
2070
2071	aenq->head += processed;
2072	aenq->phase = phase;
2073
2074	/* Don't update aenq doorbell if there weren't any processed events */
2075	if (!processed)
2076		return;
2077
2078	/* write the aenq doorbell after all AENQ descriptors were read */
2079	mb();
2080	writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2081}
2082
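/* Reset the device: trigger the reset through the DEV_CTL register, wait for
 * the reset-in-progress bit to assert and then clear, and derive the admin
 * completion timeout from the capabilities register (100 ms resolution).
 */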
2083int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2084		      enum ena_regs_reset_reason_types reset_reason)
2085{
2086	u32 stat, timeout, cap, reset_val;
2087	int rc;
2088
2089	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2090	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2091
2092	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
2093		netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2094		return -ETIME;
2095	}
2096
2097	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2098		netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
2099		return -EINVAL;
2100	}
2101
2102	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2103			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2104	if (timeout == 0) {
2105		netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2106		return -EINVAL;
2107	}
2108
2109	/* start reset */
2110	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2111	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2112		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2113	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2114
2115	/* Write again the MMIO read request address */
2116	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2117
2118	rc = wait_for_reset_state(ena_dev, timeout,
2119				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2120	if (rc != 0) {
2121		netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
2122		return rc;
2123	}
2124
2125	/* reset done */
2126	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2127	rc = wait_for_reset_state(ena_dev, timeout, 0);
2128	if (rc != 0) {
2129		netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
2130		return rc;
2131	}
2132
2133	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2134		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2135	if (timeout)
2136		/* the resolution of timeout reg is 100ms */
2137		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2138	else
2139		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2140
2141	return 0;
2142}
2143
2144int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2145			  struct ena_admin_eni_stats *stats)
2146{
2147	struct ena_com_stats_ctx ctx;
2148	int ret;
2149
2150	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2151		netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
2152			   ENA_ADMIN_ENI_STATS);
2153		return -EOPNOTSUPP;
2154	}
2155
2156	memset(&ctx, 0x0, sizeof(ctx));
2157	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2158	if (likely(ret == 0))
2159		memcpy(stats, &ctx.get_resp.u.eni_stats,
2160		       sizeof(ctx.get_resp.u.eni_stats));
2161
2162	return ret;
2163}
2164
2165int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
2166			     struct ena_admin_ena_srd_info *info)
2167{
2168	struct ena_com_stats_ctx ctx;
2169	int ret;
2170
2171	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
2172		netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
2173			   ENA_ADMIN_ENA_SRD_INFO);
2174		return -EOPNOTSUPP;
2175	}
2176
2177	memset(&ctx, 0x0, sizeof(ctx));
2178	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
2179	if (likely(ret == 0))
2180		memcpy(info, &ctx.get_resp.u.ena_srd_info,
2181		       sizeof(ctx.get_resp.u.ena_srd_info));
2182
2183	return ret;
2184}
2185
2186int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
2187{
2188	struct ena_admin_aq_get_stats_cmd *get_cmd;
2189	struct ena_com_stats_ctx ctx;
2190	int ret;
2191
2192	if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
2193		netdev_err(ena_dev->net_device,
2194			   "Invalid buffer size %u. The given buffer is too big.\n", len);
2195		return -EINVAL;
2196	}
2197
2198	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
2199		netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
2200			   ENA_ADMIN_CUSTOMER_METRICS);
2201		return -EOPNOTSUPP;
2202	}
2203
2204	if (!ena_dev->customer_metrics.supported_metrics) {
2205		netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
2206		return -EOPNOTSUPP;
2207	}
2208
2209	get_cmd = &ctx.get_cmd;
2210	memset(&ctx, 0x0, sizeof(ctx));
2211	ret = ena_com_mem_addr_set(ena_dev,
2212				   &get_cmd->u.control_buffer.address,
2213				   ena_dev->customer_metrics.buffer_dma_addr);
2214	if (unlikely(ret)) {
2215		netdev_err(ena_dev->net_device, "Memory address set failed.\n");
2216		return ret;
2217	}
2218
2219	get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
2220	get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
2221	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
2222	if (likely(ret == 0))
2223		memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
2224	else
2225		netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
2226
2227	return ret;
2228}
2229
2230int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2231{
2232	struct ena_com_admin_queue *admin_queue;
2233	struct ena_admin_set_feat_cmd cmd;
2234	struct ena_admin_set_feat_resp resp;
2235	int ret;
2236
2237	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2238		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2239		return -EOPNOTSUPP;
2240	}
2241
2242	memset(&cmd, 0x0, sizeof(cmd));
2243	admin_queue = &ena_dev->admin_queue;
2244
2245	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2246	cmd.aq_common_descriptor.flags = 0;
2247	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2248	cmd.u.mtu.mtu = mtu;
2249
2250	ret = ena_com_execute_admin_command(admin_queue,
2251					    (struct ena_admin_aq_entry *)&cmd,
2252					    sizeof(cmd),
2253					    (struct ena_admin_acq_entry *)&resp,
2254					    sizeof(resp));
2255
2256	if (unlikely(ret))
2257		netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
2258
2259	return ret;
2260}
2261
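/* Program the currently selected RSS hash function, init value and key into
 * the device (after validating device support), passing the key through an
 * indirect control buffer.
 */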
2262int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2263{
2264	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2265	struct ena_rss *rss = &ena_dev->rss;
2266	struct ena_admin_set_feat_cmd cmd;
2267	struct ena_admin_set_feat_resp resp;
2268	struct ena_admin_get_feat_resp get_resp;
2269	int ret;
2270
2271	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
2272		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2273			   ENA_ADMIN_RSS_HASH_FUNCTION);
2274		return -EOPNOTSUPP;
2275	}
2276
2277	/* Validate hash function is supported */
2278	ret = ena_com_get_feature(ena_dev, &get_resp,
2279				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2280	if (unlikely(ret))
2281		return ret;
2282
2283	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2284		netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
2285			   rss->hash_func);
2286		return -EOPNOTSUPP;
2287	}
2288
2289	memset(&cmd, 0x0, sizeof(cmd));
2290
2291	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2292	cmd.aq_common_descriptor.flags =
2293		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2294	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2295	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2296	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2297
2298	ret = ena_com_mem_addr_set(ena_dev,
2299				   &cmd.control_buffer.address,
2300				   rss->hash_key_dma_addr);
2301	if (unlikely(ret)) {
2302		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2303		return ret;
2304	}
2305
2306	cmd.control_buffer.length = sizeof(*rss->hash_key);
2307
2308	ret = ena_com_execute_admin_command(admin_queue,
2309					    (struct ena_admin_aq_entry *)&cmd,
2310					    sizeof(cmd),
2311					    (struct ena_admin_acq_entry *)&resp,
2312					    sizeof(resp));
2313	if (unlikely(ret)) {
2314		netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
2315			   rss->hash_func, ret);
2316		return -EINVAL;
2317	}
2318
2319	return 0;
2320}
2321
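/* Update the host copy of the RSS hash function (and, for Toeplitz, the key)
 * and push it to the device; the previous function is restored on failure.
 */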
2322int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2323			       enum ena_admin_hash_functions func,
2324			       const u8 *key, u16 key_len, u32 init_val)
2325{
2326	struct ena_admin_feature_rss_flow_hash_control *hash_key;
2327	struct ena_admin_get_feat_resp get_resp;
2328	enum ena_admin_hash_functions old_func;
2329	struct ena_rss *rss = &ena_dev->rss;
2330	int rc;
2331
2332	hash_key = rss->hash_key;
2333
2334	/* Make sure the key size is a multiple of DWs */
2335	if (unlikely(key_len & 0x3))
2336		return -EINVAL;
2337
2338	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2339				    ENA_ADMIN_RSS_HASH_FUNCTION,
2340				    rss->hash_key_dma_addr,
2341				    sizeof(*rss->hash_key), 0);
2342	if (unlikely(rc))
2343		return rc;
2344
2345	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2346		netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
2347		return -EOPNOTSUPP;
2348	}
2349
2350	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
2351		if (key_len != sizeof(hash_key->key)) {
2352			netdev_err(ena_dev->net_device,
2353				   "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
2354				   sizeof(hash_key->key));
2355			return -EINVAL;
2356		}
2357		memcpy(hash_key->key, key, key_len);
2358		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2359	}
2360
2361	rss->hash_init_val = init_val;
2362	old_func = rss->hash_func;
2363	rss->hash_func = func;
2364	rc = ena_com_set_hash_function(ena_dev);
2365
2366	/* Restore the old function */
2367	if (unlikely(rc))
2368		rss->hash_func = old_func;
2369
2370	return rc;
2371}
2372
2373int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2374			      enum ena_admin_hash_functions *func)
2375{
2376	struct ena_rss *rss = &ena_dev->rss;
2377	struct ena_admin_get_feat_resp get_resp;
2378	int rc;
2379
2380	if (unlikely(!func))
2381		return -EINVAL;
2382
2383	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2384				    ENA_ADMIN_RSS_HASH_FUNCTION,
2385				    rss->hash_key_dma_addr,
2386				    sizeof(*rss->hash_key), 0);
2387	if (unlikely(rc))
2388		return rc;
2389
2390	/* ffs() returns 1 in case the lsb is set */
2391	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2392	if (rss->hash_func)
2393		rss->hash_func--;
2394
2395	*func = rss->hash_func;
2396
2397	return 0;
2398}
2399
2400int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2401{
2402	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2403		ena_dev->rss.hash_key;
2404
2405	if (key)
2406		memcpy(key, hash_key->key,
2407		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2408
2409	return 0;
2410}
2411
2412int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2413			  enum ena_admin_flow_hash_proto proto,
2414			  u16 *fields)
2415{
2416	struct ena_rss *rss = &ena_dev->rss;
2417	struct ena_admin_get_feat_resp get_resp;
2418	int rc;
2419
2420	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2421				    ENA_ADMIN_RSS_HASH_INPUT,
2422				    rss->hash_ctrl_dma_addr,
2423				    sizeof(*rss->hash_ctrl), 0);
2424	if (unlikely(rc))
2425		return rc;
2426
2427	if (fields)
2428		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2429
2430	return 0;
2431}
2432
2433int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2434{
2435	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2436	struct ena_rss *rss = &ena_dev->rss;
2437	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2438	struct ena_admin_set_feat_cmd cmd;
2439	struct ena_admin_set_feat_resp resp;
2440	int ret;
2441
2442	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
2443		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2444			   ENA_ADMIN_RSS_HASH_INPUT);
2445		return -EOPNOTSUPP;
2446	}
2447
2448	memset(&cmd, 0x0, sizeof(cmd));
2449
2450	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2451	cmd.aq_common_descriptor.flags =
2452		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2453	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2454	cmd.u.flow_hash_input.enabled_input_sort =
2455		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2456		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2457
2458	ret = ena_com_mem_addr_set(ena_dev,
2459				   &cmd.control_buffer.address,
2460				   rss->hash_ctrl_dma_addr);
2461	if (unlikely(ret)) {
2462		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2463		return ret;
2464	}
2465	cmd.control_buffer.length = sizeof(*hash_ctrl);
2466
2467	ret = ena_com_execute_admin_command(admin_queue,
2468					    (struct ena_admin_aq_entry *)&cmd,
2469					    sizeof(cmd),
2470					    (struct ena_admin_acq_entry *)&resp,
2471					    sizeof(resp));
2472	if (unlikely(ret))
2473		netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
2474
2475	return ret;
2476}
2477
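/* Select the default per-protocol hash input fields (L3/L4 for TCP/UDP,
 * L3 for plain IP, L2 for non-IP), verify the device supports them and
 * program them into the device.
 */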
2478int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2479{
2480	struct ena_rss *rss = &ena_dev->rss;
2481	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2482		rss->hash_ctrl;
2483	u16 available_fields = 0;
2484	int rc, i;
2485
2486	/* Get the supported hash input */
2487	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2488	if (unlikely(rc))
2489		return rc;
2490
2491	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2492		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2493		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2494
2495	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2496		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2497		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2498
2499	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2500		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2501		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2502
2503	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2504		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2505		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2506
2507	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2508		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2509
2510	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2511		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2512
2513	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2514		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2515
2516	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2517		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2518
2519	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2520		available_fields = hash_ctrl->selected_fields[i].fields &
2521				hash_ctrl->supported_fields[i].fields;
2522		if (available_fields != hash_ctrl->selected_fields[i].fields) {
2523			netdev_err(ena_dev->net_device,
2524				   "Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2525				   i, hash_ctrl->supported_fields[i].fields,
2526				   hash_ctrl->selected_fields[i].fields);
2527			return -EOPNOTSUPP;
2528		}
2529	}
2530
2531	rc = ena_com_set_hash_ctrl(ena_dev);
2532
2533	/* In case of failure, restore the old hash ctrl */
2534	if (unlikely(rc))
2535		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2536
2537	return rc;
2538}
2539
2540int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2541			   enum ena_admin_flow_hash_proto proto,
2542			   u16 hash_fields)
2543{
2544	struct ena_rss *rss = &ena_dev->rss;
2545	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2546	u16 supported_fields;
2547	int rc;
2548
2549	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2550		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
2551		return -EINVAL;
2552	}
2553
2554	/* Get the ctrl table */
2555	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2556	if (unlikely(rc))
2557		return rc;
2558
2559	/* Make sure all the fields are supported */
2560	supported_fields = hash_ctrl->supported_fields[proto].fields;
2561	if ((hash_fields & supported_fields) != hash_fields) {
2562		netdev_err(ena_dev->net_device,
2563			   "Proto %d doesn't support the required fields %x. supports only: %x\n",
2564			   proto, hash_fields, supported_fields);
2565	}
2566
2567	hash_ctrl->selected_fields[proto].fields = hash_fields;
2568
2569	rc = ena_com_set_hash_ctrl(ena_dev);
2570
2571	/* In case of failure, restore the old hash ctrl */
2572	if (unlikely(rc))
2573		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2574
2575	return 0;
2576}
2577
2578int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2579				      u16 entry_idx, u16 entry_value)
2580{
2581	struct ena_rss *rss = &ena_dev->rss;
2582
2583	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2584		return -EINVAL;
2585
2586	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2587		return -EINVAL;
2588
2589	rss->host_rss_ind_tbl[entry_idx] = entry_value;
2590
2591	return 0;
2592}
2593
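/* Convert the host RSS indirection table to the device format and push it to
 * the device through an indirect control buffer.
 */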
2594int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2595{
2596	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2597	struct ena_rss *rss = &ena_dev->rss;
2598	struct ena_admin_set_feat_cmd cmd;
2599	struct ena_admin_set_feat_resp resp;
2600	int ret;
2601
2602	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2603		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2604			   ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2605		return -EOPNOTSUPP;
2606	}
2607
2608	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2609	if (ret) {
2610		netdev_err(ena_dev->net_device,
2611			   "Failed to convert host indirection table to device table\n");
2612		return ret;
2613	}
2614
2615	memset(&cmd, 0x0, sizeof(cmd));
2616
2617	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2618	cmd.aq_common_descriptor.flags =
2619		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2620	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2621	cmd.u.ind_table.size = rss->tbl_log_size;
2622	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2623
2624	ret = ena_com_mem_addr_set(ena_dev,
2625				   &cmd.control_buffer.address,
2626				   rss->rss_ind_tbl_dma_addr);
2627	if (unlikely(ret)) {
2628		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2629		return ret;
2630	}
2631
2632	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2633		sizeof(struct ena_admin_rss_ind_table_entry);
2634
2635	ret = ena_com_execute_admin_command(admin_queue,
2636					    (struct ena_admin_aq_entry *)&cmd,
2637					    sizeof(cmd),
2638					    (struct ena_admin_acq_entry *)&resp,
2639					    sizeof(resp));
2640
2641	if (unlikely(ret))
2642		netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
2643
2644	return ret;
2645}
2646
2647int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2648{
2649	struct ena_rss *rss = &ena_dev->rss;
2650	struct ena_admin_get_feat_resp get_resp;
2651	u32 tbl_size;
2652	int i, rc;
2653
2654	tbl_size = (1ULL << rss->tbl_log_size) *
2655		sizeof(struct ena_admin_rss_ind_table_entry);
2656
2657	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2658				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2659				    rss->rss_ind_tbl_dma_addr,
2660				    tbl_size, 0);
2661	if (unlikely(rc))
2662		return rc;
2663
2664	if (!ind_tbl)
2665		return 0;
2666
2667	for (i = 0; i < (1 << rss->tbl_log_size); i++)
2668		ind_tbl[i] = rss->host_rss_ind_tbl[i];
2669
2670	return 0;
2671}
2672
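/* Initialize the RSS context: allocate the indirection table, the hash key
 * (falling back to indirection-table-only support if the device can't set a
 * key) and the hash control structures.
 */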
2673int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2674{
2675	int rc;
2676
2677	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2678
2679	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2680	if (unlikely(rc))
2681		goto err_indr_tbl;
2682
2683	/* The following function might return unsupported in case the
2684	 * device doesn't support setting the key / hash function. We can safely
2685	 * ignore this error and have indirection table support only.
2686	 */
2687	rc = ena_com_hash_key_allocate(ena_dev);
2688	if (likely(!rc))
2689		ena_com_hash_key_fill_default_key(ena_dev);
2690	else if (rc != -EOPNOTSUPP)
2691		goto err_hash_key;
2692
2693	rc = ena_com_hash_ctrl_init(ena_dev);
2694	if (unlikely(rc))
2695		goto err_hash_ctrl;
2696
2697	return 0;
2698
2699err_hash_ctrl:
2700	ena_com_hash_key_destroy(ena_dev);
2701err_hash_key:
2702	ena_com_indirect_table_destroy(ena_dev);
2703err_indr_tbl:
2704
2705	return rc;
2706}
2707
2708void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2709{
2710	ena_com_indirect_table_destroy(ena_dev);
2711	ena_com_hash_key_destroy(ena_dev);
2712	ena_com_hash_ctrl_destroy(ena_dev);
2713
2714	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2715}
2716
2717int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2718{
2719	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2720
2721	host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2722						  &host_attr->host_info_dma_addr, GFP_KERNEL);
2723	if (unlikely(!host_attr->host_info))
2724		return -ENOMEM;
2725
2726	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2727		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2728		(ENA_COMMON_SPEC_VERSION_MINOR));
2729
2730	return 0;
2731}
2732
2733int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2734				u32 debug_area_size)
2735{
2736	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2737
2738	host_attr->debug_area_virt_addr =
2739		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2740				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
2741	if (unlikely(!host_attr->debug_area_virt_addr)) {
2742		host_attr->debug_area_size = 0;
2743		return -ENOMEM;
2744	}
2745
2746	host_attr->debug_area_size = debug_area_size;
2747
2748	return 0;
2749}
2750
2751int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
2752{
2753	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
2754
2755	customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
2756	customer_metrics->buffer_virt_addr = NULL;
2757
2758	customer_metrics->buffer_virt_addr =
2759		dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
2760				   &customer_metrics->buffer_dma_addr, GFP_KERNEL);
2761	if (!customer_metrics->buffer_virt_addr) {
2762		customer_metrics->buffer_len = 0;
2763		return -ENOMEM;
2764	}
2765
2766	return 0;
2767}
2768
2769void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2770{
2771	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2772
2773	if (host_attr->host_info) {
2774		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2775				  host_attr->host_info_dma_addr);
2776		host_attr->host_info = NULL;
2777	}
2778}
2779
2780void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2781{
2782	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2783
2784	if (host_attr->debug_area_virt_addr) {
2785		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2786				  host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
2787		host_attr->debug_area_virt_addr = NULL;
2788	}
2789}
2790
2791void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
2792{
2793	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
2794
2795	if (customer_metrics->buffer_virt_addr) {
2796		dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
2797				  customer_metrics->buffer_virt_addr,
2798				  customer_metrics->buffer_dma_addr);
2799		customer_metrics->buffer_virt_addr = NULL;
2800		customer_metrics->buffer_len = 0;
2801	}
2802}
2803
2804int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2805{
2806	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2807	struct ena_com_admin_queue *admin_queue;
2808	struct ena_admin_set_feat_cmd cmd;
2809	struct ena_admin_set_feat_resp resp;
2810
2811	int ret;
2812
2813	/* Host attribute config is called before ena_com_get_dev_attr_feat
2814	 * so ena_com can't check if the feature is supported.
2815	 */
2816
2817	memset(&cmd, 0x0, sizeof(cmd));
2818	admin_queue = &ena_dev->admin_queue;
2819
2820	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2821	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2822
2823	ret = ena_com_mem_addr_set(ena_dev,
2824				   &cmd.u.host_attr.debug_ba,
2825				   host_attr->debug_area_dma_addr);
2826	if (unlikely(ret)) {
2827		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2828		return ret;
2829	}
2830
2831	ret = ena_com_mem_addr_set(ena_dev,
2832				   &cmd.u.host_attr.os_info_ba,
2833				   host_attr->host_info_dma_addr);
2834	if (unlikely(ret)) {
2835		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2836		return ret;
2837	}
2838
2839	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2840
2841	ret = ena_com_execute_admin_command(admin_queue,
2842					    (struct ena_admin_aq_entry *)&cmd,
2843					    sizeof(cmd),
2844					    (struct ena_admin_acq_entry *)&resp,
2845					    sizeof(resp));
2846
2847	if (unlikely(ret))
2848		netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
2849
2850	return ret;
2851}
2852
2853/* Interrupt moderation */
2854bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2855{
2856	return ena_com_check_supported_feature_id(ena_dev,
2857						  ENA_ADMIN_INTERRUPT_MODERATION);
2858}
2859
2860static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
2861							  u32 coalesce_usecs,
2862							  u32 intr_delay_resolution,
2863							  u32 *intr_moder_interval)
2864{
2865	if (!intr_delay_resolution) {
2866		netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
2867		return -EFAULT;
2868	}
2869
2870	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2871
2872	return 0;
2873}
2874
2875int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2876						      u32 tx_coalesce_usecs)
2877{
2878	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2879							      tx_coalesce_usecs,
2880							      ena_dev->intr_delay_resolution,
2881							      &ena_dev->intr_moder_tx_interval);
2882}
2883
2884int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2885						      u32 rx_coalesce_usecs)
2886{
2887	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2888							      rx_coalesce_usecs,
2889							      ena_dev->intr_delay_resolution,
2890							      &ena_dev->intr_moder_rx_interval);
2891}
2892
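/* Query the device's interrupt moderation support and delay resolution;
 * adaptive moderation is left disabled by default.
 */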
2893int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2894{
2895	struct ena_admin_get_feat_resp get_resp;
2896	u16 delay_resolution;
2897	int rc;
2898
2899	rc = ena_com_get_feature(ena_dev, &get_resp,
2900				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2901
2902	if (rc) {
2903		if (rc == -EOPNOTSUPP) {
2904			netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2905				   ENA_ADMIN_INTERRUPT_MODERATION);
2906			rc = 0;
2907		} else {
2908			netdev_err(ena_dev->net_device,
2909				   "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
2910		}
2911
2912		/* no moderation supported, disable adaptive support */
2913		ena_com_disable_adaptive_moderation(ena_dev);
2914		return rc;
2915	}
2916
2917	/* if moderation is supported by device we set adaptive moderation */
2918	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2919	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2920
2921	/* Disable adaptive moderation by default - can be enabled later */
2922	ena_com_disable_adaptive_moderation(ena_dev);
2923
2924	return 0;
2925}
2926
2927unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2928{
2929	return ena_dev->intr_moder_tx_interval;
2930}
2931
2932unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2933{
2934	return ena_dev->intr_moder_rx_interval;
2935}
2936
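/* Choose the TX placement policy: fall back to host memory when the device
 * exposes no LLQs, otherwise configure the LLQ parameters and derive the
 * maximum TX header size from the descriptor list entry size.
 */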
2937int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2938			    struct ena_admin_feature_llq_desc *llq_features,
2939			    struct ena_llq_configurations *llq_default_cfg)
2940{
2941	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2942	int rc;
2943
2944	if (!llq_features->max_llq_num) {
2945		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2946		return 0;
2947	}
2948
2949	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2950	if (rc)
2951		return rc;
2952
2953	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2954		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2955
2956	if (unlikely(ena_dev->tx_max_header_size == 0)) {
2957		netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
2958		return -EINVAL;
2959	}
2960
2961	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2962
2963	return 0;
2964}
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
   4 */
   5
   6#include "ena_com.h"
   7
   8/*****************************************************************************/
   9/*****************************************************************************/
  10
  11/* Timeout in micro-sec */
  12#define ADMIN_CMD_TIMEOUT_US (3000000)
  13
  14#define ENA_ASYNC_QUEUE_DEPTH 16
  15#define ENA_ADMIN_QUEUE_DEPTH 32
  16
  17
  18#define ENA_CTRL_MAJOR		0
  19#define ENA_CTRL_MINOR		0
  20#define ENA_CTRL_SUB_MINOR	1
  21
  22#define MIN_ENA_CTRL_VER \
  23	(((ENA_CTRL_MAJOR) << \
  24	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
  25	((ENA_CTRL_MINOR) << \
  26	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
  27	(ENA_CTRL_SUB_MINOR))
  28
  29#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
  30#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
  31
  32#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
  33
  34#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
  35
  36#define ENA_REGS_ADMIN_INTR_MASK 1
  37
 
 
  38#define ENA_MIN_ADMIN_POLL_US 100
  39
  40#define ENA_MAX_ADMIN_POLL_US 5000
  41
  42/*****************************************************************************/
  43/*****************************************************************************/
  44/*****************************************************************************/
  45
  46enum ena_cmd_status {
  47	ENA_CMD_SUBMITTED,
  48	ENA_CMD_COMPLETED,
  49	/* Abort - canceled by the driver */
  50	ENA_CMD_ABORTED,
  51};
  52
  53struct ena_comp_ctx {
  54	struct completion wait_event;
  55	struct ena_admin_acq_entry *user_cqe;
  56	u32 comp_size;
  57	enum ena_cmd_status status;
  58	/* status from the device */
  59	u8 comp_status;
  60	u8 cmd_opcode;
  61	bool occupied;
  62};
  63
  64struct ena_com_stats_ctx {
  65	struct ena_admin_aq_get_stats_cmd get_cmd;
  66	struct ena_admin_acq_get_stats_resp get_resp;
  67};
  68
  69static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
  70				       struct ena_common_mem_addr *ena_addr,
  71				       dma_addr_t addr)
  72{
  73	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
  74		netdev_err(ena_dev->net_device,
  75			   "DMA address has more bits that the device supports\n");
  76		return -EINVAL;
  77	}
  78
  79	ena_addr->mem_addr_low = lower_32_bits(addr);
  80	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
  81
  82	return 0;
  83}
  84
  85static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
  86{
  87	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
  88	struct ena_com_admin_sq *sq = &admin_queue->sq;
  89	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
  90
  91	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
  92					 &sq->dma_addr, GFP_KERNEL);
  93
  94	if (!sq->entries) {
  95		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
  96		return -ENOMEM;
  97	}
  98
  99	sq->head = 0;
 100	sq->tail = 0;
 101	sq->phase = 1;
 102
 103	sq->db_addr = NULL;
 104
 105	return 0;
 106}
 107
 108static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
 109{
 110	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 111	struct ena_com_admin_cq *cq = &admin_queue->cq;
 112	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 113
 114	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
 115					 &cq->dma_addr, GFP_KERNEL);
 116
 117	if (!cq->entries) {
 118		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 119		return -ENOMEM;
 120	}
 121
 122	cq->head = 0;
 123	cq->phase = 1;
 124
 125	return 0;
 126}
 127
 128static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
 129				   struct ena_aenq_handlers *aenq_handlers)
 130{
 131	struct ena_com_aenq *aenq = &ena_dev->aenq;
 132	u32 addr_low, addr_high, aenq_caps;
 133	u16 size;
 134
 135	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 136	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
 137	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
 138					   &aenq->dma_addr, GFP_KERNEL);
 139
 140	if (!aenq->entries) {
 141		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 142		return -ENOMEM;
 143	}
 144
 145	aenq->head = aenq->q_depth;
 146	aenq->phase = 1;
 147
 148	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 149	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 150
 151	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 152	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 153
 154	aenq_caps = 0;
 155	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
 156	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
 157		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
 158		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
 159	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 160
 161	if (unlikely(!aenq_handlers)) {
 162		netdev_err(ena_dev->net_device,
 163			   "AENQ handlers pointer is NULL\n");
 164		return -EINVAL;
 165	}
 166
 167	aenq->aenq_handlers = aenq_handlers;
 168
 169	return 0;
 170}
 171
 172static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 173				     struct ena_comp_ctx *comp_ctx)
 174{
 175	comp_ctx->occupied = false;
 176	atomic_dec(&queue->outstanding_cmds);
 177}
 178
 179static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
 180					  u16 command_id, bool capture)
 181{
 182	if (unlikely(command_id >= admin_queue->q_depth)) {
 183		netdev_err(admin_queue->ena_dev->net_device,
 184			   "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
 185			   command_id, admin_queue->q_depth);
 186		return NULL;
 187	}
 188
 189	if (unlikely(!admin_queue->comp_ctx)) {
 190		netdev_err(admin_queue->ena_dev->net_device,
 191			   "Completion context is NULL\n");
 192		return NULL;
 193	}
 194
 195	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
 196		netdev_err(admin_queue->ena_dev->net_device,
 197			   "Completion context is occupied\n");
 198		return NULL;
 199	}
 200
 201	if (capture) {
 202		atomic_inc(&admin_queue->outstanding_cmds);
 203		admin_queue->comp_ctx[command_id].occupied = true;
 204	}
 205
 206	return &admin_queue->comp_ctx[command_id];
 207}
 208
 209static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 210						       struct ena_admin_aq_entry *cmd,
 211						       size_t cmd_size_in_bytes,
 212						       struct ena_admin_acq_entry *comp,
 213						       size_t comp_size_in_bytes)
 214{
 215	struct ena_comp_ctx *comp_ctx;
 216	u16 tail_masked, cmd_id;
 217	u16 queue_size_mask;
 218	u16 cnt;
 219
 220	queue_size_mask = admin_queue->q_depth - 1;
 221
 222	tail_masked = admin_queue->sq.tail & queue_size_mask;
 223
 224	/* In case of queue FULL */
 225	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
 226	if (cnt >= admin_queue->q_depth) {
 227		netdev_dbg(admin_queue->ena_dev->net_device,
 228			   "Admin queue is full.\n");
 229		admin_queue->stats.out_of_space++;
 230		return ERR_PTR(-ENOSPC);
 231	}
 232
 233	cmd_id = admin_queue->curr_cmd_id;
 234
 235	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
 236		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
 237
 238	cmd->aq_common_descriptor.command_id |= cmd_id &
 239		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
 240
 241	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
 242	if (unlikely(!comp_ctx))
 243		return ERR_PTR(-EINVAL);
 244
 245	comp_ctx->status = ENA_CMD_SUBMITTED;
 246	comp_ctx->comp_size = (u32)comp_size_in_bytes;
 247	comp_ctx->user_cqe = comp;
 248	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
 249
 250	reinit_completion(&comp_ctx->wait_event);
 251
 252	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
 253
 254	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
 255		queue_size_mask;
 256
 257	admin_queue->sq.tail++;
 258	admin_queue->stats.submitted_cmd++;
 259
 260	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
 261		admin_queue->sq.phase = !admin_queue->sq.phase;
 262
 263	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
 264
 265	return comp_ctx;
 266}
 267
 268static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
 269{
 270	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 271	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
 272	struct ena_comp_ctx *comp_ctx;
 273	u16 i;
 274
 275	admin_queue->comp_ctx =
 276		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
 277	if (unlikely(!admin_queue->comp_ctx)) {
 278		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 279		return -ENOMEM;
 280	}
 281
 282	for (i = 0; i < admin_queue->q_depth; i++) {
 283		comp_ctx = get_comp_ctxt(admin_queue, i, false);
 284		if (comp_ctx)
 285			init_completion(&comp_ctx->wait_event);
 286	}
 287
 288	return 0;
 289}
 290
 291static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 292						     struct ena_admin_aq_entry *cmd,
 293						     size_t cmd_size_in_bytes,
 294						     struct ena_admin_acq_entry *comp,
 295						     size_t comp_size_in_bytes)
 296{
 297	unsigned long flags = 0;
 298	struct ena_comp_ctx *comp_ctx;
 299
 300	spin_lock_irqsave(&admin_queue->q_lock, flags);
 301	if (unlikely(!admin_queue->running_state)) {
 302		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 303		return ERR_PTR(-ENODEV);
 304	}
 305	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
 306					      cmd_size_in_bytes,
 307					      comp,
 308					      comp_size_in_bytes);
 309	if (IS_ERR(comp_ctx))
 310		admin_queue->running_state = false;
 311	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 312
 313	return comp_ctx;
 314}
 315
 316static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 317			      struct ena_com_create_io_ctx *ctx,
 318			      struct ena_com_io_sq *io_sq)
 319{
 320	size_t size;
 321	int dev_node = 0;
 322
 323	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 324
 325	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
 326	io_sq->desc_entry_size =
 327		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 328		sizeof(struct ena_eth_io_tx_desc) :
 329		sizeof(struct ena_eth_io_rx_desc);
 330
 331	size = io_sq->desc_entry_size * io_sq->q_depth;
 332
 333	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
 334		dev_node = dev_to_node(ena_dev->dmadev);
 335		set_dev_node(ena_dev->dmadev, ctx->numa_node);
 336		io_sq->desc_addr.virt_addr =
 337			dma_alloc_coherent(ena_dev->dmadev, size,
 338					   &io_sq->desc_addr.phys_addr,
 339					   GFP_KERNEL);
 340		set_dev_node(ena_dev->dmadev, dev_node);
 341		if (!io_sq->desc_addr.virt_addr) {
 342			io_sq->desc_addr.virt_addr =
 343				dma_alloc_coherent(ena_dev->dmadev, size,
 344						   &io_sq->desc_addr.phys_addr,
 345						   GFP_KERNEL);
 346		}
 347
 348		if (!io_sq->desc_addr.virt_addr) {
 349			netdev_err(ena_dev->net_device,
 350				   "Memory allocation failed\n");
 351			return -ENOMEM;
 352		}
 353	}
 354
 355	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 356		/* Allocate bounce buffers */
 357		io_sq->bounce_buf_ctrl.buffer_size =
 358			ena_dev->llq_info.desc_list_entry_size;
 359		io_sq->bounce_buf_ctrl.buffers_num =
 360			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 361		io_sq->bounce_buf_ctrl.next_to_use = 0;
 362
 363		size = io_sq->bounce_buf_ctrl.buffer_size *
 364			io_sq->bounce_buf_ctrl.buffers_num;
 365
 366		dev_node = dev_to_node(ena_dev->dmadev);
 367		set_dev_node(ena_dev->dmadev, ctx->numa_node);
 368		io_sq->bounce_buf_ctrl.base_buffer =
 369			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 370		set_dev_node(ena_dev->dmadev, dev_node);
 371		if (!io_sq->bounce_buf_ctrl.base_buffer)
 372			io_sq->bounce_buf_ctrl.base_buffer =
 373				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 374
 375		if (!io_sq->bounce_buf_ctrl.base_buffer) {
 376			netdev_err(ena_dev->net_device,
 377				   "Bounce buffer memory allocation failed\n");
 378			return -ENOMEM;
 379		}
 380
 381		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
 382		       sizeof(io_sq->llq_info));
 383
 384		/* Initiate the first bounce buffer */
 385		io_sq->llq_buf_ctrl.curr_bounce_buf =
 386			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
 387		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
 388		       0x0, io_sq->llq_info.desc_list_entry_size);
 389		io_sq->llq_buf_ctrl.descs_left_in_line =
 390			io_sq->llq_info.descs_num_before_header;
 391		io_sq->disable_meta_caching =
 392			io_sq->llq_info.disable_meta_caching;
 393
 394		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 395			io_sq->entries_in_tx_burst_left =
 396				io_sq->llq_info.max_entries_in_tx_burst;
 397	}
 398
 399	io_sq->tail = 0;
 400	io_sq->next_to_comp = 0;
 401	io_sq->phase = 1;
 402
 403	return 0;
 404}
 405
 406static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 407			      struct ena_com_create_io_ctx *ctx,
 408			      struct ena_com_io_cq *io_cq)
 409{
 410	size_t size;
 411	int prev_node = 0;
 412
 413	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
 414
 415	/* Use the basic completion descriptor for Rx */
 416	io_cq->cdesc_entry_size_in_bytes =
 417		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 418		sizeof(struct ena_eth_io_tx_cdesc) :
 419		sizeof(struct ena_eth_io_rx_cdesc_base);
 420
 421	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 422
 423	prev_node = dev_to_node(ena_dev->dmadev);
 424	set_dev_node(ena_dev->dmadev, ctx->numa_node);
 425	io_cq->cdesc_addr.virt_addr =
 426		dma_alloc_coherent(ena_dev->dmadev, size,
 427				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
 428	set_dev_node(ena_dev->dmadev, prev_node);
 429	if (!io_cq->cdesc_addr.virt_addr) {
 430		io_cq->cdesc_addr.virt_addr =
 431			dma_alloc_coherent(ena_dev->dmadev, size,
 432					   &io_cq->cdesc_addr.phys_addr,
 433					   GFP_KERNEL);
 434	}
 435
 436	if (!io_cq->cdesc_addr.virt_addr) {
 437		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 438		return -ENOMEM;
 439	}
 440
 441	io_cq->phase = 1;
 442	io_cq->head = 0;
 443
 444	return 0;
 445}
 446
 447static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
 448						   struct ena_admin_acq_entry *cqe)
 449{
 450	struct ena_comp_ctx *comp_ctx;
 451	u16 cmd_id;
 452
 453	cmd_id = cqe->acq_common_descriptor.command &
 454		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
 455
 456	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
 457	if (unlikely(!comp_ctx)) {
 458		netdev_err(admin_queue->ena_dev->net_device,
 459			   "comp_ctx is NULL. Changing the admin queue running state\n");
 460		admin_queue->running_state = false;
 461		return;
 462	}
 463
 464	comp_ctx->status = ENA_CMD_COMPLETED;
 465	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
 466
 467	if (comp_ctx->user_cqe)
 468		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
 469
 470	if (!admin_queue->polling)
 471		complete(&comp_ctx->wait_event);
 472}
 473
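/* Reap all pending admin completions: walk the ACQ entries whose phase bit
 * matches the current phase, hand each one to
 * ena_com_handle_single_admin_completion() and advance the CQ/SQ heads and
 * the completed-command statistics.
 */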
 474static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
 475{
 476	struct ena_admin_acq_entry *cqe = NULL;
 477	u16 comp_num = 0;
 478	u16 head_masked;
 479	u8 phase;
 480
 481	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
 482	phase = admin_queue->cq.phase;
 483
 484	cqe = &admin_queue->cq.entries[head_masked];
 485
 486	/* Go over all the completions */
 487	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
 488		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
 489		/* Do not read the rest of the completion entry until the
 490		 * phase bit has been validated
 491		 */
 492		dma_rmb();
 493		ena_com_handle_single_admin_completion(admin_queue, cqe);
 494
 495		head_masked++;
 496		comp_num++;
 497		if (unlikely(head_masked == admin_queue->q_depth)) {
 498			head_masked = 0;
 499			phase = !phase;
 500		}
 501
 502		cqe = &admin_queue->cq.entries[head_masked];
 503	}
 504
 505	admin_queue->cq.head += comp_num;
 506	admin_queue->cq.phase = phase;
 507	admin_queue->sq.head += comp_num;
 508	admin_queue->stats.completed_cmd += comp_num;
 509}
 510
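/* Translate a device completion status into a standard errno value; any
 * status other than ENA_ADMIN_SUCCESS is also logged.
 */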
 511static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
 512					u8 comp_status)
 513{
 514	if (unlikely(comp_status != 0))
 515		netdev_err(admin_queue->ena_dev->net_device,
 516			   "Admin command failed[%u]\n", comp_status);
 517
 518	switch (comp_status) {
 519	case ENA_ADMIN_SUCCESS:
 520		return 0;
 521	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
 522		return -ENOMEM;
 523	case ENA_ADMIN_UNSUPPORTED_OPCODE:
 524		return -EOPNOTSUPP;
 525	case ENA_ADMIN_BAD_OPCODE:
 526	case ENA_ADMIN_MALFORMED_REQUEST:
 527	case ENA_ADMIN_ILLEGAL_PARAMETER:
 528	case ENA_ADMIN_UNKNOWN_ERROR:
 529		return -EINVAL;
 530	case ENA_ADMIN_RESOURCE_BUSY:
 531		return -EAGAIN;
 532	}
 533
 534	return -EINVAL;
 535}
 536
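/* Sleep for an exponentially growing interval: the base delay is clamped to
 * at least ENA_MIN_ADMIN_POLL_US, scaled by 2^exp and capped at
 * ENA_MAX_ADMIN_POLL_US before being handed to usleep_range().
 */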
 537static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
 538{
	exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
 539	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
 540	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
 541	usleep_range(delay_us, 2 * delay_us);
 542}
 543
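/* Poll the admin completion queue (with exponential backoff between
 * iterations) until the command completes, is aborted, or the admin
 * completion timeout expires.
 */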
 544static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 545						     struct ena_com_admin_queue *admin_queue)
 546{
 547	unsigned long flags = 0;
 548	unsigned long timeout;
 549	int ret;
 550	u32 exp = 0;
 551
 552	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
 553
 554	while (1) {
 555		spin_lock_irqsave(&admin_queue->q_lock, flags);
 556		ena_com_handle_admin_completion(admin_queue);
 557		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 558
 559		if (comp_ctx->status != ENA_CMD_SUBMITTED)
 560			break;
 561
 562		if (time_is_before_jiffies(timeout)) {
 563			netdev_err(admin_queue->ena_dev->net_device,
 564				   "Wait for completion (polling) timeout\n");
 565			/* ENA didn't have any completion */
 566			spin_lock_irqsave(&admin_queue->q_lock, flags);
 567			admin_queue->stats.no_completion++;
 568			admin_queue->running_state = false;
 569			spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 570
 571			ret = -ETIME;
 572			goto err;
 573		}
 574
 575		ena_delay_exponential_backoff_us(exp++,
 576						 admin_queue->ena_dev->ena_min_poll_delay_us);
 577	}
 578
 579	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
 580		netdev_err(admin_queue->ena_dev->net_device,
 581			   "Command was aborted\n");
 582		spin_lock_irqsave(&admin_queue->q_lock, flags);
 583		admin_queue->stats.aborted_cmd++;
 584		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 585		ret = -ENODEV;
 586		goto err;
 587	}
 588
 589	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
 590	     comp_ctx->status);
 591
 592	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 593err:
 594	comp_ctxt_release(admin_queue, comp_ctx);
 595	return ret;
 596}
 597
 598/*
 599 * Set the LLQ configuration in the device firmware.
 600 *
 601 * The driver provides only the enabled feature values to the device,
 602 * which in turn checks whether they are supported.
 603 */
 604static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 605{
 606	struct ena_com_admin_queue *admin_queue;
 607	struct ena_admin_set_feat_cmd cmd;
 608	struct ena_admin_set_feat_resp resp;
 609	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 610	int ret;
 611
 612	memset(&cmd, 0x0, sizeof(cmd));
 613	admin_queue = &ena_dev->admin_queue;
 614
 615	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 616	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
 617
 618	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
 619	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
 620	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 621	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 622
 623	cmd.u.llq.accel_mode.u.set.enabled_flags =
 624		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
 625		BIT(ENA_ADMIN_LIMIT_TX_BURST);
 626
 627	ret = ena_com_execute_admin_command(admin_queue,
 628					    (struct ena_admin_aq_entry *)&cmd,
 629					    sizeof(cmd),
 630					    (struct ena_admin_acq_entry *)&resp,
 631					    sizeof(resp));
 632
 633	if (unlikely(ret))
 634		netdev_err(ena_dev->net_device,
 635			   "Failed to set LLQ configurations: %d\n", ret);
 636
 637	return ret;
 638}
 639
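/* Derive the LLQ (low latency queue) configuration from the device
 * capabilities and the driver defaults: header location, descriptor stride,
 * entry size and the number of descriptors placed before the header. When a
 * default isn't supported, fall back to a supported value and log the
 * fallback, then push the chosen configuration to the device via
 * ena_com_set_llq().
 */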
 640static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 641				   struct ena_admin_feature_llq_desc *llq_features,
 642				   struct ena_llq_configurations *llq_default_cfg)
 643{
 644	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 645	struct ena_admin_accel_mode_get llq_accel_mode_get;
 646	u16 supported_feat;
 647	int rc;
 648
 649	memset(llq_info, 0, sizeof(*llq_info));
 650
 651	supported_feat = llq_features->header_location_ctrl_supported;
 652
 653	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
 654		llq_info->header_location_ctrl =
 655			llq_default_cfg->llq_header_location;
 656	} else {
 657		netdev_err(ena_dev->net_device,
 658			   "Invalid header location control, supported: 0x%x\n",
 659			   supported_feat);
 660		return -EINVAL;
 661	}
 662
 663	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
 664		supported_feat = llq_features->descriptors_stride_ctrl_supported;
 665		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
 666			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
 667		} else	{
 668			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
 669				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 670			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
 671				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
 672			} else {
 673				netdev_err(ena_dev->net_device,
 674					   "Invalid desc_stride_ctrl, supported: 0x%x\n",
 675					   supported_feat);
 676				return -EINVAL;
 677			}
 678
 679			netdev_err(ena_dev->net_device,
 680				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 681				   llq_default_cfg->llq_stride_ctrl,
 682				   supported_feat, llq_info->desc_stride_ctrl);
 683		}
 684	} else {
 685		llq_info->desc_stride_ctrl = 0;
 686	}
 687
 688	supported_feat = llq_features->entry_size_ctrl_supported;
 689	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
 690		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
 691		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
 692	} else {
 693		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
 694			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 695			llq_info->desc_list_entry_size = 128;
 696		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
 697			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
 698			llq_info->desc_list_entry_size = 192;
 699		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
 700			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
 701			llq_info->desc_list_entry_size = 256;
 702		} else {
 703			netdev_err(ena_dev->net_device,
 704				   "Invalid entry_size_ctrl, supported: 0x%x\n",
 705				   supported_feat);
 706			return -EINVAL;
 707		}
 708
 709		netdev_err(ena_dev->net_device,
 710			   "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 711			   llq_default_cfg->llq_ring_entry_size, supported_feat,
 712			   llq_info->desc_list_entry_size);
 713	}
 714	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
 715		/* The desc list entry size must be a whole multiple of 8.
 716		 * This requirement comes from __iowrite64_copy()
 717		 */
 718		netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
 719			   llq_info->desc_list_entry_size);
 720		return -EINVAL;
 721	}
 722
 723	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
 724		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
 725			sizeof(struct ena_eth_io_tx_desc);
 726	else
 727		llq_info->descs_per_entry = 1;
 728
 729	supported_feat = llq_features->desc_num_before_header_supported;
 730	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
 731		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
 732	} else {
 733		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
 734			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
 735		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
 736			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
 737		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
 738			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
 739		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
 740			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
 741		} else {
 742			netdev_err(ena_dev->net_device,
 743				   "Invalid descs_num_before_header, supported: 0x%x\n",
 744				   supported_feat);
 745			return -EINVAL;
 746		}
 747
 748		netdev_err(ena_dev->net_device,
 749			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 750			   llq_default_cfg->llq_num_decs_before_header,
 751			   supported_feat, llq_info->descs_num_before_header);
 752	}
 753	/* Check whether the accelerated queue feature is supported */
 754	llq_accel_mode_get = llq_features->accel_mode.u.get;
 755
 756	llq_info->disable_meta_caching =
 757		!!(llq_accel_mode_get.supported_flags &
 758		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
 759
 760	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
 761		llq_info->max_entries_in_tx_burst =
 762			llq_accel_mode_get.max_tx_burst_size /
 763			llq_default_cfg->llq_ring_entry_size_value;
 764
 765	rc = ena_com_set_llq(ena_dev);
 766	if (rc)
 767		netdev_err(ena_dev->net_device,
 768			   "Cannot set LLQ configuration: %d\n", rc);
 769
 770	return rc;
 771}
 772
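/* Wait for an admin command completion in interrupt mode. If no MSI-X
 * interrupt arrives, the completion queue is drained manually; depending on
 * the auto-polling setting this either falls back to polling mode or fails
 * the command with -ETIME.
 */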
 773static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 774							struct ena_com_admin_queue *admin_queue)
 775{
 776	unsigned long flags = 0;
 777	int ret;
 778
 779	wait_for_completion_timeout(&comp_ctx->wait_event,
 780				    usecs_to_jiffies(
 781					    admin_queue->completion_timeout));
 782
 783	/* In case the command wasn't completed, find out the root cause.
 784	 * There might be two kinds of errors:
 785	 * 1) No completion (timeout reached)
 786	 * 2) There is a completion, but the driver didn't receive the MSI-X interrupt.
 787	 */
 788	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
 789		spin_lock_irqsave(&admin_queue->q_lock, flags);
 790		ena_com_handle_admin_completion(admin_queue);
 791		admin_queue->stats.no_completion++;
 792		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 793
 794		if (comp_ctx->status == ENA_CMD_COMPLETED) {
 795			netdev_err(admin_queue->ena_dev->net_device,
 796				   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
 797				   comp_ctx->cmd_opcode,
 798				   admin_queue->auto_polling ? "ON" : "OFF");
 799			/* Check if fallback to polling is enabled */
 800			if (admin_queue->auto_polling)
 801				admin_queue->polling = true;
 802		} else {
 803			netdev_err(admin_queue->ena_dev->net_device,
 804				   "The ena device didn't send a completion for the admin cmd %d status %d\n",
 805				   comp_ctx->cmd_opcode, comp_ctx->status);
 806		}
 807		/* Check if the queue shifted to polling mode.
 808		 * This happens when there is a completion without an interrupt
 809		 * and autopolling mode is enabled. In that case continue normal execution.
 810		 */
 811		if (!admin_queue->polling) {
 812			admin_queue->running_state = false;
 813			ret = -ETIME;
 814			goto err;
 815		}
 816	}
 817
 818	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 819err:
 820	comp_ctxt_release(admin_queue, comp_ctx);
 821	return ret;
 822}
 823
 824/* This method reads a hardware device register by posting a write
 825 * and waiting for the response.
 826 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 827 */
 828static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 829{
 830	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 831	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
 832		mmio_read->read_resp;
 833	u32 mmio_read_reg, ret, i;
 834	unsigned long flags = 0;
 835	u32 timeout = mmio_read->reg_read_to;
 836
 837	might_sleep();
 838
 839	if (timeout == 0)
 840		timeout = ENA_REG_READ_TIMEOUT;
 841
 842	/* If readless is disabled, perform regular read */
 843	if (!mmio_read->readless_supported)
 844		return readl(ena_dev->reg_bar + offset);
 845
 846	spin_lock_irqsave(&mmio_read->lock, flags);
 847	mmio_read->seq_num++;
 848
 849	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
 850	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
 851			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
 852	mmio_read_reg |= mmio_read->seq_num &
 853			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 854
 855	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 856
 857	for (i = 0; i < timeout; i++) {
 858		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
 859			break;
 860
 861		udelay(1);
 862	}
 863
 864	if (unlikely(i == timeout)) {
 865		netdev_err(ena_dev->net_device,
 866			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
 867			   mmio_read->seq_num, offset, read_resp->req_id,
 868			   read_resp->reg_off);
 869		ret = ENA_MMIO_READ_TIMEOUT;
 870		goto err;
 871	}
 872
 873	if (read_resp->reg_off != offset) {
 874		netdev_err(ena_dev->net_device,
 875			   "Read failure: wrong offset provided\n");
 876		ret = ENA_MMIO_READ_TIMEOUT;
 877	} else {
 878		ret = read_resp->reg_val;
 879	}
 880err:
 881	spin_unlock_irqrestore(&mmio_read->lock, flags);
 882
 883	return ret;
 884}
 885
 886/* There are two ways to wait for a completion.
 887 * Polling mode - poll until the completion is available.
 888 * Async mode - wait on a wait queue until the completion is ready
 889 * (or the timeout expires).
 890 * In async mode the IRQ handler is expected to call
 891 * ena_com_handle_admin_completion() to mark the completions.
 892 */
 893static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
 894					     struct ena_com_admin_queue *admin_queue)
 895{
 896	if (admin_queue->polling)
 897		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
 898								 admin_queue);
 899
 900	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
 901							    admin_queue);
 902}
 903
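/* Issue an ENA_ADMIN_DESTROY_SQ admin command for the given IO submission
 * queue, encoding its direction (Tx/Rx) and device index.
 */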
 904static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
 905				 struct ena_com_io_sq *io_sq)
 906{
 907	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 908	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
 909	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
 910	u8 direction;
 911	int ret;
 912
 913	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
 914
 915	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 916		direction = ENA_ADMIN_SQ_DIRECTION_TX;
 917	else
 918		direction = ENA_ADMIN_SQ_DIRECTION_RX;
 919
 920	destroy_cmd.sq.sq_identity |= (direction <<
 921		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
 922		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
 923
 924	destroy_cmd.sq.sq_idx = io_sq->idx;
 925	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
 926
 927	ret = ena_com_execute_admin_command(admin_queue,
 928					    (struct ena_admin_aq_entry *)&destroy_cmd,
 929					    sizeof(destroy_cmd),
 930					    (struct ena_admin_acq_entry *)&destroy_resp,
 931					    sizeof(destroy_resp));
 932
 933	if (unlikely(ret && (ret != -ENODEV)))
 934		netdev_err(ena_dev->net_device,
 935			   "Failed to destroy io sq error: %d\n", ret);
 936
 937	return ret;
 938}
 939
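/* Release the host memory backing an IO queue pair: the CQ completion
 * descriptors, the SQ descriptor ring and the LLQ bounce buffers, if any of
 * them were allocated.
 */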
 940static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 941				  struct ena_com_io_sq *io_sq,
 942				  struct ena_com_io_cq *io_cq)
 943{
 944	size_t size;
 945
 946	if (io_cq->cdesc_addr.virt_addr) {
 947		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 948
 949		dma_free_coherent(ena_dev->dmadev, size,
 950				  io_cq->cdesc_addr.virt_addr,
 951				  io_cq->cdesc_addr.phys_addr);
 952
 953		io_cq->cdesc_addr.virt_addr = NULL;
 954	}
 955
 956	if (io_sq->desc_addr.virt_addr) {
 957		size = io_sq->desc_entry_size * io_sq->q_depth;
 958
 959		dma_free_coherent(ena_dev->dmadev, size,
 960				  io_sq->desc_addr.virt_addr,
 961				  io_sq->desc_addr.phys_addr);
 962
 963		io_sq->desc_addr.virt_addr = NULL;
 964	}
 965
 966	if (io_sq->bounce_buf_ctrl.base_buffer) {
 967		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
 968		io_sq->bounce_buf_ctrl.base_buffer = NULL;
 969	}
 970}
 971
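/* Poll the device status register until its RESET_IN_PROGRESS bit matches
 * exp_state or the timeout (given in units of 100 ms) expires.
 */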
 972static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 973				u16 exp_state)
 974{
 975	u32 val, exp = 0;
 976	unsigned long timeout_stamp;
 977
 978	/* Convert timeout from resolution of 100ms to us resolution. */
 979	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
 980
 981	while (1) {
 982		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 983
 984		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
 985			netdev_err(ena_dev->net_device,
 986				   "Reg read timeout occurred\n");
 987			return -ETIME;
 988		}
 989
 990		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
 991			exp_state)
 992			return 0;
 993
 994		if (time_is_before_jiffies(timeout_stamp))
 995			return -ETIME;
 996
 997		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
 998	}
 999}
1000
1001static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1002					       enum ena_admin_aq_feature_id feature_id)
1003{
1004	u32 feature_mask = 1 << feature_id;
1005
1006	/* The device attributes feature is always supported */
1007	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
1008	    !(ena_dev->supported_features & feature_mask))
1009		return false;
1010
1011	return true;
1012}
1013
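/* Issue an ENA_ADMIN_GET_FEATURE admin command for the given feature id and
 * version. An optional control buffer (DMA address and size) can be supplied
 * for features that return indirect data.
 */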
1014static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1015				  struct ena_admin_get_feat_resp *get_resp,
1016				  enum ena_admin_aq_feature_id feature_id,
1017				  dma_addr_t control_buf_dma_addr,
1018				  u32 control_buff_size,
1019				  u8 feature_ver)
1020{
1021	struct ena_com_admin_queue *admin_queue;
1022	struct ena_admin_get_feat_cmd get_cmd;
1023	int ret;
1024
1025	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1026		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
1027			   feature_id);
1028		return -EOPNOTSUPP;
1029	}
1030
1031	memset(&get_cmd, 0x0, sizeof(get_cmd));
1032	admin_queue = &ena_dev->admin_queue;
1033
1034	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1035
1036	if (control_buff_size)
1037		get_cmd.aq_common_descriptor.flags =
1038			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1039	else
1040		get_cmd.aq_common_descriptor.flags = 0;
1041
1042	ret = ena_com_mem_addr_set(ena_dev,
1043				   &get_cmd.control_buffer.address,
1044				   control_buf_dma_addr);
1045	if (unlikely(ret)) {
1046		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1047		return ret;
1048	}
1049
1050	get_cmd.control_buffer.length = control_buff_size;
1051	get_cmd.feat_common.feature_version = feature_ver;
1052	get_cmd.feat_common.feature_id = feature_id;
1053
1054	ret = ena_com_execute_admin_command(admin_queue,
1055					    (struct ena_admin_aq_entry *)
1056					    &get_cmd,
1057					    sizeof(get_cmd),
1058					    (struct ena_admin_acq_entry *)
1059					    get_resp,
1060					    sizeof(*get_resp));
1061
1062	if (unlikely(ret))
1063		netdev_err(ena_dev->net_device,
1064			   "Failed to submit get_feature command %d error: %d\n",
1065			   feature_id, ret);
1066
1067	return ret;
1068}
1069
1070static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1071			       struct ena_admin_get_feat_resp *get_resp,
1072			       enum ena_admin_aq_feature_id feature_id,
1073			       u8 feature_ver)
1074{
1075	return ena_com_get_feature_ex(ena_dev,
1076				      get_resp,
1077				      feature_id,
1078				      0,
1079				      0,
1080				      feature_ver);
1081}
1082
1083int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1084{
1085	return ena_dev->rss.hash_func;
1086}
1087
1088static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1089{
1090	struct ena_admin_feature_rss_flow_hash_control *hash_key =
1091		(ena_dev->rss).hash_key;
1092
1093	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1094	/* The key buffer is stored in the device in an array of
1095	 * uint32 elements.
1096	 */
1097	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1098}
1099
1100static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1101{
1102	struct ena_rss *rss = &ena_dev->rss;
1103
1104	if (!ena_com_check_supported_feature_id(ena_dev,
1105						ENA_ADMIN_RSS_HASH_FUNCTION))
1106		return -EOPNOTSUPP;
1107
1108	rss->hash_key =
1109		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1110				   &rss->hash_key_dma_addr, GFP_KERNEL);
1111
1112	if (unlikely(!rss->hash_key))
1113		return -ENOMEM;
1114
1115	return 0;
1116}
1117
1118static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1119{
1120	struct ena_rss *rss = &ena_dev->rss;
1121
1122	if (rss->hash_key)
1123		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1124				  rss->hash_key, rss->hash_key_dma_addr);
1125	rss->hash_key = NULL;
1126}
1127
1128static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1129{
1130	struct ena_rss *rss = &ena_dev->rss;
1131
1132	rss->hash_ctrl =
1133		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1134				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1135
1136	if (unlikely(!rss->hash_ctrl))
1137		return -ENOMEM;
1138
1139	return 0;
1140}
1141
1142static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1143{
1144	struct ena_rss *rss = &ena_dev->rss;
1145
1146	if (rss->hash_ctrl)
1147		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1148				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1149	rss->hash_ctrl = NULL;
1150}
1151
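/* Allocate the RSS indirection table: validate the requested log size
 * against the device limits, then allocate both the DMA-able table used by
 * the device and a host-side shadow copy of the queue ids.
 */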
1152static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1153					   u16 log_size)
1154{
1155	struct ena_rss *rss = &ena_dev->rss;
1156	struct ena_admin_get_feat_resp get_resp;
1157	size_t tbl_size;
1158	int ret;
1159
1160	ret = ena_com_get_feature(ena_dev, &get_resp,
1161				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1162	if (unlikely(ret))
1163		return ret;
1164
1165	if ((get_resp.u.ind_table.min_size > log_size) ||
1166	    (get_resp.u.ind_table.max_size < log_size)) {
1167		netdev_err(ena_dev->net_device,
1168			   "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1169			   1 << log_size, 1 << get_resp.u.ind_table.min_size,
1170			   1 << get_resp.u.ind_table.max_size);
1171		return -EINVAL;
1172	}
1173
1174	tbl_size = (1ULL << log_size) *
1175		sizeof(struct ena_admin_rss_ind_table_entry);
1176
1177	rss->rss_ind_tbl =
1178		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1179				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1180	if (unlikely(!rss->rss_ind_tbl))
1181		goto mem_err1;
1182
1183	tbl_size = (1ULL << log_size) * sizeof(u16);
1184	rss->host_rss_ind_tbl =
1185		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1186	if (unlikely(!rss->host_rss_ind_tbl))
1187		goto mem_err2;
1188
1189	rss->tbl_log_size = log_size;
1190
1191	return 0;
1192
1193mem_err2:
1194	tbl_size = (1ULL << log_size) *
1195		sizeof(struct ena_admin_rss_ind_table_entry);
1196
1197	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1198			  rss->rss_ind_tbl_dma_addr);
1199	rss->rss_ind_tbl = NULL;
1200mem_err1:
1201	rss->tbl_log_size = 0;
1202	return -ENOMEM;
1203}
1204
1205static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1206{
1207	struct ena_rss *rss = &ena_dev->rss;
1208	size_t tbl_size = (1ULL << rss->tbl_log_size) *
1209		sizeof(struct ena_admin_rss_ind_table_entry);
1210
1211	if (rss->rss_ind_tbl)
1212		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1213				  rss->rss_ind_tbl_dma_addr);
1214	rss->rss_ind_tbl = NULL;
1215
1216	if (rss->host_rss_ind_tbl)
1217		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1218	rss->host_rss_ind_tbl = NULL;
1219}
1220
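/* Build and execute an ENA_ADMIN_CREATE_SQ admin command for the given IO
 * submission queue, then record the returned SQ index, doorbell address and,
 * for LLQ placement, the device memory BAR addresses of the LLQ headers and
 * descriptors.
 */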
1221static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1222				struct ena_com_io_sq *io_sq, u16 cq_idx)
1223{
1224	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1225	struct ena_admin_aq_create_sq_cmd create_cmd;
1226	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1227	u8 direction;
1228	int ret;
1229
1230	memset(&create_cmd, 0x0, sizeof(create_cmd));
1231
1232	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1233
1234	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1235		direction = ENA_ADMIN_SQ_DIRECTION_TX;
1236	else
1237		direction = ENA_ADMIN_SQ_DIRECTION_RX;
1238
1239	create_cmd.sq_identity |= (direction <<
1240		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1241		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1242
1243	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1244		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1245
1246	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1247		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1248		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1249
1250	create_cmd.sq_caps_3 |=
1251		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1252
1253	create_cmd.cq_idx = cq_idx;
1254	create_cmd.sq_depth = io_sq->q_depth;
1255
1256	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1257		ret = ena_com_mem_addr_set(ena_dev,
1258					   &create_cmd.sq_ba,
1259					   io_sq->desc_addr.phys_addr);
1260		if (unlikely(ret)) {
1261			netdev_err(ena_dev->net_device,
1262				   "Memory address set failed\n");
1263			return ret;
1264		}
1265	}
1266
1267	ret = ena_com_execute_admin_command(admin_queue,
1268					    (struct ena_admin_aq_entry *)&create_cmd,
1269					    sizeof(create_cmd),
1270					    (struct ena_admin_acq_entry *)&cmd_completion,
1271					    sizeof(cmd_completion));
1272	if (unlikely(ret)) {
1273		netdev_err(ena_dev->net_device,
1274			   "Failed to create IO SQ. error: %d\n", ret);
1275		return ret;
1276	}
1277
1278	io_sq->idx = cmd_completion.sq_idx;
1279
1280	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1281		(uintptr_t)cmd_completion.sq_doorbell_offset);
1282
1283	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1284		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1285				+ cmd_completion.llq_headers_offset);
1286
1287		io_sq->desc_addr.pbuf_dev_addr =
1288			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1289			cmd_completion.llq_descriptors_offset);
1290	}
1291
1292	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
1293		   io_sq->idx, io_sq->q_depth);
1294
1295	return ret;
1296}
1297
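/* Translate the host RSS indirection table, which holds host queue ids, into
 * the device table, validating that every entry refers to an existing Rx
 * queue.
 */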
1298static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1299{
1300	struct ena_rss *rss = &ena_dev->rss;
1301	struct ena_com_io_sq *io_sq;
1302	u16 qid;
1303	int i;
1304
1305	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1306		qid = rss->host_rss_ind_tbl[i];
1307		if (qid >= ENA_TOTAL_NUM_QUEUES)
1308			return -EINVAL;
1309
1310		io_sq = &ena_dev->io_sq_queues[qid];
1311
1312		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1313			return -EINVAL;
1314
1315		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1316	}
1317
1318	return 0;
1319}
1320
1321static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1322						 u16 intr_delay_resolution)
1323{
1324	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1325
1326	if (unlikely(!intr_delay_resolution)) {
1327		netdev_err(ena_dev->net_device,
1328			   "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1329		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1330	}
1331
1332	/* update Rx */
1333	ena_dev->intr_moder_rx_interval =
1334		ena_dev->intr_moder_rx_interval *
1335		prev_intr_delay_resolution /
1336		intr_delay_resolution;
1337
1338	/* update Tx */
1339	ena_dev->intr_moder_tx_interval =
1340		ena_dev->intr_moder_tx_interval *
1341		prev_intr_delay_resolution /
1342		intr_delay_resolution;
1343
1344	ena_dev->intr_delay_resolution = intr_delay_resolution;
1345}
1346
1347/*****************************************************************************/
1348/*******************************      API       ******************************/
1349/*****************************************************************************/
1350
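/* Submit an admin command and block until it completes (or times out),
 * using either polling or interrupt mode as configured on the admin queue.
 *
 * Illustrative call pattern (a sketch only; real callers fill an
 * opcode-specific command structure and cast it, as done throughout this
 * file):
 *
 *	struct ena_admin_aq_entry cmd = {};
 *	struct ena_admin_acq_entry resp;
 *	int rc;
 *
 *	rc = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *					   &cmd, sizeof(cmd),
 *					   &resp, sizeof(resp));
 */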
1351int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1352				  struct ena_admin_aq_entry *cmd,
1353				  size_t cmd_size,
1354				  struct ena_admin_acq_entry *comp,
1355				  size_t comp_size)
1356{
1357	struct ena_comp_ctx *comp_ctx;
1358	int ret;
1359
1360	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1361					    comp, comp_size);
1362	if (IS_ERR(comp_ctx)) {
1363		ret = PTR_ERR(comp_ctx);
1364		if (ret == -ENODEV)
1365			netdev_dbg(admin_queue->ena_dev->net_device,
1366				   "Failed to submit command [%d]\n", ret);
1367		else
1368			netdev_err(admin_queue->ena_dev->net_device,
1369				   "Failed to submit command [%d]\n", ret);
1370
1371		return ret;
1372	}
1373
1374	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1375	if (unlikely(ret)) {
1376		if (admin_queue->running_state)
1377			netdev_err(admin_queue->ena_dev->net_device,
1378				   "Failed to process command. ret = %d\n", ret);
1379		else
1380			netdev_dbg(admin_queue->ena_dev->net_device,
1381				   "Failed to process command. ret = %d\n", ret);
1382	}
1383	return ret;
1384}
1385
1386int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1387			 struct ena_com_io_cq *io_cq)
1388{
1389	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1390	struct ena_admin_aq_create_cq_cmd create_cmd;
1391	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1392	int ret;
1393
1394	memset(&create_cmd, 0x0, sizeof(create_cmd));
1395
1396	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1397
1398	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1399		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1400	create_cmd.cq_caps_1 |=
1401		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1402
1403	create_cmd.msix_vector = io_cq->msix_vector;
1404	create_cmd.cq_depth = io_cq->q_depth;
1405
1406	ret = ena_com_mem_addr_set(ena_dev,
1407				   &create_cmd.cq_ba,
1408				   io_cq->cdesc_addr.phys_addr);
1409	if (unlikely(ret)) {
1410		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1411		return ret;
1412	}
1413
1414	ret = ena_com_execute_admin_command(admin_queue,
1415					    (struct ena_admin_aq_entry *)&create_cmd,
1416					    sizeof(create_cmd),
1417					    (struct ena_admin_acq_entry *)&cmd_completion,
1418					    sizeof(cmd_completion));
1419	if (unlikely(ret)) {
1420		netdev_err(ena_dev->net_device,
1421			   "Failed to create IO CQ. error: %d\n", ret);
1422		return ret;
1423	}
1424
1425	io_cq->idx = cmd_completion.cq_idx;
1426
1427	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1428		cmd_completion.cq_interrupt_unmask_register_offset);
1429
1430	if (cmd_completion.cq_head_db_register_offset)
1431		io_cq->cq_head_db_reg =
1432			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1433			cmd_completion.cq_head_db_register_offset);
1434
1435	if (cmd_completion.numa_node_register_offset)
1436		io_cq->numa_node_cfg_reg =
1437			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1438			cmd_completion.numa_node_register_offset);
1439
1440	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
1441		   io_cq->idx, io_cq->q_depth);
1442
1443	return ret;
1444}
1445
1446int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1447			    struct ena_com_io_sq **io_sq,
1448			    struct ena_com_io_cq **io_cq)
1449{
1450	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1451		netdev_err(ena_dev->net_device,
1452			   "Invalid queue number %d but the max is %d\n", qid,
1453			   ENA_TOTAL_NUM_QUEUES);
1454		return -EINVAL;
1455	}
1456
1457	*io_sq = &ena_dev->io_sq_queues[qid];
1458	*io_cq = &ena_dev->io_cq_queues[qid];
1459
1460	return 0;
1461}
1462
1463void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1464{
1465	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1466	struct ena_comp_ctx *comp_ctx;
1467	u16 i;
1468
1469	if (!admin_queue->comp_ctx)
1470		return;
1471
1472	for (i = 0; i < admin_queue->q_depth; i++) {
1473		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1474		if (unlikely(!comp_ctx))
1475			break;
1476
1477		comp_ctx->status = ENA_CMD_ABORTED;
1478
1479		complete(&comp_ctx->wait_event);
1480	}
1481}
1482
1483void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1484{
1485	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1486	unsigned long flags = 0;
1487	u32 exp = 0;
1488
1489	spin_lock_irqsave(&admin_queue->q_lock, flags);
1490	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1491		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1492		ena_delay_exponential_backoff_us(exp++,
1493						 ena_dev->ena_min_poll_delay_us);
1494		spin_lock_irqsave(&admin_queue->q_lock, flags);
1495	}
1496	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1497}
1498
1499int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1500			  struct ena_com_io_cq *io_cq)
1501{
1502	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1503	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1504	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1505	int ret;
1506
1507	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1508
1509	destroy_cmd.cq_idx = io_cq->idx;
1510	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1511
1512	ret = ena_com_execute_admin_command(admin_queue,
1513					    (struct ena_admin_aq_entry *)&destroy_cmd,
1514					    sizeof(destroy_cmd),
1515					    (struct ena_admin_acq_entry *)&destroy_resp,
1516					    sizeof(destroy_resp));
1517
1518	if (unlikely(ret && (ret != -ENODEV)))
1519		netdev_err(ena_dev->net_device,
1520			   "Failed to destroy IO CQ. error: %d\n", ret);
1521
1522	return ret;
1523}
1524
1525bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1526{
1527	return ena_dev->admin_queue.running_state;
1528}
1529
1530void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1531{
1532	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1533	unsigned long flags = 0;
1534
1535	spin_lock_irqsave(&admin_queue->q_lock, flags);
1536	ena_dev->admin_queue.running_state = state;
1537	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1538}
1539
1540void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1541{
1542	u16 depth = ena_dev->aenq.q_depth;
1543
1544	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1545
1546	/* Init head_db to mark that all entries in the queue
1547	 * are initially available
1548	 */
1549	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1550}
1551
1552int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1553{
1554	struct ena_com_admin_queue *admin_queue;
1555	struct ena_admin_set_feat_cmd cmd;
1556	struct ena_admin_set_feat_resp resp;
1557	struct ena_admin_get_feat_resp get_resp;
1558	int ret;
1559
1560	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1561	if (ret) {
1562		dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
1563		return ret;
1564	}
1565
1566	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1567		netdev_warn(ena_dev->net_device,
1568			    "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1569			    get_resp.u.aenq.supported_groups, groups_flag);
1570		return -EOPNOTSUPP;
1571	}
1572
1573	memset(&cmd, 0x0, sizeof(cmd));
1574	admin_queue = &ena_dev->admin_queue;
1575
1576	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1577	cmd.aq_common_descriptor.flags = 0;
1578	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1579	cmd.u.aenq.enabled_groups = groups_flag;
1580
1581	ret = ena_com_execute_admin_command(admin_queue,
1582					    (struct ena_admin_aq_entry *)&cmd,
1583					    sizeof(cmd),
1584					    (struct ena_admin_acq_entry *)&resp,
1585					    sizeof(resp));
1586
1587	if (unlikely(ret))
1588		netdev_err(ena_dev->net_device,
1589			   "Failed to config AENQ ret: %d\n", ret);
1590
1591	return ret;
1592}
1593
1594int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1595{
1596	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1597	u32 width;
1598
1599	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1600		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1601		return -ETIME;
1602	}
1603
1604	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1605		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1606
1607	netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
1608
1609	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1610		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
1611			   width);
1612		return -EINVAL;
1613	}
1614
1615	ena_dev->dma_addr_bits = width;
1616
1617	return width;
1618}
1619
1620int ena_com_validate_version(struct ena_com_dev *ena_dev)
1621{
1622	u32 ver;
1623	u32 ctrl_ver;
1624	u32 ctrl_ver_masked;
1625
1626	/* Make sure the ENA version and the controller version are at least
1627	 * as recent as the driver expects
1628	 */
1629	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1630	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1631					  ENA_REGS_CONTROLLER_VERSION_OFF);
1632
1633	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1634		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1635		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1636		return -ETIME;
1637	}
1638
1639	dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
1640		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1641			 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1642		 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1643
1644	dev_info(ena_dev->dmadev,
1645		 "ENA controller version: %d.%d.%d implementation version %d\n",
1646		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1647			 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1648		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1649			 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1650		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1651		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1652			 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1653
1654	ctrl_ver_masked =
1655		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1656		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1657		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1658
1659	/* Validate the ctrl version without the implementation ID */
1660	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1661		netdev_err(ena_dev->net_device,
1662			   "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1663		return -1;
1664	}
1665
1666	return 0;
1667}
1668
1669static void
1670ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1671				      struct ena_com_admin_queue *admin_queue)
1672
1673{
1674	if (!admin_queue->comp_ctx)
1675		return;
1676
1677	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1678
1679	admin_queue->comp_ctx = NULL;
1680}
1681
1682void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1683{
1684	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1685	struct ena_com_admin_cq *cq = &admin_queue->cq;
1686	struct ena_com_admin_sq *sq = &admin_queue->sq;
1687	struct ena_com_aenq *aenq = &ena_dev->aenq;
1688	u16 size;
1689
1690	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1691
1692	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1693	if (sq->entries)
1694		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1695				  sq->dma_addr);
1696	sq->entries = NULL;
1697
1698	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1699	if (cq->entries)
1700		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1701				  cq->dma_addr);
1702	cq->entries = NULL;
1703
1704	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1705	if (ena_dev->aenq.entries)
1706		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1707				  aenq->dma_addr);
1708	aenq->entries = NULL;
1709}
1710
1711void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1712{
1713	u32 mask_value = 0;
1714
1715	if (polling)
1716		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1717
1718	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1719	ena_dev->admin_queue.polling = polling;
1720}
1721
1722void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1723					 bool polling)
1724{
1725	ena_dev->admin_queue.auto_polling = polling;
1726}
1727
1728int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1729{
1730	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1731
1732	spin_lock_init(&mmio_read->lock);
1733	mmio_read->read_resp =
1734		dma_alloc_coherent(ena_dev->dmadev,
1735				   sizeof(*mmio_read->read_resp),
1736				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1737	if (unlikely(!mmio_read->read_resp))
1738		goto err;
1739
1740	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1741
1742	mmio_read->read_resp->req_id = 0x0;
1743	mmio_read->seq_num = 0x0;
1744	mmio_read->readless_supported = true;
1745
1746	return 0;
1747
1748err:
1749
1750	return -ENOMEM;
1751}
1752
1753void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1754{
1755	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1756
1757	mmio_read->readless_supported = readless_supported;
1758}
1759
1760void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1761{
1762	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1763
1764	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1765	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1766
1767	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1768			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1769
1770	mmio_read->read_resp = NULL;
1771}
1772
1773void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1774{
1775	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1776	u32 addr_low, addr_high;
1777
1778	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1779	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1780
1781	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1782	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1783}
1784
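/* Bring up the admin queue: allocate the completion contexts, admin SQ and
 * CQ, program their DMA addresses and capabilities into the device
 * registers, and initialize the AENQ. On failure the partially initialized
 * admin resources are torn down via ena_com_admin_destroy().
 */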
1785int ena_com_admin_init(struct ena_com_dev *ena_dev,
1786		       struct ena_aenq_handlers *aenq_handlers)
1787{
1788	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1789	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1790	int ret;
1791
1792	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1793
1794	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1795		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1796		return -ETIME;
1797	}
1798
1799	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1800		netdev_err(ena_dev->net_device,
1801			   "Device isn't ready, abort com init\n");
1802		return -ENODEV;
1803	}
1804
1805	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1806
1807	admin_queue->q_dmadev = ena_dev->dmadev;
1808	admin_queue->polling = false;
1809	admin_queue->curr_cmd_id = 0;
1810
1811	atomic_set(&admin_queue->outstanding_cmds, 0);
1812
1813	spin_lock_init(&admin_queue->q_lock);
1814
1815	ret = ena_com_init_comp_ctxt(admin_queue);
1816	if (ret)
1817		goto error;
1818
1819	ret = ena_com_admin_init_sq(admin_queue);
1820	if (ret)
1821		goto error;
1822
1823	ret = ena_com_admin_init_cq(admin_queue);
1824	if (ret)
1825		goto error;
1826
1827	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1828		ENA_REGS_AQ_DB_OFF);
1829
1830	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1831	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1832
1833	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1834	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1835
1836	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1837	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1838
1839	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1840	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1841
1842	aq_caps = 0;
1843	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1844	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1845			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1846			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1847
1848	acq_caps = 0;
1849	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1850	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1851		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1852		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1853
1854	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1855	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1856	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1857	if (ret)
1858		goto error;
1859
1860	admin_queue->ena_dev = ena_dev;
1861	admin_queue->running_state = true;
1862
1863	return 0;
1864error:
1865	ena_com_admin_destroy(ena_dev);
1866
1867	return ret;
1868}
1869
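/* Create one IO queue pair: initialize the host-side SQ and CQ contexts for
 * the requested qid, allocate their rings, then create the CQ and the SQ on
 * the device. The CQ is destroyed again if the SQ creation fails.
 */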
1870int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1871			    struct ena_com_create_io_ctx *ctx)
1872{
1873	struct ena_com_io_sq *io_sq;
1874	struct ena_com_io_cq *io_cq;
1875	int ret;
1876
1877	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1878		netdev_err(ena_dev->net_device,
1879			   "Qid (%d) is bigger than max num of queues (%d)\n",
1880			   ctx->qid, ENA_TOTAL_NUM_QUEUES);
1881		return -EINVAL;
1882	}
1883
1884	io_sq = &ena_dev->io_sq_queues[ctx->qid];
1885	io_cq = &ena_dev->io_cq_queues[ctx->qid];
1886
1887	memset(io_sq, 0x0, sizeof(*io_sq));
1888	memset(io_cq, 0x0, sizeof(*io_cq));
1889
1890	/* Init CQ */
1891	io_cq->q_depth = ctx->queue_size;
1892	io_cq->direction = ctx->direction;
1893	io_cq->qid = ctx->qid;
1894
1895	io_cq->msix_vector = ctx->msix_vector;
1896
1897	io_sq->q_depth = ctx->queue_size;
1898	io_sq->direction = ctx->direction;
1899	io_sq->qid = ctx->qid;
1900
1901	io_sq->mem_queue_type = ctx->mem_queue_type;
1902
1903	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1904		/* header length is limited to 8 bits */
1905		io_sq->tx_max_header_size =
1906			min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1907
1908	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1909	if (ret)
1910		goto error;
1911	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1912	if (ret)
1913		goto error;
1914
1915	ret = ena_com_create_io_cq(ena_dev, io_cq);
1916	if (ret)
1917		goto error;
1918
1919	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1920	if (ret)
1921		goto destroy_io_cq;
1922
1923	return 0;
1924
1925destroy_io_cq:
1926	ena_com_destroy_io_cq(ena_dev, io_cq);
1927error:
1928	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1929	return ret;
1930}
1931
1932void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1933{
1934	struct ena_com_io_sq *io_sq;
1935	struct ena_com_io_cq *io_cq;
1936
1937	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1938		netdev_err(ena_dev->net_device,
1939			   "Qid (%d) is bigger than max num of queues (%d)\n",
1940			   qid, ENA_TOTAL_NUM_QUEUES);
1941		return;
1942	}
1943
1944	io_sq = &ena_dev->io_sq_queues[qid];
1945	io_cq = &ena_dev->io_cq_queues[qid];
1946
1947	ena_com_destroy_io_sq(ena_dev, io_sq);
1948	ena_com_destroy_io_cq(ena_dev, io_cq);
1949
1950	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1951}
1952
1953int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1954			    struct ena_admin_get_feat_resp *resp)
1955{
1956	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1957}
1958
1959int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1960			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
1961{
1962	struct ena_admin_get_feat_resp get_resp;
1963	int rc;
1964
1965	rc = ena_com_get_feature(ena_dev, &get_resp,
1966				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1967	if (rc)
1968		return rc;
1969
1970	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1971	       sizeof(get_resp.u.dev_attr));
1972
1973	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1974	ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
1975
1976	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1977		rc = ena_com_get_feature(ena_dev, &get_resp,
1978					 ENA_ADMIN_MAX_QUEUES_EXT,
1979					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1980		if (rc)
1981			return rc;
1982
1983		if (get_resp.u.max_queue_ext.version !=
1984		    ENA_FEATURE_MAX_QUEUE_EXT_VER)
1985			return -EINVAL;
1986
1987		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1988		       sizeof(get_resp.u.max_queue_ext));
1989		ena_dev->tx_max_header_size =
1990			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1991	} else {
1992		rc = ena_com_get_feature(ena_dev, &get_resp,
1993					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1994		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1995		       sizeof(get_resp.u.max_queue));
1996		ena_dev->tx_max_header_size =
1997			get_resp.u.max_queue.max_header_size;
1998
1999		if (rc)
2000			return rc;
2001	}
2002
2003	rc = ena_com_get_feature(ena_dev, &get_resp,
2004				 ENA_ADMIN_AENQ_CONFIG, 0);
2005	if (rc)
2006		return rc;
2007
2008	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2009	       sizeof(get_resp.u.aenq));
2010
2011	rc = ena_com_get_feature(ena_dev, &get_resp,
2012				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2013	if (rc)
2014		return rc;
2015
2016	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2017	       sizeof(get_resp.u.offload));
2018
2019	/* Driver hints isn't a mandatory admin command, so in case the
2020	 * command isn't supported, set the driver hints to 0
2021	 */
2022	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2023
2024	if (!rc)
2025		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2026		       sizeof(get_resp.u.hw_hints));
2027	else if (rc == -EOPNOTSUPP)
2028		memset(&get_feat_ctx->hw_hints, 0x0,
2029		       sizeof(get_feat_ctx->hw_hints));
2030	else
2031		return rc;
2032
2033	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2034	if (!rc)
2035		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2036		       sizeof(get_resp.u.llq));
2037	else if (rc == -EOPNOTSUPP)
2038		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2039	else
2040		return rc;
2041
2042	return 0;
2043}
2044
2045void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2046{
2047	ena_com_handle_admin_completion(&ena_dev->admin_queue);
2048}
2049
2050/* ena_com_get_specific_aenq_cb:
2051 * Return the handler that is relevant to the specific event group.
2052 */
2053static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2054						     u16 group)
2055{
2056	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2057
2058	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2059		return aenq_handlers->handlers[group];
2060
2061	return aenq_handlers->unimplemented_handler;
2062}
2063
2064/* ena_com_aenq_intr_handler:
2065 * Handles the incoming AENQ events.
2066 * Pop events from the queue and apply the relevant handler to each.
2067 */
2068void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2069{
2070	struct ena_admin_aenq_entry *aenq_e;
2071	struct ena_admin_aenq_common_desc *aenq_common;
2072	struct ena_com_aenq *aenq  = &ena_dev->aenq;
2073	u64 timestamp;
2074	ena_aenq_handler handler_cb;
2075	u16 masked_head, processed = 0;
2076	u8 phase;
2077
2078	masked_head = aenq->head & (aenq->q_depth - 1);
2079	phase = aenq->phase;
2080	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2081	aenq_common = &aenq_e->aenq_common_desc;
2082
2083	/* Go over all the events */
2084	while ((READ_ONCE(aenq_common->flags) &
2085		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2086		/* Make sure the phase bit (ownership) is as expected before
2087		 * reading the rest of the descriptor.
2088		 */
2089		dma_rmb();
2090
2091		timestamp = (u64)aenq_common->timestamp_low |
2092			((u64)aenq_common->timestamp_high << 32);
2093
2094		netdev_dbg(ena_dev->net_device,
2095			   "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2096			   aenq_common->group, aenq_common->syndrome, timestamp);
2097
2098		/* Handle the specific event */
2099		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2100							  aenq_common->group);
2101		handler_cb(data, aenq_e); /* Call the actual event handler */
2102
2103		/* Get next event entry */
2104		masked_head++;
2105		processed++;
2106
2107		if (unlikely(masked_head == aenq->q_depth)) {
2108			masked_head = 0;
2109			phase = !phase;
2110		}
2111		aenq_e = &aenq->entries[masked_head];
2112		aenq_common = &aenq_e->aenq_common_desc;
2113	}
2114
2115	aenq->head += processed;
2116	aenq->phase = phase;
2117
2118	/* Don't update aenq doorbell if there weren't any processed events */
2119	if (!processed)
2120		return;
2121
2122	/* write the aenq doorbell after all AENQ descriptors were read */
2123	mb();
2124	writel_relaxed((u32)aenq->head,
2125		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2126}
2127
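/* Reset the device: verify it is ready, trigger the reset with the given
 * reason, wait for the reset-in-progress indication to assert and then
 * de-assert, and finally refresh the admin completion timeout from the
 * capabilities register.
 */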
2128int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2129		      enum ena_regs_reset_reason_types reset_reason)
2130{
2131	u32 stat, timeout, cap, reset_val;
2132	int rc;
2133
2134	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2135	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2136
2137	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2138		     (cap == ENA_MMIO_READ_TIMEOUT))) {
2139		netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2140		return -ETIME;
2141	}
2142
2143	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2144		netdev_err(ena_dev->net_device,
2145			   "Device isn't ready, can't reset device\n");
2146		return -EINVAL;
2147	}
2148
2149	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2150			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2151	if (timeout == 0) {
2152		netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2153		return -EINVAL;
2154	}
2155
2156	/* start reset */
2157	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2158	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2159		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2160	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2161
2162	/* Write again the MMIO read request address */
2163	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2164
2165	rc = wait_for_reset_state(ena_dev, timeout,
2166				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2167	if (rc != 0) {
2168		netdev_err(ena_dev->net_device,
2169			   "Reset indication didn't turn on\n");
2170		return rc;
2171	}
2172
2173	/* reset done */
2174	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2175	rc = wait_for_reset_state(ena_dev, timeout, 0);
2176	if (rc != 0) {
2177		netdev_err(ena_dev->net_device,
2178			   "Reset indication didn't turn off\n");
2179		return rc;
2180	}
2181
2182	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2183		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2184	if (timeout)
2185		/* the resolution of timeout reg is 100ms */
2186		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2187	else
2188		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2189
2190	return 0;
2191}
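
/*
 * Editor's illustrative sketch (not part of the driver): a minimal reset
 * sequence as a caller might drive it.  ENA_REGS_RESET_NORMAL is a real
 * reset reason; the wrapper name and the surrounding recovery steps are
 * assumptions - the real driver quiesces I/O and re-creates the admin queue
 * around this call.
 */
#if 0	/* usage sketch only */
static int example_reset_device(struct ena_com_dev *ena_dev)
{
	int rc;

	/* Requests a reset; ena_com_dev_reset() polls DEV_STS until the
	 * RESET_IN_PROGRESS bit toggles on and then off, using the timeout
	 * advertised in the CAPS register.
	 */
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc)
		return rc;

	/* After a successful reset the admin queue, the AENQ and the MMIO
	 * read mechanism must be re-initialized before issuing new commands.
	 */
	return 0;
}
#endif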
2192
2193static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2194			     struct ena_com_stats_ctx *ctx,
2195			     enum ena_admin_get_stats_type type)
2196{
2197	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2198	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2199	struct ena_com_admin_queue *admin_queue;
2200	int ret;
2201
2202	admin_queue = &ena_dev->admin_queue;
2203
2204	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2205	get_cmd->aq_common_descriptor.flags = 0;
2206	get_cmd->type = type;
2207
2208	ret =  ena_com_execute_admin_command(admin_queue,
2209					     (struct ena_admin_aq_entry *)get_cmd,
2210					     sizeof(*get_cmd),
2211					     (struct ena_admin_acq_entry *)get_resp,
2212					     sizeof(*get_resp));
2213
2214	if (unlikely(ret))
2215		netdev_err(ena_dev->net_device,
2216			   "Failed to get stats. error: %d\n", ret);
2217
2218	return ret;
2219}
2220
2221int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2222			  struct ena_admin_eni_stats *stats)
2223{
2224	struct ena_com_stats_ctx ctx;
2225	int ret;
2226
2227	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2228		netdev_err(ena_dev->net_device,
2229			   "Capability %d isn't supported\n",
2230			   ENA_ADMIN_ENI_STATS);
2231		return -EOPNOTSUPP;
2232	}
2233
2234	memset(&ctx, 0x0, sizeof(ctx));
2235	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2236	if (likely(ret == 0))
2237		memcpy(stats, &ctx.get_resp.u.eni_stats,
2238		       sizeof(ctx.get_resp.u.eni_stats));
2239
2240	return ret;
2241}
2242
2243int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2244				struct ena_admin_basic_stats *stats)
2245{
2246	struct ena_com_stats_ctx ctx;
2247	int ret;
2248
2249	memset(&ctx, 0x0, sizeof(ctx));
2250	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2251	if (likely(ret == 0))
2252		memcpy(stats, &ctx.get_resp.u.basic_stats,
2253		       sizeof(ctx.get_resp.u.basic_stats));
2254
2255	return ret;
2256}
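
/*
 * Editor's illustrative sketch (not part of the driver): reading the basic
 * statistics and folding the 32-bit low/high halves into 64-bit counters,
 * as an ethtool / ndo_get_stats64 path might.  The "example_" wrapper is
 * hypothetical; the field names come from struct ena_admin_basic_stats.
 */
#if 0	/* usage sketch only */
static int example_read_basic_stats(struct ena_com_dev *ena_dev,
				    u64 *rx_packets, u64 *tx_packets)
{
	struct ena_admin_basic_stats stats;
	int rc;

	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
	if (rc)
		return rc;

	*rx_packets = ((u64)stats.rx_pkts_high << 32) | stats.rx_pkts_low;
	*tx_packets = ((u64)stats.tx_pkts_high << 32) | stats.tx_pkts_low;

	return 0;
}
#endif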
2257
2258int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2259{
2260	struct ena_com_admin_queue *admin_queue;
2261	struct ena_admin_set_feat_cmd cmd;
2262	struct ena_admin_set_feat_resp resp;
2263	int ret;
2264
2265	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2266		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2267			   ENA_ADMIN_MTU);
2268		return -EOPNOTSUPP;
2269	}
2270
2271	memset(&cmd, 0x0, sizeof(cmd));
2272	admin_queue = &ena_dev->admin_queue;
2273
2274	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2275	cmd.aq_common_descriptor.flags = 0;
2276	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2277	cmd.u.mtu.mtu = mtu;
2278
2279	ret = ena_com_execute_admin_command(admin_queue,
2280					    (struct ena_admin_aq_entry *)&cmd,
2281					    sizeof(cmd),
2282					    (struct ena_admin_acq_entry *)&resp,
2283					    sizeof(resp));
2284
2285	if (unlikely(ret))
2286		netdev_err(ena_dev->net_device,
2287			   "Failed to set mtu %d. error: %d\n", mtu, ret);
2288
2289	return ret;
2290}
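
/*
 * Editor's illustrative sketch (not part of the driver): propagating a new
 * netdev MTU to the device.  The wrapper name is hypothetical; on devices
 * without the MTU feature the call returns -EOPNOTSUPP, which a caller may
 * choose to treat as non-fatal.
 */
#if 0	/* usage sketch only */
static int example_change_mtu(struct ena_com_dev *ena_dev,
			      struct net_device *netdev, int new_mtu)
{
	int rc;

	rc = ena_com_set_dev_mtu(ena_dev, new_mtu);
	if (rc)
		return rc;

	netdev->mtu = new_mtu;
	return 0;
}
#endif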
2291
2292int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2293				 struct ena_admin_feature_offload_desc *offload)
2294{
2295	int ret;
2296	struct ena_admin_get_feat_resp resp;
2297
2298	ret = ena_com_get_feature(ena_dev, &resp,
2299				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2300	if (unlikely(ret)) {
2301		netdev_err(ena_dev->net_device,
2302			   "Failed to get offload capabilities %d\n", ret);
2303		return ret;
2304	}
2305
2306	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2307
2308	return 0;
2309}
2310
2311int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2312{
2313	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2314	struct ena_rss *rss = &ena_dev->rss;
2315	struct ena_admin_set_feat_cmd cmd;
2316	struct ena_admin_set_feat_resp resp;
2317	struct ena_admin_get_feat_resp get_resp;
2318	int ret;
2319
2320	if (!ena_com_check_supported_feature_id(ena_dev,
2321						ENA_ADMIN_RSS_HASH_FUNCTION)) {
2322		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2323			   ENA_ADMIN_RSS_HASH_FUNCTION);
2324		return -EOPNOTSUPP;
2325	}
2326
2327	/* Validate hash function is supported */
2328	ret = ena_com_get_feature(ena_dev, &get_resp,
2329				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2330	if (unlikely(ret))
2331		return ret;
2332
2333	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2334		netdev_err(ena_dev->net_device,
2335			   "Func hash %d isn't supported by device, abort\n",
2336			   rss->hash_func);
2337		return -EOPNOTSUPP;
2338	}
2339
2340	memset(&cmd, 0x0, sizeof(cmd));
2341
2342	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2343	cmd.aq_common_descriptor.flags =
2344		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2345	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2346	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2347	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2348
2349	ret = ena_com_mem_addr_set(ena_dev,
2350				   &cmd.control_buffer.address,
2351				   rss->hash_key_dma_addr);
2352	if (unlikely(ret)) {
2353		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2354		return ret;
2355	}
2356
2357	cmd.control_buffer.length = sizeof(*rss->hash_key);
2358
2359	ret = ena_com_execute_admin_command(admin_queue,
2360					    (struct ena_admin_aq_entry *)&cmd,
2361					    sizeof(cmd),
2362					    (struct ena_admin_acq_entry *)&resp,
2363					    sizeof(resp));
2364	if (unlikely(ret)) {
2365		netdev_err(ena_dev->net_device,
2366			   "Failed to set hash function %d. error: %d\n",
2367			   rss->hash_func, ret);
2368		return -EINVAL;
2369	}
2370
2371	return 0;
2372}
2373
2374int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2375			       enum ena_admin_hash_functions func,
2376			       const u8 *key, u16 key_len, u32 init_val)
2377{
2378	struct ena_admin_feature_rss_flow_hash_control *hash_key;
2379	struct ena_admin_get_feat_resp get_resp;
2380	enum ena_admin_hash_functions old_func;
2381	struct ena_rss *rss = &ena_dev->rss;
2382	int rc;
2383
2384	hash_key = rss->hash_key;
2385
2386	/* Make sure the key size is a multiple of DWORDs */
2387	if (unlikely(key_len & 0x3))
2388		return -EINVAL;
2389
2390	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2391				    ENA_ADMIN_RSS_HASH_FUNCTION,
2392				    rss->hash_key_dma_addr,
2393				    sizeof(*rss->hash_key), 0);
2394	if (unlikely(rc))
2395		return rc;
2396
2397	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2398		netdev_err(ena_dev->net_device,
2399			   "Flow hash function %d isn't supported\n", func);
2400		return -EOPNOTSUPP;
2401	}
2402
2403	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
2404		if (key_len != sizeof(hash_key->key)) {
2405			netdev_err(ena_dev->net_device,
2406				   "key len (%u) doesn't equal the supported size (%zu)\n",
2407				   key_len, sizeof(hash_key->key));
2408			return -EINVAL;
2409		}
2410		memcpy(hash_key->key, key, key_len);
2411		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2412	}
2413
2414	rss->hash_init_val = init_val;
2415	old_func = rss->hash_func;
2416	rss->hash_func = func;
2417	rc = ena_com_set_hash_function(ena_dev);
2418
2419	/* Restore the old function */
2420	if (unlikely(rc))
2421		rss->hash_func = old_func;
2422
2423	return rc;
2424}
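
/*
 * Editor's illustrative sketch (not part of the driver): selecting the
 * Toeplitz hash function with a caller-supplied key whose length matches
 * hash_key->key.  ENA_HASH_KEY_SIZE is assumed to be the matching constant
 * from ena_com.h; the wrapper name and the init value are hypothetical.
 */
#if 0	/* usage sketch only */
static int example_set_toeplitz_key(struct ena_com_dev *ena_dev,
				    const u8 *key, u16 key_len)
{
	if (key_len != ENA_HASH_KEY_SIZE)
		return -EINVAL;

	/* On failure the previous hash function is restored internally. */
	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  key, key_len, 0xffffffff);
}
#endif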
2425
2426int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2427			      enum ena_admin_hash_functions *func)
2428{
2429	struct ena_rss *rss = &ena_dev->rss;
2430	struct ena_admin_get_feat_resp get_resp;
2431	int rc;
2432
2433	if (unlikely(!func))
2434		return -EINVAL;
2435
2436	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2437				    ENA_ADMIN_RSS_HASH_FUNCTION,
2438				    rss->hash_key_dma_addr,
2439				    sizeof(*rss->hash_key), 0);
2440	if (unlikely(rc))
2441		return rc;
2442
2443	/* ffs() returns a 1-based bit index (1 for the LSB); convert to 0-based */
2444	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2445	if (rss->hash_func)
2446		rss->hash_func--;
2447
2448	*func = rss->hash_func;
2449
2450	return 0;
2451}
2452
2453int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2454{
2455	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2456		ena_dev->rss.hash_key;
2457
2458	if (key)
2459		memcpy(key, hash_key->key,
2460		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2461
2462	return 0;
2463}
2464
2465int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2466			  enum ena_admin_flow_hash_proto proto,
2467			  u16 *fields)
2468{
2469	struct ena_rss *rss = &ena_dev->rss;
2470	struct ena_admin_get_feat_resp get_resp;
2471	int rc;
2472
2473	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2474				    ENA_ADMIN_RSS_HASH_INPUT,
2475				    rss->hash_ctrl_dma_addr,
2476				    sizeof(*rss->hash_ctrl), 0);
2477	if (unlikely(rc))
2478		return rc;
2479
2480	if (fields)
2481		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2482
2483	return 0;
2484}
2485
2486int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2487{
2488	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2489	struct ena_rss *rss = &ena_dev->rss;
2490	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2491	struct ena_admin_set_feat_cmd cmd;
2492	struct ena_admin_set_feat_resp resp;
2493	int ret;
2494
2495	if (!ena_com_check_supported_feature_id(ena_dev,
2496						ENA_ADMIN_RSS_HASH_INPUT)) {
2497		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2498			   ENA_ADMIN_RSS_HASH_INPUT);
2499		return -EOPNOTSUPP;
2500	}
2501
2502	memset(&cmd, 0x0, sizeof(cmd));
2503
2504	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2505	cmd.aq_common_descriptor.flags =
2506		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2507	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2508	cmd.u.flow_hash_input.enabled_input_sort =
2509		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2510		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2511
2512	ret = ena_com_mem_addr_set(ena_dev,
2513				   &cmd.control_buffer.address,
2514				   rss->hash_ctrl_dma_addr);
2515	if (unlikely(ret)) {
2516		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2517		return ret;
2518	}
2519	cmd.control_buffer.length = sizeof(*hash_ctrl);
2520
2521	ret = ena_com_execute_admin_command(admin_queue,
2522					    (struct ena_admin_aq_entry *)&cmd,
2523					    sizeof(cmd),
2524					    (struct ena_admin_acq_entry *)&resp,
2525					    sizeof(resp));
2526	if (unlikely(ret))
2527		netdev_err(ena_dev->net_device,
2528			   "Failed to set hash input. error: %d\n", ret);
2529
2530	return ret;
2531}
2532
2533int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2534{
2535	struct ena_rss *rss = &ena_dev->rss;
2536	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2537		rss->hash_ctrl;
2538	u16 available_fields = 0;
2539	int rc, i;
2540
2541	/* Get the supported hash input */
2542	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2543	if (unlikely(rc))
2544		return rc;
2545
2546	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2547		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2548		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2549
2550	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2551		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2552		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2553
2554	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2555		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2556		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2557
2558	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2559		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2560		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2561
2562	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2563		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2564
2565	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2566		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2567
2568	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2569		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2570
2571	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2572		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2573
2574	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2575		available_fields = hash_ctrl->selected_fields[i].fields &
2576				hash_ctrl->supported_fields[i].fields;
2577		if (available_fields != hash_ctrl->selected_fields[i].fields) {
2578			netdev_err(ena_dev->net_device,
2579				   "Hash control doesn't support all of the desired configuration. proto %x supported %x selected %x\n",
2580				   i, hash_ctrl->supported_fields[i].fields,
2581				   hash_ctrl->selected_fields[i].fields);
2582			return -EOPNOTSUPP;
2583		}
2584	}
2585
2586	rc = ena_com_set_hash_ctrl(ena_dev);
2587
2588	/* In case of failure, restore the old hash ctrl */
2589	if (unlikely(rc))
2590		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2591
2592	return rc;
2593}
2594
2595int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2596			   enum ena_admin_flow_hash_proto proto,
2597			   u16 hash_fields)
2598{
2599	struct ena_rss *rss = &ena_dev->rss;
2600	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2601	u16 supported_fields;
2602	int rc;
2603
2604	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2605		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
2606			   proto);
2607		return -EINVAL;
2608	}
2609
2610	/* Get the ctrl table */
2611	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2612	if (unlikely(rc))
2613		return rc;
2614
2615	/* Make sure all the fields are supported */
2616	supported_fields = hash_ctrl->supported_fields[proto].fields;
2617	if ((hash_fields & supported_fields) != hash_fields) {
2618		netdev_err(ena_dev->net_device,
2619			   "Proto %d doesn't support the required fields %x. supports only: %x\n",
2620			   proto, hash_fields, supported_fields);
2621	}
2622
2623	hash_ctrl->selected_fields[proto].fields = hash_fields;
2624
2625	rc = ena_com_set_hash_ctrl(ena_dev);
2626
2627	/* In case of failure, restore the old hash ctrl */
2628	if (unlikely(rc))
2629		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2630
2631	return 0;
2632}
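
/*
 * Editor's illustrative sketch (not part of the driver): requesting 4-tuple
 * hashing for TCP/IPv4, mirroring the defaults configured above.  The
 * wrapper name is hypothetical; the proto and field flags are the ones used
 * elsewhere in this file.
 */
#if 0	/* usage sketch only */
static int example_enable_tcp4_4tuple(struct ena_com_dev *ena_dev)
{
	u16 fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		     ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP;

	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4, fields);
}
#endif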
2633
2634int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2635				      u16 entry_idx, u16 entry_value)
2636{
2637	struct ena_rss *rss = &ena_dev->rss;
2638
2639	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2640		return -EINVAL;
2641
2642	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2643		return -EINVAL;
2644
2645	rss->host_rss_ind_tbl[entry_idx] = entry_value;
2646
2647	return 0;
2648}
2649
2650int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2651{
2652	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2653	struct ena_rss *rss = &ena_dev->rss;
2654	struct ena_admin_set_feat_cmd cmd;
2655	struct ena_admin_set_feat_resp resp;
2656	int ret;
2657
2658	if (!ena_com_check_supported_feature_id(
2659		    ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2660		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2661			   ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2662		return -EOPNOTSUPP;
2663	}
2664
2665	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2666	if (ret) {
2667		netdev_err(ena_dev->net_device,
2668			   "Failed to convert host indirection table to device table\n");
2669		return ret;
2670	}
2671
2672	memset(&cmd, 0x0, sizeof(cmd));
2673
2674	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2675	cmd.aq_common_descriptor.flags =
2676		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2677	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2678	cmd.u.ind_table.size = rss->tbl_log_size;
2679	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2680
2681	ret = ena_com_mem_addr_set(ena_dev,
2682				   &cmd.control_buffer.address,
2683				   rss->rss_ind_tbl_dma_addr);
2684	if (unlikely(ret)) {
2685		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2686		return ret;
2687	}
2688
2689	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2690		sizeof(struct ena_admin_rss_ind_table_entry);
2691
2692	ret = ena_com_execute_admin_command(admin_queue,
2693					    (struct ena_admin_aq_entry *)&cmd,
2694					    sizeof(cmd),
2695					    (struct ena_admin_acq_entry *)&resp,
2696					    sizeof(resp));
2697
2698	if (unlikely(ret))
2699		netdev_err(ena_dev->net_device,
2700			   "Failed to set indirect table. error: %d\n", ret);
2701
2702	return ret;
2703}
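
/*
 * Editor's illustrative sketch (not part of the driver): spreading RSS
 * entries round-robin over the available RX queues and pushing the table to
 * the device.  The wrapper name is hypothetical, and the mapping from a
 * queue number to the value stored in each entry is driver policy.
 */
#if 0	/* usage sketch only */
static int example_program_indirection_table(struct ena_com_dev *ena_dev,
					     u16 tbl_log_size, u16 num_queues)
{
	u16 i;
	int rc;

	for (i = 0; i < (1U << tbl_log_size); i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (rc)
			return rc;
	}

	/* Converts the host table to device format and issues the
	 * SET_FEATURE admin command.
	 */
	return ena_com_indirect_table_set(ena_dev);
}
#endif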
2704
2705int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2706{
2707	struct ena_rss *rss = &ena_dev->rss;
2708	struct ena_admin_get_feat_resp get_resp;
2709	u32 tbl_size;
2710	int i, rc;
2711
2712	tbl_size = (1ULL << rss->tbl_log_size) *
2713		sizeof(struct ena_admin_rss_ind_table_entry);
2714
2715	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2716				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2717				    rss->rss_ind_tbl_dma_addr,
2718				    tbl_size, 0);
2719	if (unlikely(rc))
2720		return rc;
2721
2722	if (!ind_tbl)
2723		return 0;
2724
2725	for (i = 0; i < (1 << rss->tbl_log_size); i++)
2726		ind_tbl[i] = rss->host_rss_ind_tbl[i];
2727
2728	return 0;
2729}
2730
2731int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2732{
2733	int rc;
2734
2735	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2736
2737	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2738	if (unlikely(rc))
2739		goto err_indr_tbl;
2740
2741	/* The following function might return -EOPNOTSUPP if the device
2742	 * doesn't support setting the key / hash function. We can safely
2743	 * ignore this error and fall back to indirection table support only.
2744	 */
2745	rc = ena_com_hash_key_allocate(ena_dev);
2746	if (likely(!rc))
2747		ena_com_hash_key_fill_default_key(ena_dev);
2748	else if (rc != -EOPNOTSUPP)
2749		goto err_hash_key;
2750
2751	rc = ena_com_hash_ctrl_init(ena_dev);
2752	if (unlikely(rc))
2753		goto err_hash_ctrl;
2754
2755	return 0;
2756
2757err_hash_ctrl:
2758	ena_com_hash_key_destroy(ena_dev);
2759err_hash_key:
2760	ena_com_indirect_table_destroy(ena_dev);
2761err_indr_tbl:
2762
2763	return rc;
2764}
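
/*
 * Editor's illustrative sketch (not part of the driver): the RSS bring-up
 * order implied by ena_com_rss_init() and the helpers above.  The log size
 * (7, i.e. a 128-entry table) and the wrapper name are assumptions;
 * example_program_indirection_table() refers to the sketch after
 * ena_com_indirect_table_set() above.
 */
#if 0	/* usage sketch only */
static int example_rss_setup(struct ena_com_dev *ena_dev, u16 num_queues)
{
	int rc;

	rc = ena_com_rss_init(ena_dev, 7);	/* 2^7 = 128 entries */
	if (rc)
		return rc;

	/* Hash function/key setup may be unsupported and is handled inside
	 * ena_com_rss_init(); the indirection table is still usable.
	 */
	rc = example_program_indirection_table(ena_dev, 7, num_queues);
	if (rc)
		ena_com_rss_destroy(ena_dev);

	return rc;
}
#endif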
2765
2766void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2767{
2768	ena_com_indirect_table_destroy(ena_dev);
2769	ena_com_hash_key_destroy(ena_dev);
2770	ena_com_hash_ctrl_destroy(ena_dev);
2771
2772	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2773}
2774
2775int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2776{
2777	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2778
2779	host_attr->host_info =
2780		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2781				   &host_attr->host_info_dma_addr, GFP_KERNEL);
2782	if (unlikely(!host_attr->host_info))
2783		return -ENOMEM;
2784
2785	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2786		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2787		(ENA_COMMON_SPEC_VERSION_MINOR));
2788
2789	return 0;
2790}
2791
2792int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2793				u32 debug_area_size)
2794{
2795	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2796
2797	host_attr->debug_area_virt_addr =
2798		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2799				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
2800	if (unlikely(!host_attr->debug_area_virt_addr)) {
2801		host_attr->debug_area_size = 0;
2802		return -ENOMEM;
2803	}
2804
2805	host_attr->debug_area_size = debug_area_size;
2806
2807	return 0;
2808}
2809
2810void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2811{
2812	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2813
2814	if (host_attr->host_info) {
2815		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2816				  host_attr->host_info_dma_addr);
2817		host_attr->host_info = NULL;
2818	}
2819}
2820
2821void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2822{
2823	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2824
2825	if (host_attr->debug_area_virt_addr) {
2826		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2827				  host_attr->debug_area_virt_addr,
2828				  host_attr->debug_area_dma_addr);
2829		host_attr->debug_area_virt_addr = NULL;
2830	}
2831}
2832
2833int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2834{
2835	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2836	struct ena_com_admin_queue *admin_queue;
2837	struct ena_admin_set_feat_cmd cmd;
2838	struct ena_admin_set_feat_resp resp;
2839
2840	int ret;
2841
2842	/* Host attribute config is called before ena_com_get_dev_attr_feat
2843	 * so ena_com can't check if the feature is supported.
2844	 */
2845
2846	memset(&cmd, 0x0, sizeof(cmd));
2847	admin_queue = &ena_dev->admin_queue;
2848
2849	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2850	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2851
2852	ret = ena_com_mem_addr_set(ena_dev,
2853				   &cmd.u.host_attr.debug_ba,
2854				   host_attr->debug_area_dma_addr);
2855	if (unlikely(ret)) {
2856		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2857		return ret;
2858	}
2859
2860	ret = ena_com_mem_addr_set(ena_dev,
2861				   &cmd.u.host_attr.os_info_ba,
2862				   host_attr->host_info_dma_addr);
2863	if (unlikely(ret)) {
2864		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2865		return ret;
2866	}
2867
2868	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2869
2870	ret = ena_com_execute_admin_command(admin_queue,
2871					    (struct ena_admin_aq_entry *)&cmd,
2872					    sizeof(cmd),
2873					    (struct ena_admin_acq_entry *)&resp,
2874					    sizeof(resp));
2875
2876	if (unlikely(ret))
2877		netdev_err(ena_dev->net_device,
2878			   "Failed to set host attributes: %d\n", ret);
2879
2880	return ret;
2881}
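
/*
 * Editor's illustrative sketch (not part of the driver): the host-info
 * handshake around ena_com_set_host_attributes().  The wrapper name and the
 * subset of host_info fields filled in are assumptions; the real driver
 * populates considerably more of struct ena_admin_host_info.
 */
#if 0	/* usage sketch only */
static int example_report_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	rc = ena_com_allocate_host_info(ena_dev);
	if (rc)
		return rc;

	host_info = ena_dev->host_attr.host_info;
	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->num_cpus = num_online_cpus();

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc)
		ena_com_delete_host_info(ena_dev);

	return rc;
}
#endif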
2882
2883/* Interrupt moderation */
2884bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2885{
2886	return ena_com_check_supported_feature_id(ena_dev,
2887						  ENA_ADMIN_INTERRUPT_MODERATION);
2888}
2889
2890static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
2891							  u32 coalesce_usecs,
2892							  u32 intr_delay_resolution,
2893							  u32 *intr_moder_interval)
2894{
2895	if (!intr_delay_resolution) {
2896		netdev_err(ena_dev->net_device,
2897			   "Illegal interrupt delay granularity value\n");
2898		return -EFAULT;
2899	}
2900
2901	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2902
2903	return 0;
2904}
2905
2906int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2907						      u32 tx_coalesce_usecs)
2908{
2909	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2910							      tx_coalesce_usecs,
2911							      ena_dev->intr_delay_resolution,
2912							      &ena_dev->intr_moder_tx_interval);
2913}
2914
2915int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2916						      u32 rx_coalesce_usecs)
2917{
2918	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2919							      rx_coalesce_usecs,
2920							      ena_dev->intr_delay_resolution,
2921							      &ena_dev->intr_moder_rx_interval);
2922}
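
/*
 * Editor's illustrative sketch (not part of the driver): the usecs-to-device
 * conversion done by the helpers above.  With a delay resolution of, say,
 * 2 us per unit (the value is read from the device at init time), a request
 * for 64 us of RX coalescing is stored as 64 / 2 = 32 register units.  The
 * wrapper name and the 64 us figure are assumptions.
 */
#if 0	/* usage sketch only */
static int example_set_rx_coalescing(struct ena_com_dev *ena_dev)
{
	/* Rounds down to a multiple of intr_delay_resolution; fails with
	 * -EFAULT if the resolution was never initialized.
	 */
	return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
}
#endif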
2923
2924int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2925{
2926	struct ena_admin_get_feat_resp get_resp;
2927	u16 delay_resolution;
2928	int rc;
2929
2930	rc = ena_com_get_feature(ena_dev, &get_resp,
2931				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2932
2933	if (rc) {
2934		if (rc == -EOPNOTSUPP) {
2935			netdev_dbg(ena_dev->net_device,
2936				   "Feature %d isn't supported\n",
2937				   ENA_ADMIN_INTERRUPT_MODERATION);
2938			rc = 0;
2939		} else {
2940			netdev_err(ena_dev->net_device,
2941				   "Failed to get interrupt moderation admin cmd. rc: %d\n",
2942				   rc);
2943		}
2944
2945		/* no moderation supported, disable adaptive support */
2946		ena_com_disable_adaptive_moderation(ena_dev);
2947		return rc;
2948	}
2949
2950	/* Moderation is supported by the device - store its delay resolution */
2951	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2952	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2953
2954	/* Disable adaptive moderation by default - can be enabled later */
2955	ena_com_disable_adaptive_moderation(ena_dev);
2956
2957	return 0;
2958}
2959
2960unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2961{
2962	return ena_dev->intr_moder_tx_interval;
2963}
2964
2965unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2966{
2967	return ena_dev->intr_moder_rx_interval;
2968}
2969
2970int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2971			    struct ena_admin_feature_llq_desc *llq_features,
2972			    struct ena_llq_configurations *llq_default_cfg)
2973{
2974	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2975	int rc;
2976
2977	if (!llq_features->max_llq_num) {
2978		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2979		return 0;
2980	}
2981
2982	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2983	if (rc)
2984		return rc;
2985
2986	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2987		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2988
2989	if (unlikely(ena_dev->tx_max_header_size == 0)) {
2990		netdev_err(ena_dev->net_device,
2991			   "The size of the LLQ entry is smaller than needed\n");
2992		return -EINVAL;
2993	}
2994
2995	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2996
2997	return 0;
2998}
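
/*
 * Editor's illustrative sketch (not part of the driver): a default LLQ
 * configuration handed to ena_com_config_dev_mode().  The field values
 * mirror common driver defaults but are assumptions here; if the device
 * reports no LLQ support (max_llq_num == 0) the function simply falls back
 * to host placement.
 */
#if 0	/* usage sketch only */
static int example_config_llq(struct ena_com_dev *ena_dev,
			      struct ena_admin_feature_llq_desc *llq_features)
{
	struct ena_llq_configurations llq_config = {
		.llq_header_location = ENA_ADMIN_INLINE_HEADER,
		.llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
		.llq_num_decs_before_header =
			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
		.llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
		.llq_ring_entry_size_value = 128,
	};

	return ena_com_config_dev_mode(ena_dev, llq_features, &llq_config);
}
#endif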