v6.13.7
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
  4 */
  5
  6#include "ena_eth_com.h"
  7
  8static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
  9	struct ena_com_io_cq *io_cq)
 10{
 11	struct ena_eth_io_rx_cdesc_base *cdesc;
 12	u16 expected_phase, head_masked;
 13	u16 desc_phase;
 14
 15	head_masked = io_cq->head & (io_cq->q_depth - 1);
 16	expected_phase = io_cq->phase;
 17
 18	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
 19			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
 20
 21	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
 22		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
 23
 24	if (desc_phase != expected_phase)
 25		return NULL;
 26
 27	/* Make sure we read the rest of the descriptor after the phase bit
 28	 * has been read
 29	 */
 30	dma_rmb();
 31
 32	return cdesc;
 33}
 34
 35static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 36{
 37	u16 tail_masked;
 38	u32 offset;
 39
 40	tail_masked = io_sq->tail & (io_sq->q_depth - 1);
 41
 42	offset = tail_masked * io_sq->desc_entry_size;
 43
 44	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 45}
 46
 47static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
 48						     u8 *bounce_buffer)
 49{
 50	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 51
 52	u16 dst_tail_mask;
 53	u32 dst_offset;
 54
 55	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
 56	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
 57
 58	if (is_llq_max_tx_burst_exists(io_sq)) {
 59		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
 60			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 61				   "Error: trying to send more packets than tx burst allows\n");
 62			return -ENOSPC;
 63		}
 64
 65		io_sq->entries_in_tx_burst_left--;
 66		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 67			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
 68			   io_sq->entries_in_tx_burst_left);
 69	}
 70
 71	/* Make sure everything was written into the bounce buffer before
 72	 * writing the bounce buffer to the device
 73	 */
 74	wmb();
 75
 76	/* The line is completed. Copy it to dev */
 77	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
 78			 (llq_info->desc_list_entry_size) / 8);
 79
 80	io_sq->tail++;
 81
 82	/* Switch phase bit in case of wrap around */
 83	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
 84		io_sq->phase ^= 1;
 85
 86	return 0;
 87}
 88
 89static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
 90						 u8 *header_src,
 91						 u16 header_len)
 92{
 93	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
 94	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 95	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
 96	u16 header_offset;
 97
 98	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
 99		return 0;
100
101	header_offset =
102		llq_info->descs_num_before_header * io_sq->desc_entry_size;
103
104	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
105		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
106			   "Trying to write header larger than llq entry can accommodate\n");
107		return -EFAULT;
108	}
109
110	if (unlikely(!bounce_buffer)) {
111		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
112		return -EFAULT;
113	}
114
115	memcpy(bounce_buffer + header_offset, header_src, header_len);
116
117	return 0;
118}
119
120static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
121{
122	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
123	u8 *bounce_buffer;
124	void *sq_desc;
125
126	bounce_buffer = pkt_ctrl->curr_bounce_buf;
127
128	if (unlikely(!bounce_buffer)) {
129		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
130		return NULL;
131	}
132
133	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
134	pkt_ctrl->idx++;
135	pkt_ctrl->descs_left_in_line--;
136
137	return sq_desc;
138}
139
140static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
141{
142	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
143	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
144	int rc;
145
146	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
147		return 0;
148
149	/* bounce buffer was used, so write it and get a new one */
150	if (likely(pkt_ctrl->idx)) {
151		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
152							pkt_ctrl->curr_bounce_buf);
153		if (unlikely(rc)) {
154			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
155				   "Failed to write bounce buffer to device\n");
156			return rc;
157		}
158
159		pkt_ctrl->curr_bounce_buf =
160			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
161		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
162		       0x0, llq_info->desc_list_entry_size);
163	}
164
165	pkt_ctrl->idx = 0;
166	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
167	return 0;
168}
169
170static void *get_sq_desc(struct ena_com_io_sq *io_sq)
171{
172	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
173		return get_sq_desc_llq(io_sq);
174
175	return get_sq_desc_regular_queue(io_sq);
176}
177
178static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
179{
180	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
181	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
182	int rc;
183
184	if (!pkt_ctrl->descs_left_in_line) {
185		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
186							pkt_ctrl->curr_bounce_buf);
187		if (unlikely(rc)) {
188			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
189				   "Failed to write bounce buffer to device\n");
190			return rc;
191		}
192
193		pkt_ctrl->curr_bounce_buf =
194			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
195		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
196		       0x0, llq_info->desc_list_entry_size);
197
198		pkt_ctrl->idx = 0;
199		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
200			pkt_ctrl->descs_left_in_line = 1;
201		else
202			pkt_ctrl->descs_left_in_line =
203			llq_info->desc_list_entry_size / io_sq->desc_entry_size;
204	}
205
206	return 0;
207}
208
209static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
210{
211	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
212		return ena_com_sq_update_llq_tail(io_sq);
213
214	io_sq->tail++;
215
216	/* Switch phase bit in case of wrap around */
217	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
218		io_sq->phase ^= 1;
219
220	return 0;
221}
222
223static struct ena_eth_io_rx_cdesc_base *
224	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
225{
226	idx &= (io_cq->q_depth - 1);
227	return (struct ena_eth_io_rx_cdesc_base *)
228		((uintptr_t)io_cq->cdesc_addr.virt_addr +
229		idx * io_cq->cdesc_entry_size_in_bytes);
230}
231
232static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
233				    u16 *first_cdesc_idx,
234				    u16 *num_descs)
235{
236	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
237	struct ena_eth_io_rx_cdesc_base *cdesc;
238	u32 last = 0;
239
240	do {
241		u32 status;
242
243		cdesc = ena_com_get_next_rx_cdesc(io_cq);
244		if (!cdesc)
245			break;
246		status = READ_ONCE(cdesc->status);
247
248		ena_com_cq_inc_head(io_cq);
249		if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
250		    ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
251			struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
252
253			netdev_err(dev->net_device,
254				   "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
255				   count, io_cq->qid, cdesc->req_id);
256			return -EFAULT;
257		}
258		count++;
259		last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
260			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
261	} while (!last);
262
263	if (last) {
264		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
265
266		head_masked = io_cq->head & (io_cq->q_depth - 1);
267
268		*num_descs = count;
269		io_cq->cur_rx_pkt_cdesc_count = 0;
270		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
271
272		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
273			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
274			   io_cq->qid, *first_cdesc_idx, count);
275	} else {
276		io_cq->cur_rx_pkt_cdesc_count = count;
277		*num_descs = 0;
278	}
279
280	return 0;
281}
282
283static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
284			       struct ena_com_tx_meta *ena_meta)
285{
286	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
287
288	meta_desc = get_sq_desc(io_sq);
289	if (unlikely(!meta_desc))
290		return -EFAULT;
291
292	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
293
294	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
295
296	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
297
298	/* bits 0-9 of the mss */
299	meta_desc->word2 |= ((u32)ena_meta->mss <<
300		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
301		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
302	/* bits 10-13 of the mss */
303	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
304		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
305		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
306
307	/* Extended meta desc */
308	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
309	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
310		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
311		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
312
313	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
314	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
315
316	meta_desc->word2 |= ena_meta->l3_hdr_len &
317		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
318	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
319		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
320		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
321
322	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
323		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
324		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
325
326	return ena_com_sq_update_tail(io_sq);
327}
328
329static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
330						 struct ena_com_tx_ctx *ena_tx_ctx,
331						 bool *have_meta)
332{
333	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
334
335	/* When disable meta caching is set, don't bother to save the meta and
336	 * compare it to the stored version, just create the meta
337	 */
338	if (io_sq->disable_meta_caching) {
339		*have_meta = true;
340		return ena_com_create_meta(io_sq, ena_meta);
341	}
342
343	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
344		*have_meta = true;
345		/* Cache the meta desc */
346		memcpy(&io_sq->cached_tx_meta, ena_meta,
347		       sizeof(struct ena_com_tx_meta));
348		return ena_com_create_meta(io_sq, ena_meta);
349	}
350
351	*have_meta = false;
352	return 0;
353}
354
355static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
356				 struct ena_com_rx_ctx *ena_rx_ctx,
357				 struct ena_eth_io_rx_cdesc_base *cdesc)
358{
359	ena_rx_ctx->l3_proto = cdesc->status &
360		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
361	ena_rx_ctx->l4_proto =
362		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
363		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
364	ena_rx_ctx->l3_csum_err =
365		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
366		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
367	ena_rx_ctx->l4_csum_err =
368		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
369		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
370	ena_rx_ctx->l4_csum_checked =
371		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
372		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
373	ena_rx_ctx->hash = cdesc->hash;
374	ena_rx_ctx->frag =
375		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
376		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
377
378	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
379		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
380		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
381		   ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
382}
383
384/*****************************************************************************/
385/*****************************     API      **********************************/
386/*****************************************************************************/
387
388int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
389		       struct ena_com_tx_ctx *ena_tx_ctx,
390		       int *nb_hw_desc)
391{
392	struct ena_eth_io_tx_desc *desc = NULL;
393	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
394	void *buffer_to_push = ena_tx_ctx->push_header;
395	u16 header_len = ena_tx_ctx->header_len;
396	u16 num_bufs = ena_tx_ctx->num_bufs;
397	u16 start_tail = io_sq->tail;
398	int i, rc;
399	bool have_meta;
400	u64 addr_hi;
401
402	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
403
404	/* num_bufs +1 for potential meta desc */
405	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
406		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
407			   "Not enough space in the tx queue\n");
408		return -ENOMEM;
409	}
410
411	if (unlikely(header_len > io_sq->tx_max_header_size)) {
412		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
413			   "Header size is too large %d max header: %d\n", header_len,
414			   io_sq->tx_max_header_size);
415		return -EINVAL;
416	}
417
418	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
419		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
420			   "Push header wasn't provided in LLQ mode\n");
421		return -EINVAL;
422	}
423
424	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
425	if (unlikely(rc))
426		return rc;
427
428	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
429	if (unlikely(rc)) {
430		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
431			   "Failed to create and store tx meta desc\n");
432		return rc;
433	}
434
435	/* If the caller doesn't want to send packets */
436	if (unlikely(!num_bufs && !header_len)) {
437		rc = ena_com_close_bounce_buffer(io_sq);
438		if (rc)
439			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
440				   "Failed to write buffers to LLQ\n");
441		*nb_hw_desc = io_sq->tail - start_tail;
442		return rc;
443	}
444
445	desc = get_sq_desc(io_sq);
446	if (unlikely(!desc))
447		return -EFAULT;
448	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
449
450	/* Set first desc when we don't have meta descriptor */
451	if (!have_meta)
452		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
453
454	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
455		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
456		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
457	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
458		ENA_ETH_IO_TX_DESC_PHASE_MASK;
459
460	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
461
462	/* Bits 0-9 */
463	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
464		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
465		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
466
467	desc->meta_ctrl |= (ena_tx_ctx->df <<
468		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
469		ENA_ETH_IO_TX_DESC_DF_MASK;
470
471	/* Bits 10-15 */
472	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
473		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
474		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
475
476	if (ena_tx_ctx->meta_valid) {
477		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
478			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
479			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
480		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
481			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
482		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
483			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
484			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
485		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
486			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
487			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
488		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
489			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
490			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
491		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
492			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
493			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
494	}
495
496	for (i = 0; i < num_bufs; i++) {
 497		/* The first desc shares the same desc as the header */
498		if (likely(i != 0)) {
499			rc = ena_com_sq_update_tail(io_sq);
500			if (unlikely(rc)) {
501				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
502					   "Failed to update sq tail\n");
503				return rc;
504			}
505
506			desc = get_sq_desc(io_sq);
507			if (unlikely(!desc))
508				return -EFAULT;
509
510			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
511
512			desc->len_ctrl |= ((u32)io_sq->phase <<
513				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
514				ENA_ETH_IO_TX_DESC_PHASE_MASK;
515		}
516
517		desc->len_ctrl |= ena_bufs->len &
518			ENA_ETH_IO_TX_DESC_LENGTH_MASK;
519
520		addr_hi = ((ena_bufs->paddr &
521			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
522
523		desc->buff_addr_lo = (u32)ena_bufs->paddr;
524		desc->buff_addr_hi_hdr_sz |= addr_hi &
525			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
526		ena_bufs++;
527	}
528
529	/* set the last desc indicator */
530	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
531
532	rc = ena_com_sq_update_tail(io_sq);
533	if (unlikely(rc)) {
534		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
535			   "Failed to update sq tail of the last descriptor\n");
536		return rc;
537	}
538
539	rc = ena_com_close_bounce_buffer(io_sq);
540
541	*nb_hw_desc = io_sq->tail - start_tail;
542	return rc;
543}
544
545int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
546		   struct ena_com_io_sq *io_sq,
547		   struct ena_com_rx_ctx *ena_rx_ctx)
548{
549	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
550	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
551	u16 q_depth = io_cq->q_depth;
552	u16 cdesc_idx = 0;
553	u16 nb_hw_desc;
554	u16 i = 0;
555	int rc;
556
557	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
558
559	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
560	if (unlikely(rc != 0))
561		return -EFAULT;
562
563	if (nb_hw_desc == 0) {
564		ena_rx_ctx->descs = nb_hw_desc;
565		return 0;
566	}
567
568	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
569		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
570
571	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
572		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
573			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
574		return -ENOSPC;
575	}
576
577	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
578	ena_rx_ctx->pkt_offset = cdesc->offset;
579
580	do {
581		ena_buf[i].len = cdesc->length;
582		ena_buf[i].req_id = cdesc->req_id;
583		if (unlikely(ena_buf[i].req_id >= q_depth))
584			return -EIO;
585
586		if (++i >= nb_hw_desc)
587			break;
588
589		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
590
591	} while (1);
592
593	/* Update SQ head ptr */
594	io_sq->next_to_comp += nb_hw_desc;
595
596	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
597		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
598		   io_sq->next_to_comp);
599
600	/* Get rx flags from the last pkt */
601	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
602
603	ena_rx_ctx->descs = nb_hw_desc;
604
605	return 0;
606}
607
608int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
609			       struct ena_com_buf *ena_buf,
610			       u16 req_id)
611{
612	struct ena_eth_io_rx_desc *desc;
613
614	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
615
616	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
617		return -ENOSPC;
618
619	desc = get_sq_desc(io_sq);
620	if (unlikely(!desc))
621		return -EFAULT;
622
623	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
624
625	desc->length = ena_buf->len;
626
627	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
628		     ENA_ETH_IO_RX_DESC_LAST_MASK |
629		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
630		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);
631
632	desc->req_id = req_id;
633
634	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
635		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
636		   req_id);
637
638	desc->buff_addr_lo = (u32)ena_buf->paddr;
639	desc->buff_addr_hi =
640		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
641
642	return ena_com_sq_update_tail(io_sq);
643}
644
645bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
646{
647	struct ena_eth_io_rx_cdesc_base *cdesc;
648
649	cdesc = ena_com_get_next_rx_cdesc(io_cq);
650	if (cdesc)
651		return false;
652	else
653		return true;
654}

v6.2
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
  4 */
  5
  6#include "ena_eth_com.h"
  7
  8static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
  9	struct ena_com_io_cq *io_cq)
 10{
 11	struct ena_eth_io_rx_cdesc_base *cdesc;
 12	u16 expected_phase, head_masked;
 13	u16 desc_phase;
 14
 15	head_masked = io_cq->head & (io_cq->q_depth - 1);
 16	expected_phase = io_cq->phase;
 17
 18	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
 19			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
 20
 21	desc_phase = (READ_ONCE(cdesc->status) &
 22		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
 23		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
 24
 25	if (desc_phase != expected_phase)
 26		return NULL;
 27
 28	/* Make sure we read the rest of the descriptor after the phase bit
 29	 * has been read
 30	 */
 31	dma_rmb();
 32
 33	return cdesc;
 34}
 35
 36static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 37{
 38	u16 tail_masked;
 39	u32 offset;
 40
 41	tail_masked = io_sq->tail & (io_sq->q_depth - 1);
 42
 43	offset = tail_masked * io_sq->desc_entry_size;
 44
 45	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 46}
 47
 48static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
 49						     u8 *bounce_buffer)
 50{
 51	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 52
 53	u16 dst_tail_mask;
 54	u32 dst_offset;
 55
 56	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
 57	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
 58
 59	if (is_llq_max_tx_burst_exists(io_sq)) {
 60		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
 61			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 62				   "Error: trying to send more packets than tx burst allows\n");
 63			return -ENOSPC;
 64		}
 65
 66		io_sq->entries_in_tx_burst_left--;
 67		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 68			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
 69			   io_sq->qid, io_sq->entries_in_tx_burst_left);
 70	}
 71
 72	/* Make sure everything was written into the bounce buffer before
 73	 * writing the bounce buffer to the device
 74	 */
 75	wmb();
 76
 77	/* The line is completed. Copy it to dev */
 78	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
 79			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);
 80
 81	io_sq->tail++;
 82
 83	/* Switch phase bit in case of wrap around */
 84	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
 85		io_sq->phase ^= 1;
 86
 87	return 0;
 88}
 89
 90static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
 91						 u8 *header_src,
 92						 u16 header_len)
 93{
 94	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
 95	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 96	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
 97	u16 header_offset;
 98
 99	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
100		return 0;
101
102	header_offset =
103		llq_info->descs_num_before_header * io_sq->desc_entry_size;
104
105	if (unlikely((header_offset + header_len) >
106		     llq_info->desc_list_entry_size)) {
107		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
108			   "Trying to write header larger than llq entry can accommodate\n");
109		return -EFAULT;
110	}
111
112	if (unlikely(!bounce_buffer)) {
113		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
114			   "Bounce buffer is NULL\n");
115		return -EFAULT;
116	}
117
118	memcpy(bounce_buffer + header_offset, header_src, header_len);
119
120	return 0;
121}
122
123static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
124{
125	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
126	u8 *bounce_buffer;
127	void *sq_desc;
128
129	bounce_buffer = pkt_ctrl->curr_bounce_buf;
130
131	if (unlikely(!bounce_buffer)) {
132		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
133			   "Bounce buffer is NULL\n");
134		return NULL;
135	}
136
137	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
138	pkt_ctrl->idx++;
139	pkt_ctrl->descs_left_in_line--;
140
141	return sq_desc;
142}
143
144static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
145{
146	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
147	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
148	int rc;
149
150	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
151		return 0;
152
153	/* bounce buffer was used, so write it and get a new one */
154	if (likely(pkt_ctrl->idx)) {
155		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
156							pkt_ctrl->curr_bounce_buf);
157		if (unlikely(rc)) {
158			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
159				   "Failed to write bounce buffer to device\n");
160			return rc;
161		}
162
163		pkt_ctrl->curr_bounce_buf =
164			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
165		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
166		       0x0, llq_info->desc_list_entry_size);
167	}
168
169	pkt_ctrl->idx = 0;
170	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
171	return 0;
172}
173
174static void *get_sq_desc(struct ena_com_io_sq *io_sq)
175{
176	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
177		return get_sq_desc_llq(io_sq);
178
179	return get_sq_desc_regular_queue(io_sq);
180}
181
182static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
183{
184	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
185	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
186	int rc;
187
188	if (!pkt_ctrl->descs_left_in_line) {
189		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
190							pkt_ctrl->curr_bounce_buf);
191		if (unlikely(rc)) {
192			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
193				   "Failed to write bounce buffer to device\n");
194			return rc;
195		}
196
197		pkt_ctrl->curr_bounce_buf =
198			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
199		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
200		       0x0, llq_info->desc_list_entry_size);
201
202		pkt_ctrl->idx = 0;
203		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
204			pkt_ctrl->descs_left_in_line = 1;
205		else
206			pkt_ctrl->descs_left_in_line =
207			llq_info->desc_list_entry_size / io_sq->desc_entry_size;
208	}
209
210	return 0;
211}
212
213static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
214{
215	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
216		return ena_com_sq_update_llq_tail(io_sq);
217
218	io_sq->tail++;
219
220	/* Switch phase bit in case of wrap around */
221	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
222		io_sq->phase ^= 1;
223
224	return 0;
225}
226
227static struct ena_eth_io_rx_cdesc_base *
228	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
229{
230	idx &= (io_cq->q_depth - 1);
231	return (struct ena_eth_io_rx_cdesc_base *)
232		((uintptr_t)io_cq->cdesc_addr.virt_addr +
233		idx * io_cq->cdesc_entry_size_in_bytes);
234}
235
236static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
237					   u16 *first_cdesc_idx)
238{
239	struct ena_eth_io_rx_cdesc_base *cdesc;
240	u16 count = 0, head_masked;
241	u32 last = 0;
242
243	do {
244		cdesc = ena_com_get_next_rx_cdesc(io_cq);
245		if (!cdesc)
246			break;
247
248		ena_com_cq_inc_head(io_cq);
249		count++;
250		last = (READ_ONCE(cdesc->status) &
251			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
252		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
253	} while (!last);
254
255	if (last) {
256		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
257		count += io_cq->cur_rx_pkt_cdesc_count;
258
259		head_masked = io_cq->head & (io_cq->q_depth - 1);
260
261		io_cq->cur_rx_pkt_cdesc_count = 0;
262		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
263
264		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
265			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
266			   io_cq->qid, *first_cdesc_idx, count);
267	} else {
268		io_cq->cur_rx_pkt_cdesc_count += count;
269		count = 0;
270	}
271
272	return count;
273}
274
275static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
276			       struct ena_com_tx_meta *ena_meta)
277{
278	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
279
280	meta_desc = get_sq_desc(io_sq);
281	if (unlikely(!meta_desc))
282		return -EFAULT;
283
284	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
285
286	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
287
288	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
289
290	/* bits 0-9 of the mss */
291	meta_desc->word2 |= ((u32)ena_meta->mss <<
292		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
293		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
294	/* bits 10-13 of the mss */
295	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
296		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
297		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
298
299	/* Extended meta desc */
300	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
301	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
302		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
303		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
304
305	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
306	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
307
308	meta_desc->word2 |= ena_meta->l3_hdr_len &
309		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
310	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
311		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
312		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
313
314	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
315		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
316		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
317
318	return ena_com_sq_update_tail(io_sq);
319}
320
321static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
322						 struct ena_com_tx_ctx *ena_tx_ctx,
323						 bool *have_meta)
324{
325	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
326
327	/* When disable meta caching is set, don't bother to save the meta and
328	 * compare it to the stored version, just create the meta
329	 */
330	if (io_sq->disable_meta_caching) {
331		if (unlikely(!ena_tx_ctx->meta_valid))
332			return -EINVAL;
333
334		*have_meta = true;
335		return ena_com_create_meta(io_sq, ena_meta);
336	}
337
338	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
339		*have_meta = true;
340		/* Cache the meta desc */
341		memcpy(&io_sq->cached_tx_meta, ena_meta,
342		       sizeof(struct ena_com_tx_meta));
343		return ena_com_create_meta(io_sq, ena_meta);
344	}
345
346	*have_meta = false;
347	return 0;
348}
349
350static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
351				 struct ena_com_rx_ctx *ena_rx_ctx,
352				 struct ena_eth_io_rx_cdesc_base *cdesc)
353{
354	ena_rx_ctx->l3_proto = cdesc->status &
355		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
356	ena_rx_ctx->l4_proto =
357		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
358		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
359	ena_rx_ctx->l3_csum_err =
360		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
361		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
362	ena_rx_ctx->l4_csum_err =
363		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
364		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
365	ena_rx_ctx->l4_csum_checked =
366		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
367		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
368	ena_rx_ctx->hash = cdesc->hash;
369	ena_rx_ctx->frag =
370		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
371		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
372
373	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
374		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
375		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
376		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
377		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
378}
379
380/*****************************************************************************/
381/*****************************     API      **********************************/
382/*****************************************************************************/
383
384int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
385		       struct ena_com_tx_ctx *ena_tx_ctx,
386		       int *nb_hw_desc)
387{
388	struct ena_eth_io_tx_desc *desc = NULL;
389	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
390	void *buffer_to_push = ena_tx_ctx->push_header;
391	u16 header_len = ena_tx_ctx->header_len;
392	u16 num_bufs = ena_tx_ctx->num_bufs;
393	u16 start_tail = io_sq->tail;
394	int i, rc;
395	bool have_meta;
396	u64 addr_hi;
397
398	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
399
400	/* num_bufs +1 for potential meta desc */
401	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
402		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
403			   "Not enough space in the tx queue\n");
404		return -ENOMEM;
405	}
406
407	if (unlikely(header_len > io_sq->tx_max_header_size)) {
408		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
409			   "Header size is too large %d max header: %d\n",
410			   header_len, io_sq->tx_max_header_size);
411		return -EINVAL;
412	}
413
414	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
415		     !buffer_to_push)) {
416		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
417			   "Push header wasn't provided in LLQ mode\n");
418		return -EINVAL;
419	}
420
421	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
422	if (unlikely(rc))
423		return rc;
424
425	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
426	if (unlikely(rc)) {
427		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
428			   "Failed to create and store tx meta desc\n");
429		return rc;
430	}
431
432	/* If the caller doesn't want to send packets */
433	if (unlikely(!num_bufs && !header_len)) {
434		rc = ena_com_close_bounce_buffer(io_sq);
435		if (rc)
436			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
437				   "Failed to write buffers to LLQ\n");
438		*nb_hw_desc = io_sq->tail - start_tail;
439		return rc;
440	}
441
442	desc = get_sq_desc(io_sq);
443	if (unlikely(!desc))
444		return -EFAULT;
445	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
446
447	/* Set first desc when we don't have meta descriptor */
448	if (!have_meta)
449		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
450
451	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
452		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
453		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
454	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
455		ENA_ETH_IO_TX_DESC_PHASE_MASK;
456
457	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
458
459	/* Bits 0-9 */
460	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
461		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
462		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
463
464	desc->meta_ctrl |= (ena_tx_ctx->df <<
465		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
466		ENA_ETH_IO_TX_DESC_DF_MASK;
467
468	/* Bits 10-15 */
469	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
470		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
471		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
472
473	if (ena_tx_ctx->meta_valid) {
474		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
475			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
476			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
477		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
478			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
479		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
480			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
481			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
482		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
483			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
484			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
485		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
486			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
487			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
488		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
489			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
490			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
491	}
492
493	for (i = 0; i < num_bufs; i++) {
 494		/* The first desc shares the same desc as the header */
495		if (likely(i != 0)) {
496			rc = ena_com_sq_update_tail(io_sq);
497			if (unlikely(rc)) {
498				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
499					   "Failed to update sq tail\n");
500				return rc;
501			}
502
503			desc = get_sq_desc(io_sq);
504			if (unlikely(!desc))
505				return -EFAULT;
506
507			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
508
509			desc->len_ctrl |= ((u32)io_sq->phase <<
510				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
511				ENA_ETH_IO_TX_DESC_PHASE_MASK;
512		}
513
514		desc->len_ctrl |= ena_bufs->len &
515			ENA_ETH_IO_TX_DESC_LENGTH_MASK;
516
517		addr_hi = ((ena_bufs->paddr &
518			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
519
520		desc->buff_addr_lo = (u32)ena_bufs->paddr;
521		desc->buff_addr_hi_hdr_sz |= addr_hi &
522			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
523		ena_bufs++;
524	}
525
526	/* set the last desc indicator */
527	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
528
529	rc = ena_com_sq_update_tail(io_sq);
530	if (unlikely(rc)) {
531		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
532			   "Failed to update sq tail of the last descriptor\n");
533		return rc;
534	}
535
536	rc = ena_com_close_bounce_buffer(io_sq);
537
538	*nb_hw_desc = io_sq->tail - start_tail;
539	return rc;
540}
541
542int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
543		   struct ena_com_io_sq *io_sq,
544		   struct ena_com_rx_ctx *ena_rx_ctx)
545{
546	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
547	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
548	u16 q_depth = io_cq->q_depth;
549	u16 cdesc_idx = 0;
550	u16 nb_hw_desc;
551	u16 i = 0;
552
553	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
554
555	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
556	if (nb_hw_desc == 0) {
557		ena_rx_ctx->descs = nb_hw_desc;
558		return 0;
559	}
560
561	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
562		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
563		   nb_hw_desc);
564
565	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
566		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
567			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
568			   ena_rx_ctx->max_bufs);
569		return -ENOSPC;
570	}
571
572	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
573	ena_rx_ctx->pkt_offset = cdesc->offset;
574
575	do {
576		ena_buf[i].len = cdesc->length;
577		ena_buf[i].req_id = cdesc->req_id;
578		if (unlikely(ena_buf[i].req_id >= q_depth))
579			return -EIO;
580
581		if (++i >= nb_hw_desc)
582			break;
583
584		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
585
586	} while (1);
587
588	/* Update SQ head ptr */
589	io_sq->next_to_comp += nb_hw_desc;
590
591	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
592		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
593		   io_sq->qid, io_sq->next_to_comp);
594
595	/* Get rx flags from the last pkt */
596	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
597
598	ena_rx_ctx->descs = nb_hw_desc;
599
600	return 0;
601}
602
603int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
604			       struct ena_com_buf *ena_buf,
605			       u16 req_id)
606{
607	struct ena_eth_io_rx_desc *desc;
608
609	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
610
611	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
612		return -ENOSPC;
613
614	desc = get_sq_desc(io_sq);
615	if (unlikely(!desc))
616		return -EFAULT;
617
618	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
619
620	desc->length = ena_buf->len;
621
622	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
623		     ENA_ETH_IO_RX_DESC_LAST_MASK |
624		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
625		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);
626
627	desc->req_id = req_id;
628
629	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
630		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
631		   __func__, io_sq->qid, req_id);
632
633	desc->buff_addr_lo = (u32)ena_buf->paddr;
634	desc->buff_addr_hi =
635		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
636
637	return ena_com_sq_update_tail(io_sq);
638}
639
640bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
641{
642	struct ena_eth_io_rx_cdesc_base *cdesc;
643
644	cdesc = ena_com_get_next_rx_cdesc(io_cq);
645	if (cdesc)
646		return false;
647	else
648		return true;
649}