/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_TXRX_LIB_H_
#define _ICE_TXRX_LIB_H_
#include "ice.h"

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @status_err_n: Rx descriptor status_error0 or status_error1 bits
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_err_n field doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool
ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
	return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
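
/*
 * Example (illustrative only): testing for a stripped L2TAG1, mirroring
 * the check done in ice_get_vlan_tci() below:
 *
 *	if (ice_test_staterr(rx_desc->wb.status_error0,
 *			     BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)))
 *		tci = le16_to_cpu(rx_desc->wb.l2tag1);
 */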

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function returns false; otherwise
 * it returns true, indicating that this is in fact a non-EOP buffer.
 */
static inline bool
ice_is_non_eop(const struct ice_rx_ring *rx_ring,
	       const union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
		return false;

	rx_ring->ring_stats->rx_stats.non_eop_descs++;

	return true;
}
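
/*
 * Illustrative use in an Rx clean loop (hypothetical caller sketch; frame
 * fragments keep being collected until the EOP descriptor is seen):
 *
 *	if (ice_is_non_eop(rx_ring, rx_desc))
 *		continue;
 */

/**
 * ice_build_ctob - build the Tx descriptor cmd_type_offset_bsz quadword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the Tx buffer in bytes
 * @td_tag: L2TAG1 value (e.g. a VLAN tag) to insert
 *
 * Returns the little-endian DTYPE_DATA descriptor quadword with the given
 * command, offset, buffer size and tag fields encoded.
 */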
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}
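
/*
 * Example (illustrative): filling one data descriptor, assuming the caller
 * has DMA-mapped the buffer to @dma with length @size:
 *
 *	tx_desc->buf_addr = cpu_to_le64(dma);
 *	tx_desc->cmd_type_offset_bsz =
 *		ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, size, 0);
 */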

/**
 * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
 * @rx_desc: Rx 32b flex descriptor with RXDID=2
 *
 * The OS and current PF implementation only support stripping a single VLAN tag
 * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
 * one is found, return the tag; otherwise return 0, meaning no VLAN tag was
 * found.
 */
static inline u16
ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
	u16 stat_err_bits;

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag1);

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);

	return 0;
}
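
/*
 * Illustrative caller sketch (hypothetical; the real caller sits in the Rx
 * hot path before handing the frame to the stack):
 *
 *	u16 vlan_tci = ice_get_vlan_tci(rx_desc);
 *
 *	ice_receive_skb(rx_ring, skb, vlan_tci);
 */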

/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * Bump the tail register to notify HW of newly produced descriptors. The
 * wmb() below orders the descriptor writes before the tail write, which is
 * why the relaxed writel variant is sufficient here.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns the index of the descriptor on which the RS bit was set.
 */
static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	return rs_idx;
}
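
/*
 * Illustrative flush sequence after producing a batch of XDP Tx
 * descriptors (hypothetical caller sketch): request a descriptor
 * writeback for the whole batch, then ring the doorbell once.
 *
 *	u32 rs_idx = ice_set_rs_bit(xdp_ring);
 *
 *	ice_xdp_ring_update_tail(xdp_ring);
 */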

void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
			bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb);
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
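
/**
 * ice_xdp_meta_set_desc - stash the EOP descriptor in the XDP buff wrapper
 * @xdp: xdp_buff embedded in a struct ice_xdp_buff
 * @eop_desc: EOP Rx descriptor for the current frame
 *
 * Saves the descriptor pointer so that fields from it (such as hash or
 * timestamp) remain reachable later, e.g. from XDP Rx metadata handling.
 */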
static inline void
ice_xdp_meta_set_desc(struct xdp_buff *xdp,
		      union ice_32b_rx_flex_desc *eop_desc)
{
	struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
						    xdp_buff);

	xdp_ext->eop_desc = eop_desc;
}
#endif /* !_ICE_TXRX_LIB_H_ */