Loading...
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Intel Corporation. */

#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_

/* Rx/Tx helpers shared between the regular and the AF_XDP zero-copy
 * datapaths -- presumably implemented in i40e_txrx.c / i40e_xsk.c
 * (TODO confirm against the build).
 */
void i40e_fd_handle_status(struct i40e_ring *rx_ring,
			   union i40e_rx_desc *rx_desc, u8 prog_id);
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
struct i40e_rx_buffer *i40e_clean_programming_status(
	struct i40e_ring *rx_ring,
	union i40e_rx_desc *rx_desc,
	u64 qw);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets);
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);

/* XDP verdict/result codes for the Rx handlers. */
#define I40E_XDP_PASS 0
#define I40E_XDP_CONSUMED BIT(0)
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
27
28/**
29 * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
30 **/
31static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
32 u32 td_tag)
33{
34 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
35 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
36 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
37 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
38 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
39}
40
41/**
42 * i40e_update_tx_stats - Update the egress statistics for the Tx ring
43 * @tx_ring: Tx ring to update
44 * @total_packet: total packets sent
45 * @total_bytes: total bytes sent
46 **/
47static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
48 unsigned int total_packets,
49 unsigned int total_bytes)
50{
51 u64_stats_update_begin(&tx_ring->syncp);
52 tx_ring->stats.bytes += total_bytes;
53 tx_ring->stats.packets += total_packets;
54 u64_stats_update_end(&tx_ring->syncp);
55 tx_ring->q_vector->tx.total_bytes += total_bytes;
56 tx_ring->q_vector->tx.total_packets += total_packets;
57}
58
59#define WB_STRIDE 4
60
61/**
62 * i40e_arm_wb - (Possibly) arms Tx write-back
63 * @tx_ring: Tx ring to update
64 * @vsi: the VSI
65 * @budget: the NAPI budget left
66 **/
67static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
68 struct i40e_vsi *vsi,
69 int budget)
70{
71 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
72 /* check to see if there are < 4 descriptors
73 * waiting to be written back, then kick the hardware to force
74 * them to be written back in case we stay in NAPI.
75 * In this mode on X722 we do not enable Interrupt.
76 */
77 unsigned int j = i40e_get_tx_pending(tx_ring, false);
78
79 if (budget &&
80 ((j / WB_STRIDE) == 0) && j > 0 &&
81 !test_bit(__I40E_VSI_DOWN, vsi->state) &&
82 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
83 tx_ring->arm_wb = true;
84 }
85}
86
/* AF_XDP zero-copy (XSK) ring teardown/query helpers -- presumably
 * implemented in i40e_xsk.c (TODO confirm).
 */
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);

#endif /* I40E_TXRX_COMMON_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Intel Corporation. */

#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_

/* Rx/Tx helpers shared between the regular and the AF_XDP zero-copy
 * datapaths -- presumably implemented in i40e_txrx.c / i40e_xsk.c
 * (TODO confirm against the build).
 */
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets);
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);

/* XDP verdict/result codes for the Rx handlers.  I40E_XDP_EXIT
 * presumably signals an early exit from the Rx loop -- TODO confirm
 * against the callers in i40e_txrx.c/i40e_xsk.c.
 */
#define I40E_XDP_PASS 0
#define I40E_XDP_CONSUMED BIT(0)
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
#define I40E_XDP_EXIT BIT(3)
24
25/*
26 * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
27 */
28static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
29 u32 td_tag)
30{
31 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
32 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
33 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
34 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
35 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
36}
37
38/**
39 * i40e_update_tx_stats - Update the egress statistics for the Tx ring
40 * @tx_ring: Tx ring to update
41 * @total_packets: total packets sent
42 * @total_bytes: total bytes sent
43 **/
44static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
45 unsigned int total_packets,
46 unsigned int total_bytes)
47{
48 u64_stats_update_begin(&tx_ring->syncp);
49 tx_ring->stats.bytes += total_bytes;
50 tx_ring->stats.packets += total_packets;
51 u64_stats_update_end(&tx_ring->syncp);
52 tx_ring->q_vector->tx.total_bytes += total_bytes;
53 tx_ring->q_vector->tx.total_packets += total_packets;
54}
55
56#define WB_STRIDE 4
57
58/**
59 * i40e_arm_wb - (Possibly) arms Tx write-back
60 * @tx_ring: Tx ring to update
61 * @vsi: the VSI
62 * @budget: the NAPI budget left
63 **/
64static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
65 struct i40e_vsi *vsi,
66 int budget)
67{
68 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
69 /* check to see if there are < 4 descriptors
70 * waiting to be written back, then kick the hardware to force
71 * them to be written back in case we stay in NAPI.
72 * In this mode on X722 we do not enable Interrupt.
73 */
74 unsigned int j = i40e_get_tx_pending(tx_ring, false);
75
76 if (budget &&
77 ((j / WB_STRIDE) == 0) && j > 0 &&
78 !test_bit(__I40E_VSI_DOWN, vsi->state) &&
79 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
80 tx_ring->arm_wb = true;
81 }
82}
83
84/**
85 * i40e_rx_is_programming_status - check for programming status descriptor
86 * @qword1: qword1 representing status_error_len in CPU ordering
87 *
88 * The value of in the descriptor length field indicate if this
89 * is a programming status descriptor for flow director or FCoE
90 * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
91 * it is a packet descriptor.
92 **/
93static inline bool i40e_rx_is_programming_status(u64 qword1)
94{
95 /* The Rx filter programming status and SPH bit occupy the same
96 * spot in the descriptor. Since we don't support packet split we
97 * can just reuse the bit as an indication that this is a
98 * programming status descriptor.
99 */
100 return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
101}
102
/* AF_XDP zero-copy (XSK) ring teardown/query helpers -- presumably
 * implemented in i40e_xsk.c (TODO confirm).
 */
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);

#endif /* I40E_TXRX_COMMON_ */