/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Intel Corporation. */

#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_

#include "i40e.h"

int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets);
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);

#define I40E_XDP_PASS		0
#define I40E_XDP_CONSUMED	BIT(0)
#define I40E_XDP_TX		BIT(1)
#define I40E_XDP_REDIR		BIT(2)
#define I40E_XDP_EXIT		BIT(3)

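/*
 * Illustrative sketch (assumed, not part of this header): the verdicts
 * above are OR:ed into a bitmask over one NAPI poll and handed to
 * i40e_finalize_xdp_rx(), which flushes redirects and bumps the XDP
 * Tx tail once per poll. "xdp_res" and "xdp_xmit" are hypothetical
 * locals here.
 *
 *	if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
 *		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
 *	...
 *	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 */
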
/**
 * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the Tx data buffer in bytes
 * @td_tag: L2 tag to insert into the descriptor
 **/
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
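
/*
 * Illustrative sketch (assumed): filling the command/type qword of a
 * single EOP data descriptor. "tx_desc" and "size" are hypothetical
 * locals; the command bits are the driver's real I40E_TX_DESC_CMD_*
 * values.
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
 *			   0, size, 0);
 */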

/**
 * i40e_update_tx_stats - Update the egress statistics for the Tx ring
 * @tx_ring: Tx ring to update
 * @total_packets: total packets sent
 * @total_bytes: total bytes sent
 **/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
					unsigned int total_packets,
					unsigned int total_bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;
}
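
/*
 * Illustrative sketch (assumed, mirroring the Tx clean path): totals are
 * accumulated per clean pass and folded into the ring and q_vector
 * counters once, keeping the u64_stats synchronization out of the hot
 * loop. "tx_buf" is a hypothetical local naming the Tx buffer being
 * cleaned.
 *
 *	total_bytes += tx_buf->bytecount;
 *	total_packets += tx_buf->gso_segs;
 *	...
 *	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
 */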

/* Arm a write-back when fewer than WB_STRIDE descriptors are pending */
#define WB_STRIDE 4

/**
 * i40e_arm_wb - (Possibly) arms Tx write-back
 * @tx_ring: Tx ring to update
 * @vsi: the VSI
 * @budget: the NAPI budget left
 **/
static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
			       struct i40e_vsi *vsi,
			       int budget)
{
	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* If fewer than WB_STRIDE descriptors are waiting to be
		 * written back, kick the hardware to force the write-back
		 * in case we stay in NAPI. In this mode the X722 does not
		 * raise an interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && j > 0 &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}
}
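
/*
 * Illustrative caller sketch (assumed): i40e_arm_wb() is meant to run at
 * the end of a Tx clean pass, so that the 1..3 straggling descriptors
 * (j / WB_STRIDE == 0 with j > 0) get written back even if NAPI keeps
 * polling.
 *
 *	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
 *	i40e_arm_wb(tx_ring, vsi, budget);
 */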

/**
 * i40e_rx_is_programming_status - check for programming status descriptor
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * The value in the descriptor length field indicates whether this is a
 * programming status descriptor for flow director or FCoE: the length
 * equals I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise it is a packet
 * descriptor.
 **/
static inline bool i40e_rx_is_programming_status(u64 qword1)
{
	/* The Rx filter programming status and SPH bit occupy the same
	 * spot in the descriptor. Since we don't support packet split we
	 * can just reuse the bit as an indication that this is a
	 * programming status descriptor.
	 */
	return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
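
/*
 * Illustrative sketch (assumed, mirroring the Rx clean loops): a
 * programming status descriptor carries no packet data, so it is
 * consumed separately before normal packet processing. "rx_desc" is a
 * hypothetical pointer to the current union i40e_rx_desc.
 *
 *	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 *
 *	if (i40e_rx_is_programming_status(qword)) {
 *		i40e_clean_programming_status(rx_ring,
 *					      rx_desc->raw.qword[0],
 *					      qword);
 *		continue;
 *	}
 */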

void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);

#endif /* I40E_TXRX_COMMON_ */