v6.13.7 (drivers/net/ethernet/intel/ice/ice_txrx_lib.h):
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_TXRX_LIB_H_
#define _ICE_TXRX_LIB_H_
#include "ice.h"

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @status_err_n: Rx descriptor status_error0 or status_error1 bits
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool
ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
	return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
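
As a quick usage sketch (not part of this file): the driver's Rx cleanup tests other status_error0 bits through the same wrapper, for example the receive-error bit. The helper below is hypothetical; only ice_test_staterr() and the ICE_RX_FLEX_DESC_STATUS0_RXE_S bit name come from the driver:

/* Hypothetical caller: true if the descriptor reports a receive error,
 * in which case the frame should be dropped rather than built into an skb.
 */
static inline bool
ice_rx_desc_has_error(const union ice_32b_rx_flex_desc *rx_desc)
{
	return ice_test_staterr(rx_desc->wb.status_error0,
				BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S));
}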

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static inline bool
ice_is_non_eop(const struct ice_rx_ring *rx_ring,
	       const union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
		return false;

	rx_ring->ring_stats->rx_stats.non_eop_descs++;

	return true;
}
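
A minimal sketch of the loop shape that consumes ice_is_non_eop(): the walk keeps advancing past non-EOP descriptors so every fragment of a multi-buffer frame is gathered before the frame is handled. The function below is illustrative only and heavily simplified; the real cleanup path also checks the DD bit, fills an xdp_buff, and honors the NAPI budget:

/* Hypothetical, simplified descriptor walk using ice_is_non_eop(). */
static inline void ice_rx_walk_sketch(struct ice_rx_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean;

	while (ntc != rx_ring->next_to_use) {
		union ice_32b_rx_flex_desc *rx_desc = ICE_RX_DESC(rx_ring, ntc);

		if (++ntc == rx_ring->count)
			ntc = 0;
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;	/* same frame continues in next buffer */
		/* frame is complete here: run XDP / build the skb */
	}
	rx_ring->next_to_clean = ntc;
}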

static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}
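
ice_build_ctob() packs the DTYPE, command, offset, buffer size and L2 tag fields of Tx quad word 1 into a single little-endian value. A hedged example of filling one data descriptor with it; the function itself is invented for illustration, while ICE_TX_DESC() (used further down in this file), ICE_TX_DESC_CMD_EOP and ICE_TX_DESC_CMD_RS are existing driver symbols:

/* Hypothetical single-buffer fill: dma/size are assumed to come from an
 * earlier DMA mapping. EOP marks the frame's last buffer, RS requests a
 * completion writeback for it.
 */
static inline void
ice_fill_data_desc_sketch(struct ice_tx_ring *tx_ring, u16 idx,
			  dma_addr_t dma, unsigned int size)
{
	struct ice_tx_desc *tx_desc = ICE_TX_DESC(tx_ring, idx);

	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS,
			       0, size, 0);
}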

/**
 * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
 * @rx_desc: Rx 32b flex descriptor with RXDID=2
 *
 * The OS and current PF implementation only support stripping a single VLAN tag
 * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
 * one is found return the tag, else return 0 to mean no VLAN tag was found.
 */
static inline u16
ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
	u16 stat_err_bits;

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag1);

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);

	return 0;
}
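
The returned TCI is what the stack-facing receive helpers expect. A sketch of the hand-off, assuming <linux/if_vlan.h> is visible via ice.h and using 802.1Q for simplicity (the real driver chooses between 802.1Q and 802.1ad based on the enabled offloads):

/* Hypothetical: attach a stripped VLAN tag, if any, to a completed skb. */
static inline void
ice_put_vlan_sketch(const union ice_32b_rx_flex_desc *rx_desc,
		    struct sk_buff *skb)
{
	u16 vlan_tci = ice_get_vlan_tci(rx_desc);

	if (vlan_tci)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}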

/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * This function updates the XDP Tx ring tail register.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * returns index of descriptor that had RS bit produced on
 */
static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	return rs_idx;
}
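
These two helpers pair up when a burst of XDP Tx descriptors is finalized: one descriptor in the burst gets RS so hardware will report a completion, then the tail write publishes everything produced so far. The wrapper below is only a sketch of that pairing, built from the two functions above:

/* Hypothetical end-of-batch flush. The wmb() inside
 * ice_xdp_ring_update_tail() orders the descriptor writes (including the
 * RS bit) before hardware sees the new tail value.
 */
static inline void ice_xdp_flush_sketch(struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = ice_set_rs_bit(xdp_ring);

	ice_xdp_ring_update_tail(xdp_ring);
	/* a real caller records rs_idx so the cleanup path knows which
	 * descriptor's completion to poll for this batch
	 */
	(void)rs_idx;
}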

void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
			bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb);
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);

static inline void
ice_xdp_meta_set_desc(struct xdp_buff *xdp,
		      union ice_32b_rx_flex_desc *eop_desc)
{
	struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
						    xdp_buff);

	xdp_ext->eop_desc = eop_desc;
}
#endif /* !_ICE_TXRX_LIB_H_ */

v6.9.4 (same file): identical to the v6.13.7 copy above apart from two additions, both shown below: the multi-buffer helper ice_set_rx_bufs_act(), placed right after the include, and the ice_xmit_xdp_buff() declaration.

/**
 * ice_set_rx_bufs_act - propagate Rx buffer action to frags
 * @xdp: XDP buffer representing frame (linear and frags part)
 * @rx_ring: Rx ring struct
 * @act: action to store onto Rx buffers related to XDP buffer parts
 *
 * Set action that should be taken before putting Rx buffer from first frag
 * to the last.
 */
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
		    const unsigned int act)
{
	u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
	u32 nr_frags = rx_ring->nr_frags + 1;
	u32 idx = rx_ring->first_desc;
	u32 cnt = rx_ring->count;
	struct ice_rx_buf *buf;

	for (int i = 0; i < nr_frags; i++) {
		buf = &rx_ring->rx_buf[idx];
		buf->act = act;

		if (++idx == cnt)
			idx = 0;
	}

	/* adjust pagecnt_bias on frags freed by XDP prog */
	if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
		u32 delta = rx_ring->nr_frags - sinfo_frags;

		while (delta) {
			if (idx == 0)
				idx = cnt - 1;
			else
				idx--;
			buf = &rx_ring->rx_buf[idx];
			buf->pagecnt_bias--;
			delta--;
		}
	}
}
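
A hedged sketch of the call-site pattern: once the XDP verdict for a (possibly multi-buffer) frame is known, it is fanned out to every Rx buffer backing that frame. The mapping function is invented for illustration; XDP_PASS/XDP_DROP are the standard BPF verdicts and ICE_XDP_PASS/ICE_XDP_CONSUMED are the driver constants, the latter tested in the helper above:

/* Hypothetical verdict fan-out for the frame currently held in xdp. */
static inline void
ice_store_verdict_sketch(struct xdp_buff *xdp, struct ice_rx_ring *rx_ring,
			 u32 xdp_verdict)
{
	switch (xdp_verdict) {
	case XDP_PASS:
		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_PASS);
		break;
	default:	/* XDP_DROP, errors, etc. */
		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
		break;
	}
}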

/* The second v6.9.4-only difference: this declaration sits with the other
 * xmit prototypes (v6.13.7 keeps only __ice_xmit_xdp_ring()):
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);