1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019, Intel Corporation. */
3
4#ifndef _ICE_XSK_H_
5#define _ICE_XSK_H_
6#include "ice_txrx.h"
7
8#define PKTS_PER_BATCH 8
9
10#ifdef __clang__
11#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
12#elif __GNUC__ >= 8
13#define loop_unrolled_for _Pragma("GCC unroll 8") for
14#else
15#define loop_unrolled_for for
16#endif
17
18struct ice_vsi;
19
20#ifdef CONFIG_XDP_SOCKETS
21int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
22 u16 qid);
23int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
24int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
25bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
26bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
27void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
28void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
29bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
30int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
31#else
32static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
33{
34 return false;
35}
36
37static inline int
38ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
39 struct xsk_buff_pool __always_unused *pool,
40 u16 __always_unused qid)
41{
42 return -EOPNOTSUPP;
43}
44
45static inline int
46ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
47 int __always_unused budget)
48{
49 return 0;
50}
51
52static inline bool
53ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
54 u16 __always_unused count)
55{
56 return false;
57}
58
59static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
60{
61 return false;
62}
63
64static inline int
65ice_xsk_wakeup(struct net_device __always_unused *netdev,
66 u32 __always_unused queue_id, u32 __always_unused flags)
67{
68 return -EOPNOTSUPP;
69}
70
/* No-op stub: there are no ZC Rx buffers to clean without XDP sockets. */
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
/* No-op stub: there are no ZC Tx buffers to clean without XDP sockets. */
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }

74static inline int
75ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
76 bool __always_unused zc)
77{
78 return 0;
79}
80#endif /* CONFIG_XDP_SOCKETS */
81#endif /* !_ICE_XSK_H_ */
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019, Intel Corporation. */
3
4#ifndef _ICE_XSK_H_
5#define _ICE_XSK_H_
6#include "ice_txrx.h"
7#include "ice.h"
8
9struct ice_vsi;
10
11#ifdef CONFIG_XDP_SOCKETS
12int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
13int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
14bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
15int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
16bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
17bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
18void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
19void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
20#else
21static inline int
22ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
23 struct xdp_umem __always_unused *umem,
24 u16 __always_unused qid)
25{
26 return -EOPNOTSUPP;
27}
28
29static inline int
30ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
31 int __always_unused budget)
32{
33 return 0;
34}
35
36static inline bool
37ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
38 int __always_unused budget)
39{
40 return false;
41}
42
43static inline bool
44ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
45 u16 __always_unused count)
46{
47 return false;
48}
49
50static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
51{
52 return false;
53}
54
55static inline int
56ice_xsk_wakeup(struct net_device __always_unused *netdev,
57 u32 __always_unused queue_id, u32 __always_unused flags)
58{
59 return -EOPNOTSUPP;
60}
61
62#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
63#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
64#endif /* CONFIG_XDP_SOCKETS */
65#endif /* !_ICE_XSK_H_ */