/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
#include "ice.h"

struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
12int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
13int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
14bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
15int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
16bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
17bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
18void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
19void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
21static inline int
22ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
23 struct xdp_umem __always_unused *umem,
24 u16 __always_unused qid)
25{
26 return -EOPNOTSUPP;
27}
28
29static inline int
30ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
31 int __always_unused budget)
32{
33 return 0;
34}
35
36static inline bool
37ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
38 int __always_unused budget)
39{
40 return false;
41}
42
43static inline bool
44ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
45 u16 __always_unused count)
46{
47 return false;
48}
49
50static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
51{
52 return false;
53}
54
55static inline int
56ice_xsk_wakeup(struct net_device __always_unused *netdev,
57 u32 __always_unused queue_id, u32 __always_unused flags)
58{
59 return -EOPNOTSUPP;
60}
61
62#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
63#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
#include "ice.h"

struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
12int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
13 u16 qid);
14int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
15bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
16int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
17bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
18bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
19void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
20void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
22static inline int
23ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
24 struct xsk_buff_pool __always_unused *pool,
25 u16 __always_unused qid)
26{
27 return -EOPNOTSUPP;
28}
29
30static inline int
31ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
32 int __always_unused budget)
33{
34 return 0;
35}
36
37static inline bool
38ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
39 int __always_unused budget)
40{
41 return false;
42}
43
44static inline bool
45ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
46 u16 __always_unused count)
47{
48 return false;
49}
50
51static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
52{
53 return false;
54}
55
56static inline int
57ice_xsk_wakeup(struct net_device __always_unused *netdev,
58 u32 __always_unused queue_id, u32 __always_unused flags)
59{
60 return -EOPNOTSUPP;
61}
62
/* No-op teardown stubs: nothing to clean when AF_XDP is compiled out. */
static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
}

static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */