// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
                     struct netlink_ext_ack *extack)
{
        struct net_device *dev = adapter->netdev;
        bool if_running = netif_running(dev);
        struct bpf_prog *old_prog;

        if (dev->mtu > ETH_DATA_LEN) {
                /* For now, the driver doesn't support XDP functionality with
                 * jumbo frames so we return error.
                 */
                NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
                return -EOPNOTSUPP;
        }

        if (if_running)
                igc_close(dev);

        old_prog = xchg(&adapter->xdp_prog, prog);
        if (old_prog)
                bpf_prog_put(old_prog);

        if (prog)
                xdp_features_set_redirect_target(dev, true);
        else
                xdp_features_clear_redirect_target(dev);

        if (if_running)
                igc_open(dev);

        return 0;
}
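
/* Illustration (not part of this file): igc_xdp_set_prog() runs when the
 * stack delivers an XDP_SETUP_PROG request for the device.  A minimal
 * userspace sketch that would trigger that path with libbpf follows;
 * "xdp_pass.o" and the helper name are placeholders, not anything the
 * driver provides.
 */
#if 0	/* userspace-side sketch, shown here for context only */
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <net/if.h>
#include <linux/if_link.h>

static int attach_xdp_example(const char *ifname)
{
        struct bpf_object *obj;
        struct bpf_program *prog;
        int ifindex = if_nametoindex(ifname);

        obj = bpf_object__open_file("xdp_pass.o", NULL);	/* placeholder object */
        if (!obj || bpf_object__load(obj))
                return -1;

        prog = bpf_object__next_program(obj, NULL);
        if (!prog)
                return -1;

        /* Native/driver-mode attach; the kernel forwards this to the device's
         * ndo_bpf handler, which is expected to end up in igc_xdp_set_prog().
         */
        return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
                              XDP_FLAGS_DRV_MODE, NULL);
}
#endif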

static int igc_xdp_enable_pool(struct igc_adapter *adapter,
                               struct xsk_buff_pool *pool, u16 queue_id)
{
        struct net_device *ndev = adapter->netdev;
        struct device *dev = &adapter->pdev->dev;
        struct igc_ring *rx_ring, *tx_ring;
        struct napi_struct *napi;
        bool needs_reset;
        u32 frame_size;
        int err;

        if (queue_id >= adapter->num_rx_queues ||
            queue_id >= adapter->num_tx_queues)
                return -EINVAL;

        frame_size = xsk_pool_get_rx_frame_size(pool);
        if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
                /* When XDP is enabled, the driver doesn't support frames that
                 * span over multiple buffers. To avoid that, we check if xsk
                 * frame size is big enough to fit the max ethernet frame size
                 * + vlan double tagging.
                 */
                return -EOPNOTSUPP;
        }

        err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
        if (err) {
                netdev_err(ndev, "Failed to map xsk pool\n");
                return err;
        }

        needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

        rx_ring = adapter->rx_ring[queue_id];
        tx_ring = adapter->tx_ring[queue_id];
        /* Rx and Tx rings share the same napi context. */
        napi = &rx_ring->q_vector->napi;

        if (needs_reset) {
                igc_disable_rx_ring(rx_ring);
                igc_disable_tx_ring(tx_ring);
                napi_disable(napi);
        }

        set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
        set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

        if (needs_reset) {
                napi_enable(napi);
                igc_enable_rx_ring(rx_ring);
                igc_enable_tx_ring(tx_ring);

                err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
                if (err) {
                        xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
                        return err;
                }
        }

        return 0;
}
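
/* igc_xdp_is_enabled() and IGC_RX_DMA_ATTR are declared in igc_xdp.h/igc.h and
 * are not shown in this file.  A minimal sketch of the helper, consistent with
 * how it is used above (an assumption; the real definition lives in the
 * header):
 *
 *	static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
 *	{
 *		return !!adapter->xdp_prog;
 *	}
 *
 * So the ring pair is only quiesced and restarted when the interface is
 * already running with XDP active; otherwise setting the AF_XDP flag bits is
 * enough, and the rings presumably pick up the pool the next time they are
 * configured.
 */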

static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
        struct igc_ring *rx_ring, *tx_ring;
        struct xsk_buff_pool *pool;
        struct napi_struct *napi;
        bool needs_reset;

        if (queue_id >= adapter->num_rx_queues ||
            queue_id >= adapter->num_tx_queues)
                return -EINVAL;

        pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
        if (!pool)
                return -EINVAL;

        needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

        rx_ring = adapter->rx_ring[queue_id];
        tx_ring = adapter->tx_ring[queue_id];
        /* Rx and Tx rings share the same napi context. */
        napi = &rx_ring->q_vector->napi;

        if (needs_reset) {
                igc_disable_rx_ring(rx_ring);
                igc_disable_tx_ring(tx_ring);
                napi_disable(napi);
        }

        xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
        clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
        clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

        if (needs_reset) {
                napi_enable(napi);
                igc_enable_rx_ring(rx_ring);
                igc_enable_tx_ring(tx_ring);
        }

        return 0;
}
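
/* Illustration (not part of this file): the pool enable/disable paths above
 * are driven by AF_XDP sockets binding to, or releasing, a queue in zero-copy
 * mode.  A rough userspace sketch using the raw UAPI; the UMEM registration
 * and ring mmap() steps are omitted, and the helper name is a placeholder.
 */
#if 0	/* userspace-side sketch, shown here for context only */
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_xdp.h>
#include <unistd.h>

static int bind_xsk_zc_example(const char *ifname, __u32 queue_id)
{
        struct sockaddr_xdp sxdp = {};
        int fd = socket(AF_XDP, SOCK_RAW, 0);

        if (fd < 0)
                return -1;

        /* ... XDP_UMEM_REG / ring setsockopt() and mmap() calls omitted ... */

        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_ifindex = if_nametoindex(ifname);
        sxdp.sxdp_queue_id = queue_id;
        sxdp.sxdp_flags = XDP_ZEROCOPY;	/* request the zero-copy path */

        /* bind() reaches the driver via an XDP_SETUP_XSK_POOL request and is
         * expected to land in igc_xdp_enable_pool(); closing the socket later
         * takes the disable path.
         */
        if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
                close(fd);
                return -1;
        }

        return fd;
}
#endif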

int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
                       u16 queue_id)
{
        return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
                      igc_xdp_disable_pool(adapter, queue_id);
}
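
/* Illustration (not part of this file): both entry points above are reached
 * through the driver's ndo_bpf callback.  A sketch of that dispatch, modeled
 * on what the handler in igc_main.c is expected to do (the function name here
 * is an assumption):
 */
#if 0	/* sketch of the ndo_bpf dispatch */
static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        struct igc_adapter *adapter = netdev_priv(dev);

        switch (bpf->command) {
        case XDP_SETUP_PROG:
                return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
        case XDP_SETUP_XSK_POOL:
                return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
                                          bpf->xsk.queue_id);
        default:
                return -EOPNOTSUPP;
        }
}
#endif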