// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_wwan.h"

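/* Frames on these links are raw IP datagrams with no L2 header (ARPHRD_NONE),
 * so the IP version of a received frame is taken from the upper nibble of its
 * first byte: 0x4x for IPv4, 0x6x for IPv6.
 */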
#define IOSM_IP_TYPE_MASK 0xF0
#define IOSM_IP_TYPE_IPV4 0x40
#define IOSM_IP_TYPE_IPV6 0x60

#define IOSM_IF_ID_PAYLOAD 2

/**
 * struct iosm_netdev_priv - netdev WWAN driver specific private data
 * @ipc_wwan: Pointer to iosm_wwan struct
 * @netdev: Pointer to network interface device structure
 * @if_id: Interface id for device.
 * @ch_id: IPC channel number for which interface device is created.
 */
struct iosm_netdev_priv {
	struct iosm_wwan *ipc_wwan;
	struct net_device *netdev;
	int if_id;
	int ch_id;
};

/**
 * struct iosm_wwan - This structure contains information about WWAN root device
 *		      and interface to the IPC layer.
 * @ipc_imem: Pointer to imem data-struct
 * @sub_netlist: Array of active netdevs, indexed by IP MUX session id
 * @dev: Pointer to device structure
 * @if_mutex: Mutex used for add and remove interface id
 */
struct iosm_wwan {
	struct iosm_imem *ipc_imem;
	struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
	struct device *dev;
	struct mutex if_mutex; /* Mutex used for add and remove interface id */
};

/* Bring-up the wwan net link */
static int ipc_wwan_link_open(struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
	int if_id = priv->if_id;
	int ret;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	mutex_lock(&ipc_wwan->if_mutex);

	/* get channel id */
	priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);

	if (priv->ch_id < 0) {
		dev_err(ipc_wwan->dev,
			"cannot connect wwan0 & id %d to the IPC mem layer",
			if_id);
		ret = -ENODEV;
		goto out;
	}

	/* enable tx path, DL data may follow */
	netif_start_queue(netdev);

	dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
		priv->ch_id, priv->if_id);

	ret = 0;
out:
	mutex_unlock(&ipc_wwan->if_mutex);
	return ret;
}

/* Bring-down the wwan net link */
static int ipc_wwan_link_stop(struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);

	netif_stop_queue(netdev);

	mutex_lock(&priv->ipc_wwan->if_mutex);
	ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
				priv->ch_id);
	priv->ch_id = -1;
	mutex_unlock(&priv->ipc_wwan->if_mutex);

	return 0;
}

/* Transmit a packet */
static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
					  struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
	unsigned int len = skb->len;
	int if_id = priv->if_id;
	int ret;

	/* Interface IDs from 1 to 8 are for IP data
	 * & from 257 to 261 are for non-IP data
	 */
	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)) {
		/* Drop the skb instead of leaking it on an invalid session */
		ret = -EINVAL;
		goto exit;
	}

	/* Send the SKB to device for transmission */
	ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
					 if_id, priv->ch_id, skb);

	/* Return code of zero is success */
	if (ret == 0) {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += len;
		ret = NETDEV_TX_OK;
	} else if (ret == -EBUSY) {
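		/* The skb was not consumed; NETDEV_TX_BUSY tells the core to
		 * requeue it and retry the transmission later.
		 */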
		ret = NETDEV_TX_BUSY;
		dev_err(ipc_wwan->dev, "unable to push packets");
	} else {
		goto exit;
	}

	return ret;

exit:
	/* Log any skb drop */
	if (if_id)
		dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
			ret);

	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Ops structure for wwan net link */
static const struct net_device_ops ipc_inm_ops = {
	.ndo_open = ipc_wwan_link_open,
	.ndo_stop = ipc_wwan_link_stop,
	.ndo_start_xmit = ipc_wwan_link_transmit,
};

/* Setup function for creating new net link */
static void ipc_wwan_setup(struct net_device *iosm_dev)
{
	iosm_dev->header_ops = NULL;
	iosm_dev->hard_header_len = 0;
	iosm_dev->priv_flags |= IFF_NO_QUEUE;

	iosm_dev->type = ARPHRD_NONE;
	iosm_dev->mtu = ETH_DATA_LEN;
	iosm_dev->min_mtu = ETH_MIN_MTU;
	iosm_dev->max_mtu = ETH_MAX_MTU;

	iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	iosm_dev->netdev_ops = &ipc_inm_ops;
}

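/* Link creation and removal below is driven by the WWAN core and runs under
 * RTNL, which serializes it with the register/unregister of the netdevs.
 */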
/* Create new wwan net link */
static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
			    u32 if_id, struct netlink_ext_ack *extack)
{
	struct iosm_wwan *ipc_wwan = ctxt;
	struct iosm_netdev_priv *priv;
	int err;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	priv = wwan_netdev_drvpriv(dev);
	priv->if_id = if_id;
	priv->netdev = dev;
	priv->ipc_wwan = ipc_wwan;

	mutex_lock(&ipc_wwan->if_mutex);
	if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
		err = -EBUSY;
		goto out_unlock;
	}

	err = register_netdevice(dev);
	if (err)
		goto out_unlock;

	rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
	mutex_unlock(&ipc_wwan->if_mutex);

	netif_device_attach(dev);

	return 0;

out_unlock:
	mutex_unlock(&ipc_wwan->if_mutex);
	return err;
}

static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
			     struct list_head *head)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
	struct iosm_wwan *ipc_wwan = ctxt;
	int if_id = priv->if_id;

	if (WARN_ON(if_id < IP_MUX_SESSION_START ||
		    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
		return;

	mutex_lock(&ipc_wwan->if_mutex);

	if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
		goto unlock;

	RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
	/* unregistering includes synchronize_net() */
	unregister_netdevice_queue(dev, head);

unlock:
	mutex_unlock(&ipc_wwan->if_mutex);
}

static const struct wwan_ops iosm_wwan_ops = {
	.priv_size = sizeof(struct iosm_netdev_priv),
	.setup = ipc_wwan_setup,
	.newlink = ipc_wwan_newlink,
	.dellink = ipc_wwan_dellink,
};
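
/* Illustrative only: with these ops registered, additional IP MUX links can be
 * added from userspace through the generic wwan rtnl link type, roughly:
 *   ip link add dev wwan0-1 parentdev wwan0 type wwan linkid 1
 * (the exact iproute2 syntax depends on the iproute2 version in use).
 */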

int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
		     bool dss, int if_id)
{
	struct sk_buff *skb = skb_arg;
	struct net_device_stats *stats;
	struct iosm_netdev_priv *priv;
	int ret;

	if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
		 IOSM_IP_TYPE_IPV6)
		skb->protocol = htons(ETH_P_IPV6);

	skb->pkt_type = PACKET_HOST;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id > IP_MUX_SESSION_END) {
		ret = -EINVAL;
		goto free;
	}

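	/* The netdev for this session may be removed concurrently by
	 * ipc_wwan_dellink(), so resolve it under the RCU read lock.
	 */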
	rcu_read_lock();
	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
	if (!priv) {
		ret = -EINVAL;
		goto unlock;
	}
	skb->dev = priv->netdev;
	stats = &priv->netdev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	ret = netif_rx(skb);
	skb = NULL;
unlock:
	rcu_read_unlock();
free:
	dev_kfree_skb(skb);
	return ret;
}

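/* Stop (on == true) or resume (on == false) the netdev TX queue of the given
 * session when the lower IPC/mux layer signals or releases flow control.
 */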
void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
{
	struct net_device *netdev;
	struct iosm_netdev_priv *priv;
	bool is_tx_blk;

	rcu_read_lock();
	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
	if (!priv) {
		rcu_read_unlock();
		return;
	}

	netdev = priv->netdev;

	is_tx_blk = netif_queue_stopped(netdev);

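	/* Only toggle the queue when its state actually changes, so repeated
	 * flow-control notifications do not cause redundant stop/wake calls.
	 */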
	if (on)
		dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
			if_id);

	if (on && !is_tx_blk)
		netif_stop_queue(netdev);
	else if (!on && is_tx_blk)
		netif_wake_queue(netdev);
	rcu_read_unlock();
}

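/* Registering the ops with a default link id makes the WWAN core create the
 * default IP MUX netdev immediately, which invokes ipc_wwan_newlink(); the
 * if_mutex therefore has to be ready before wwan_register_ops() is called.
 */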
struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
{
	struct iosm_wwan *ipc_wwan;

	ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
	if (!ipc_wwan)
		return NULL;

	ipc_wwan->dev = dev;
	ipc_wwan->ipc_imem = ipc_imem;

	mutex_init(&ipc_wwan->if_mutex);

	/* WWAN core will create a netdev for the default IP MUX channel */
	if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
			      IP_MUX_SESSION_DEFAULT)) {
		mutex_destroy(&ipc_wwan->if_mutex);
		kfree(ipc_wwan);
		return NULL;
	}

	return ipc_wwan;
}

void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
{
	/* This call will remove all child netdev(s) */
	wwan_unregister_ops(ipc_wwan->dev);

	mutex_destroy(&ipc_wwan->if_mutex);

	kfree(ipc_wwan);
}