// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include <net/gro.h>
#include "vlan.h"

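/*
 * RX-path entry point for VLAN-tagged skbs: look up the VLAN device that
 * matches the tag carried on skb->dev, retarget the skb to it, re-insert
 * the header if the VLAN device runs without REORDER_HDR, then clear the
 * hwaccel tag and account the packet in the VLAN device's per-CPU stats.
 * Returns false when no VLAN device claims the tag or the skb was dropped.
 */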
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point at the
		 * mac header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	u64_stats_inc(&rx_stats->rx_packets);
	u64_stats_add(&rx_stats->rx_bytes, skb->len);
	if (skb->pkt_type == PACKET_MULTICAST)
		u64_stats_inc(&rx_stats->rx_multicast);
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

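/*
 * Look up the VLAN device for (vlan_proto, vlan_id) on top of @dev,
 * recursing to the master upper device (e.g. bond or team) when @dev
 * itself carries no vlan_info.
 */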
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					     __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

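/* Walk a (possibly stacked) VLAN device down to the underlying real device. */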
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

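/* True when @dev can filter the given VLAN protocol (CTAG/STAG) in hardware. */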
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

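/*
 * Helpers that program @vid into, or remove it from, @dev's hardware VLAN
 * filter. They are no-ops when @dev cannot filter @proto in hardware and
 * return -ENODEV when the device is not present.
 */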
static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}

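/*
 * Call @action for every VLAN device configured on top of @dev, stopping at
 * the first non-zero return value. Caller must hold RTNL.
 */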
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);

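/*
 * Replay every VID of @proto from @vlan_info into the real device's hardware
 * filter, unwinding already-pushed entries on failure. Paired with
 * vlan_filter_drop_vids() below.
 */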
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

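/*
 * Take a reference on (proto, vid) for @dev, allocating dev->vlan_info and
 * programming the hardware filter on first use. Each successful call must
 * be balanced by vlan_vid_del(). Caller must hold RTNL.
 */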
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

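/*
 * Drop one reference on (proto, vid). The last reference removes the entry
 * from the hardware filter and, once no VIDs remain, frees vlan_info after
 * an RCU grace period. Caller must hold RTNL.
 */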
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

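/*
 * Copy every VID known on @by_dev onto @dev (typically when one device is
 * stacked on another, e.g. a bond enslaving a port), unwinding on failure.
 */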
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

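/*
 * True if at least one VLAN device is configured on top of @dev.
 * Caller must hold RTNL.
 */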
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

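/*
 * GRO callbacks for 802.1Q/802.1AD: pull the VLAN header, require it to
 * match within a flow, and hand the encapsulated protocol to its own
 * offload handler.
 */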
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header(skb, hlen, off_vlan);
	if (unlikely(!vhdr))
		goto out;

	type = vhdr->h_vlan_encapsulated_proto;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
					    ipv6_gro_receive, inet_gro_receive,
					    head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, nhoff + sizeof(*vhdr));

	return err;
}

static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}

fs_initcall(vlan_offload_init);