/*
 * include/net/gro_cells.h — GRO cells helper.
 * Two kernel versions of this header follow; this first copy is from
 * Linux v4.6. (Non-code page chrome from the scrape removed.)
 */
#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

 8struct gro_cell {
 9	struct sk_buff_head	napi_skbs;
10	struct napi_struct	napi;
11};
12
13struct gro_cells {
14	struct gro_cell __percpu	*cells;
 
15};
16
17static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
18{
19	struct gro_cell *cell;
20	struct net_device *dev = skb->dev;
21
22	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
23		netif_rx(skb);
24		return;
25	}
26
27	cell = this_cpu_ptr(gcells->cells);
 
28
29	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
30		atomic_long_inc(&dev->rx_dropped);
31		kfree_skb(skb);
32		return;
33	}
34
 
 
 
35	__skb_queue_tail(&cell->napi_skbs, skb);
36	if (skb_queue_len(&cell->napi_skbs) == 1)
37		napi_schedule(&cell->napi);
 
 
38}
39
40/* called under BH context */
41static inline int gro_cell_poll(struct napi_struct *napi, int budget)
42{
43	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
44	struct sk_buff *skb;
45	int work_done = 0;
46
 
47	while (work_done < budget) {
48		skb = __skb_dequeue(&cell->napi_skbs);
49		if (!skb)
50			break;
 
51		napi_gro_receive(napi, skb);
52		work_done++;
 
53	}
54
55	if (work_done < budget)
56		napi_complete_done(napi, work_done);
 
57	return work_done;
58}
59
60static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
61{
62	int i;
63
64	gcells->cells = alloc_percpu(struct gro_cell);
 
 
 
65	if (!gcells->cells)
66		return -ENOMEM;
67
68	for_each_possible_cpu(i) {
69		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
70
71		__skb_queue_head_init(&cell->napi_skbs);
72		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
73		napi_enable(&cell->napi);
74	}
75	return 0;
76}
77
78static inline void gro_cells_destroy(struct gro_cells *gcells)
79{
 
80	int i;
81
82	if (!gcells->cells)
83		return;
84	for_each_possible_cpu(i) {
85		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
86
87		netif_napi_del(&cell->napi);
88		__skb_queue_purge(&cell->napi_skbs);
89	}
90	free_percpu(gcells->cells);
91	gcells->cells = NULL;
92}
93
#endif
/* ---- Older variant of this header (Linux v3.15) follows. Because it
 * repeats the _NET_GRO_CELLS_H guard, it is skipped entirely when both
 * copies are compiled together. ---- */
#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

  8struct gro_cell {
  9	struct sk_buff_head	napi_skbs;
 10	struct napi_struct	napi;
 11} ____cacheline_aligned_in_smp;
 12
 13struct gro_cells {
 14	unsigned int		gro_cells_mask;
 15	struct gro_cell		*cells;
 16};
 17
 18static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 19{
 20	struct gro_cell *cell = gcells->cells;
 21	struct net_device *dev = skb->dev;
 22
 23	if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
 24		netif_rx(skb);
 25		return;
 26	}
 27
 28	if (skb_rx_queue_recorded(skb))
 29		cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
 30
 31	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
 32		atomic_long_inc(&dev->rx_dropped);
 33		kfree_skb(skb);
 34		return;
 35	}
 36
 37	/* We run in BH context */
 38	spin_lock(&cell->napi_skbs.lock);
 39
 40	__skb_queue_tail(&cell->napi_skbs, skb);
 41	if (skb_queue_len(&cell->napi_skbs) == 1)
 42		napi_schedule(&cell->napi);
 43
 44	spin_unlock(&cell->napi_skbs.lock);
 45}
 46
 47/* called unser BH context */
 48static inline int gro_cell_poll(struct napi_struct *napi, int budget)
 49{
 50	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
 51	struct sk_buff *skb;
 52	int work_done = 0;
 53
 54	spin_lock(&cell->napi_skbs.lock);
 55	while (work_done < budget) {
 56		skb = __skb_dequeue(&cell->napi_skbs);
 57		if (!skb)
 58			break;
 59		spin_unlock(&cell->napi_skbs.lock);
 60		napi_gro_receive(napi, skb);
 61		work_done++;
 62		spin_lock(&cell->napi_skbs.lock);
 63	}
 64
 65	if (work_done < budget)
 66		napi_complete(napi);
 67	spin_unlock(&cell->napi_skbs.lock);
 68	return work_done;
 69}
 70
 71static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
 72{
 73	int i;
 74
 75	gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
 76	gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
 77				sizeof(struct gro_cell),
 78				GFP_KERNEL);
 79	if (!gcells->cells)
 80		return -ENOMEM;
 81
 82	for (i = 0; i <= gcells->gro_cells_mask; i++) {
 83		struct gro_cell *cell = gcells->cells + i;
 84
 85		skb_queue_head_init(&cell->napi_skbs);
 86		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
 87		napi_enable(&cell->napi);
 88	}
 89	return 0;
 90}
 91
 92static inline void gro_cells_destroy(struct gro_cells *gcells)
 93{
 94	struct gro_cell *cell = gcells->cells;
 95	int i;
 96
 97	if (!cell)
 98		return;
 99	for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
 
 
100		netif_napi_del(&cell->napi);
101		skb_queue_purge(&cell->napi_skbs);
102	}
103	kfree(gcells->cells);
104	gcells->cells = NULL;
105}
106
#endif