/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

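/*
 * A cached flow entry.  While an entry is live it sits on a per-cpu hash
 * chain (u.hlist); once it is scheduled for destruction it is moved to a
 * garbage-collection list (u.gc_list), so the two links can share storage.
 * The genid snapshot lets lookups detect entries invalidated by a global
 * flow_cache_genid bump without walking the whole cache.
 */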
struct flow_cache_entry {
        union {
                struct hlist_node hlist;
                struct list_head gc_list;
        } u;
        struct net *net;
        u16 family;
        u8 dir;
        u32 genid;
        struct flowi key;
        struct flow_cache_object *object;
};

struct flow_cache_percpu {
        struct hlist_head *hash_table;
        int hash_count;
        u32 hash_rnd;
        int hash_rnd_recalc;
        struct tasklet_struct flush_tasklet;
};

struct flow_flush_info {
        struct flow_cache *cache;
        atomic_t cpuleft;
        struct completion completion;
};

struct flow_cache {
        u32 hash_shift;
        struct flow_cache_percpu __percpu *percpu;
        struct notifier_block hotcpu_notifier;
        int low_watermark;
        int high_watermark;
        struct timer_list rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

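/*
 * Timer callback: ask every cpu to pick a new hash secret on its next
 * lookup (hash_rnd_recalc) and re-arm the timer.  The rekey itself is
 * done lazily from the lookup path with BHs disabled, so nothing here
 * has to touch another cpu's hash table directly.
 */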
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle)
{
        if (atomic_read(&flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

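/*
 * Deferred destruction of cache entries.  Entries are unhashed in softirq
 * context and parked on flow_cache_gc_list; this work item later drains the
 * list and frees the entries (and their objects) from process context,
 * keeping the bulk of the destruction work out of softirq.
 */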
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&flow_cache_gc_lock);
        list_splice_tail_init(&flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
                flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list)
{
        if (deleted) {
                fcp->hash_count -= deleted;
                spin_lock_bh(&flow_cache_gc_lock);
                list_splice_tail(gc_list, &flow_cache_gc_list);
                spin_unlock_bh(&flow_cache_gc_lock);
                schedule_work(&flow_cache_gc_work);
        }
}

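/*
 * Shrink one cpu's hash table: keep at most shrink_to still-valid entries
 * per bucket and queue everything else (including entries invalidated by a
 * genid bump or a failed ->check()) for garbage collection.
 */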
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          size_t keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp. But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
                            size_t keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

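/*
 * Main lookup path: the per-cpu hash table is searched with BHs disabled;
 * on a miss, or when the cached entry is stale, the caller-supplied
 * resolver is invoked to build a fresh flow_cache_object, which is then
 * cached and returned.  As a rough, illustrative sketch only (the names
 * below are made up; in-tree, xfrm is the main user of this interface),
 * a caller looks roughly like:
 *
 *      static struct flow_cache_object *my_resolver(struct net *net,
 *                      const struct flowi *key, u16 family, u8 dir,
 *                      struct flow_cache_object *old_obj, void *ctx)
 *      {
 *              // Reuse or replace old_obj; the returned object must
 *              // provide ->ops->get/check/delete.  Return an ERR_PTR()
 *              // on failure.
 *      }
 *
 *      flo = flow_cache_lookup(net, &fl, family, dir, my_resolver, ctx);
 *      if (IS_ERR(flo))
 *              // fall back to resolving without the cache
 */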
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
        size_t keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init? Making flow_cache_init a
         * pre-smp initcall would solve this. --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (flo && !IS_ERR(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();
        tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

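/*
 * Flush stale entries on every online cpu.  Each cpu runs
 * flow_cache_flush_tasklet() against its own hash table (the local cpu
 * directly, remote cpus via an IPI-scheduled tasklet) and the caller
 * sleeps on the completion until all of them have finished.
 */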
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&flow_flush_sem);
        info.cache = &flow_cache_global;
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        put_online_cpus();
}

static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
                                    unsigned long action,
                                    void *hcpu)
{
        struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
        int res, cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                res = flow_cache_cpu_prepare(fc, cpu);
                if (res)
                        return notifier_from_errno(res);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                __flow_cache_shrink(fc, fcp, 0);
                break;
        }
        return NOTIFY_OK;
}

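/*
 * One-time setup: 1 << hash_shift (1024) buckets per cpu, with the low and
 * high watermarks at 2x and 4x the bucket count.  Per-cpu tables are
 * allocated for the cpus that are online now; the hotplug notifier covers
 * cpus that come up later, and the rekey timer is armed last.
 */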
static int __init flow_cache_init(struct flow_cache *fc)
{
        int i;

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        for_each_online_cpu(i) {
                if (flow_cache_cpu_prepare(fc, i))
                        return -ENOMEM;
        }
        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
        register_hotcpu_notifier(&fc->hotcpu_notifier);

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;
}

static int __init flow_cache_init_global(void)
{
        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_PANIC, NULL);

        return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

struct flow_cache_entry {
        union {
                struct hlist_node hlist;
                struct list_head gc_list;
        } u;
        struct net *net;
        u16 family;
        u8 dir;
        u32 genid;
        struct flowi key;
        struct flow_cache_object *object;
};

struct flow_flush_info {
        struct flow_cache *cache;
        atomic_t cpuleft;
        struct completion completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle,
                            struct netns_xfrm *xfrm)
{
        if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle,
                            struct netns_xfrm *xfrm)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

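/*
 * Per-namespace garbage collection.  The work item drains the namespace's
 * flow_cache_gc_list and decrements flow_cache_gc_count for every entry it
 * frees; the lookup path uses that counter to throttle new allocations when
 * the GC backlog grows too large.
 */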
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                               flow_cache_gc_work);

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&xfrm->flow_cache_gc_lock);
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
                atomic_dec(&xfrm->flow_cache_gc_count);
        }
}

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
{
        if (deleted) {
                atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
                spin_unlock_bh(&xfrm->flow_cache_gc_lock);
                schedule_work(&xfrm->flow_cache_gc_work);
        }
}

static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                               flow_cache_global);

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle, xfrm)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          size_t keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp. But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
                            size_t keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

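/*
 * Namespace-aware lookup: the cache, genid and GC state all live in
 * net->xfrm.  A miss is refused with ERR_PTR(-ENOBUFS) when the pending GC
 * backlog already exceeds 2 * num_online_cpus() * high_watermark, so a
 * burst of new flows cannot outrun the garbage collector.
 */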
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &net->xfrm.flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
        size_t keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init? Making flow_cache_init a
         * pre-smp initcall would solve this. --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                if (atomic_read(&net->xfrm.flow_cache_gc_count) >
                    2 * num_online_cpus() * fc->high_watermark) {
                        flo = ERR_PTR(-ENOBUFS);
                        goto ret_object;
                }

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (!IS_ERR_OR_NULL(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                               flow_cache_global);

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle, xfrm))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp;
        int i;

        fcp = per_cpu_ptr(fc->percpu, cpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++)
                if (!hlist_empty(&fcp->hash_table[i]))
                        return 0;
        return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;

        tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

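/*
 * Flush one namespace's flow cache.  Only cpus whose per-cpu table is
 * non-empty are put in the cpumask and sent the flush IPI; if every table
 * is already empty the function returns without scheduling any work.
 */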
void flow_cache_flush(struct net *net)
{
        struct flow_flush_info info;
        cpumask_var_t mask;
        int i, self;

        /* Track which cpus need flushing to avoid disturbing all cores. */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;
        cpumask_clear(mask);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&net->xfrm.flow_flush_sem);
        info.cache = &net->xfrm.flow_cache_global;
        for_each_online_cpu(i)
                if (!flow_cache_percpu_empty(info.cache, i))
                        cpumask_set_cpu(i, mask);
        atomic_set(&info.cpuleft, cpumask_weight(mask));
        if (atomic_read(&info.cpuleft) == 0)
                goto done;

        init_completion(&info.completion);

        local_bh_disable();
        self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
        on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
        if (self)
                flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);

done:
        mutex_unlock(&net->xfrm.flow_flush_sem);
        put_online_cpus();
        free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                               flow_cache_flush_work);
        struct net *net = container_of(xfrm, struct net, xfrm);

        flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
        schedule_work(&net->xfrm.flow_cache_flush_work);
}

static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

        return flow_cache_cpu_prepare(fc, cpu);
}

static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        __flow_cache_shrink(fc, fcp, 0);
        return 0;
}

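/*
 * Per-namespace setup.  The flow_cache_entry slab is shared by all
 * namespaces and created on first use; everything else (GC list and lock,
 * flush work, watermarks, per-cpu tables, cpuhp instance, rekey timer) is
 * initialised per net.  On failure the per-cpu hash tables and the percpu
 * allocation are torn down again and -ENOMEM is returned.
 */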
int flow_cache_init(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        if (!flow_cachep)
                flow_cachep = kmem_cache_create("flow_cache",
                                                sizeof(struct flow_cache_entry),
                                                0, SLAB_PANIC, NULL);
        spin_lock_init(&net->xfrm.flow_cache_gc_lock);
        INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
        atomic_set(&net->xfrm.flow_cache_gc_count, 0);

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
                goto err;

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;

err:
        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;

        return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

void flow_cache_fini(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        del_timer_sync(&fc->rnd_timer);

        cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

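/*
 * Boot-time registration of the multi-instance cpuhp state; each
 * namespace's flow cache later attaches itself to it with
 * cpuhp_state_add_instance() in flow_cache_init().
 */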
void __init flow_cache_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
                                      "net/flow:prepare",
                                      flow_cache_cpu_up_prep,
                                      flow_cache_cpu_dead);
        WARN_ON(ret < 0);
}
513}