v4.17
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * the memory outside of 'mask->range' is left uninitialized. This can
	 * be used as an optimization when further operations on 'dst' only
	 * use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
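
/*
 * Editorial note (not in the original source): the masking above works
 * long-word by long-word over just the byte range the mask marks as
 * relevant. For example, a mask covering an IPv4 destination with
 * 255.255.255.0 turns a source key holding 192.168.1.77 into a masked
 * key holding 192.168.1.0, so all packets addressed anywhere in
 * 192.168.1.0/24 match the same masked key.
 */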

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
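
/*
 * Editorial note (not in the original source): flex_array backs the
 * bucket array with individual pages rather than one contiguous
 * allocation, so a large table (the bucket count doubles on each
 * expansion) does not need a high-order allocation that could fail on
 * a fragmented system. flex_array_prealloc() pins every part up front
 * so the flex_array_get() calls below cannot fail later.
 */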

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}


static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
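
/*
 * Editorial note (not in the original source): 'bucket' and 'last'
 * form a resumable cursor for flow dumps. A caller might loop roughly
 * like this (sketch; 'emit' is a placeholder):
 *
 *	u32 bucket = 0, last = 0;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		emit(flow);
 *
 * An interrupted dump resumes by skipping the first 'last' entries of
 * bucket 'bucket', at the cost of re-walking each bucket's list head.
 */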

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}
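
/*
 * Editorial note (not in the original source): n_buckets is always a
 * power of two (TBL_MIN_BUCKETS is 1024 and expansion doubles it), so
 * 'hash & (n_buckets - 1)' is equivalent to 'hash % n_buckets'. Mixing
 * in the per-instance random hash_seed first means two table instances
 * distribute the same flows into different buckets.
 */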

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
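
/*
 * Editorial note (not in the original source): every flow embeds two
 * hlist nodes per table (node[0]/node[1]), and the new instance uses
 * the opposite node_ver, so a flow can be linked into the new buckets
 * while RCU readers are still traversing it through the old ones.
 * Setting keep_flows makes table_instance_destroy() skip freeing the
 * flows when the old instance is finally reclaimed.
 */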

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
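
/*
 * Editorial note (not in the original source): only the bytes inside
 * 'range' feed the hash, and the masked lookup below hashes an
 * already-masked key, so any two packets that agree on the fields a
 * mask cares about hash to the same value regardless of the fields it
 * wildcards.
 */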

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
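
/*
 * Editorial note (not in the original source): accumulating the XOR of
 * each long-sized word into 'diffs' and testing once at the end avoids
 * a branch per word; the BUILD_BUG_ONs in ovs_flow_init() guarantee
 * the key is long-aligned and a whole number of longs, which this
 * word-at-a-time walk relies on.
 */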

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
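
/*
 * Editorial note (not in the original source): this is the megaflow
 * lookup: one hash-table probe per mask, walked in mask-list order,
 * stopping at the first hit. '*n_mask_hit' reports how many masks were
 * tried (a hit on the third mask yields 3) and feeds the datapath's
 * megaflow statistics; the cost of a miss grows with the number of
 * distinct masks, not with the number of flows.
 */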

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;
		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
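
/*
 * Editorial note (not in the original source): masks are deduplicated
 * by content, so a thousand flows sharing one wildcard pattern add a
 * single mask-list entry whose ref_count reaches 1000, and the lookup
 * loop in ovs_flow_tbl_lookup_stats() stays short as long as the set
 * of distinct masks is small.
 */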

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}
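
/*
 * Editorial note (not in the original source): the growth policy keeps
 * the average chain length near one by doubling the bucket array once
 * the table holds more flows than buckets, while the periodic same-size
 * rehash picks a fresh hash_seed so any unlucky (or adversarial) bucket
 * clustering dissolves every REHASH_INTERVAL.
 */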

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}
v4.6
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * the memory outside of 'mask->range' is left uninitialized. This can
	 * be used as an optimization when further operations on 'dst' only
	 * use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
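
/*
 * Editorial note (not in the original source): unlike the v4.17 code
 * above, this version keeps one stats slot per NUMA node rather than
 * per CPU: the flow_cache objects reserve nr_node_ids pointers (see
 * ovs_flow_init() below), stats_last_writer holds a node id (hence the
 * NUMA_NO_NODE initializer), and the remote-node slots start out NULL.
 */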

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}


static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;
		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_node_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}