net/openvswitch/flow_table.c (Linux v4.10.11)
 
  1/*
  2 * Copyright (c) 2007-2014 Nicira, Inc.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of version 2 of the GNU General Public
  6 * License as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but
  9 * WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public License
 14 * along with this program; if not, write to the Free Software
 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 16 * 02110-1301, USA
 17 */
 18
 19#include "flow.h"
 20#include "datapath.h"
 21#include "flow_netlink.h"
 22#include <linux/uaccess.h>
 23#include <linux/netdevice.h>
 24#include <linux/etherdevice.h>
 25#include <linux/if_ether.h>
 26#include <linux/if_vlan.h>
 27#include <net/llc_pdu.h>
 28#include <linux/kernel.h>
 29#include <linux/jhash.h>
 30#include <linux/jiffies.h>
 31#include <linux/llc.h>
 32#include <linux/module.h>
 33#include <linux/in.h>
 34#include <linux/rcupdate.h>
 35#include <linux/cpumask.h>
 36#include <linux/if_arp.h>
 37#include <linux/ip.h>
 38#include <linux/ipv6.h>
 39#include <linux/sctp.h>
 40#include <linux/tcp.h>
 41#include <linux/udp.h>
 42#include <linux/icmp.h>
 43#include <linux/icmpv6.h>
 44#include <linux/rculist.h>
 45#include <net/ip.h>
 46#include <net/ipv6.h>
 47#include <net/ndisc.h>
 48
 49#define TBL_MIN_BUCKETS		1024
 50#define REHASH_INTERVAL		(10 * 60 * HZ)
 51
 52static struct kmem_cache *flow_cache;
 53struct kmem_cache *flow_stats_cache __read_mostly;
 54
 55static u16 range_n_bytes(const struct sw_flow_key_range *range)
 56{
 57	return range->end - range->start;
 58}
 59
 60void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 61		       bool full, const struct sw_flow_mask *mask)
 62{
 63	int start = full ? 0 : mask->range.start;
 64	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
 65	const long *m = (const long *)((const u8 *)&mask->key + start);
 66	const long *s = (const long *)((const u8 *)src + start);
 67	long *d = (long *)((u8 *)dst + start);
 68	int i;
 69
 70	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
 71	 * if 'full' is false the memory outside of the 'mask->range' is left
 72	 * uninitialized. This can be used as an optimization when further
 73	 * operations on 'dst' only use contents within 'mask->range'.
 74	 */
 75	for (i = 0; i < len; i += sizeof(long))
 76		*d++ = *s++ & *m++;
 77}
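
The loop above ANDs src into dst one long at a time across mask->range; the BUILD_BUG_ON() checks in ovs_flow_init() guarantee that struct sw_flow_key is long-aligned and long-sized, so the long-granular walk never over-reads the key. A minimal userspace sketch of the same masking idea (byte-granular for simplicity; the buffers and names are invented for the demo):

#include <stdio.h>

/* dst[i] = src[i] & mask[i] over [start, end), as ovs_flow_mask_key()
 * does long-wise over mask->range. */
static void mask_bytes(unsigned char *dst, const unsigned char *src,
		       const unsigned char *mask, size_t start, size_t end)
{
	size_t i;

	for (i = start; i < end; i++)
		dst[i] = src[i] & mask[i];
}

int main(void)
{
	unsigned char src[8]  = { 0xde, 0xad, 0xbe, 0xef, 0x12, 0x34, 0x56, 0x78 };
	unsigned char mask[8] = { 0xff, 0xff, 0x00, 0x00, 0xff, 0x00, 0xff, 0x0f };
	unsigned char dst[8]  = { 0 };
	size_t i;

	mask_bytes(dst, src, mask, 0, sizeof(dst));
	for (i = 0; i < sizeof(dst); i++)
		printf("%02x ", dst[i]);
	printf("\n");	/* prints: de ad 00 00 12 00 56 08 */
	return 0;
}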
 78
 79struct sw_flow *ovs_flow_alloc(void)
 80{
 81	struct sw_flow *flow;
 82	struct flow_stats *stats;
 83
 84	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
 85	if (!flow)
 86		return ERR_PTR(-ENOMEM);
 87
 88	flow->stats_last_writer = -1;
 89
 90	/* Initialize the default stat node. */
 91	stats = kmem_cache_alloc_node(flow_stats_cache,
 92				      GFP_KERNEL | __GFP_ZERO,
 93				      node_online(0) ? 0 : NUMA_NO_NODE);
 94	if (!stats)
 95		goto err;
 96
 97	spin_lock_init(&stats->lock);
 98
 99	RCU_INIT_POINTER(flow->stats[0], stats);
100
101	return flow;
102err:
103	kmem_cache_free(flow_cache, flow);
104	return ERR_PTR(-ENOMEM);
105}
106
107int ovs_flow_tbl_count(const struct flow_table *table)
108{
109	return table->count;
110}
111
112static struct flex_array *alloc_buckets(unsigned int n_buckets)
113{
114	struct flex_array *buckets;
115	int i, err;
116
117	buckets = flex_array_alloc(sizeof(struct hlist_head),
118				   n_buckets, GFP_KERNEL);
119	if (!buckets)
120		return NULL;
121
122	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
123	if (err) {
124		flex_array_free(buckets);
125		return NULL;
126	}
127
128	for (i = 0; i < n_buckets; i++)
129		INIT_HLIST_HEAD((struct hlist_head *)
130					flex_array_get(buckets, i));
131
132	return buckets;
133}
134
135static void flow_free(struct sw_flow *flow)
136{
137	int cpu;
138
139	if (ovs_identifier_is_key(&flow->id))
140		kfree(flow->id.unmasked_key);
141	if (flow->sf_acts)
142		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
 143	/* We open-code this to make sure CPU 0 is always considered. */
144	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))
145		if (flow->stats[cpu])
146			kmem_cache_free(flow_stats_cache,
147					(struct flow_stats __force *)flow->stats[cpu]);
148	kmem_cache_free(flow_cache, flow);
149}
150
151static void rcu_free_flow_callback(struct rcu_head *rcu)
152{
153	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
154
155	flow_free(flow);
156}
157
158void ovs_flow_free(struct sw_flow *flow, bool deferred)
159{
160	if (!flow)
161		return;
162
163	if (deferred)
164		call_rcu(&flow->rcu, rcu_free_flow_callback);
165	else
166		flow_free(flow);
167}
168
169static void free_buckets(struct flex_array *buckets)
170{
171	flex_array_free(buckets);
172}
173
174
175static void __table_instance_destroy(struct table_instance *ti)
176{
177	free_buckets(ti->buckets);
178	kfree(ti);
179}
180
181static struct table_instance *table_instance_alloc(int new_size)
182{
183	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
184
185	if (!ti)
186		return NULL;
187
188	ti->buckets = alloc_buckets(new_size);
189
190	if (!ti->buckets) {
191		kfree(ti);
192		return NULL;
193	}
194	ti->n_buckets = new_size;
195	ti->node_ver = 0;
196	ti->keep_flows = false;
197	get_random_bytes(&ti->hash_seed, sizeof(u32));
198
199	return ti;
200}
201
202int ovs_flow_tbl_init(struct flow_table *table)
203{
204	struct table_instance *ti, *ufid_ti;
205
206	ti = table_instance_alloc(TBL_MIN_BUCKETS);
207
208	if (!ti)
209		return -ENOMEM;
210
211	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
212	if (!ufid_ti)
213		goto free_ti;
214
215	rcu_assign_pointer(table->ti, ti);
216	rcu_assign_pointer(table->ufid_ti, ufid_ti);
217	INIT_LIST_HEAD(&table->mask_list);
218	table->last_rehash = jiffies;
219	table->count = 0;
220	table->ufid_count = 0;
221	return 0;
222
223free_ti:
224	__table_instance_destroy(ti);
225	return -ENOMEM;
226}
227
228static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
229{
230	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
231
232	__table_instance_destroy(ti);
233}
234
235static void table_instance_destroy(struct table_instance *ti,
236				   struct table_instance *ufid_ti,
237				   bool deferred)
238{
239	int i;
240
241	if (!ti)
242		return;
243
244	BUG_ON(!ufid_ti);
245	if (ti->keep_flows)
246		goto skip_flows;
247
248	for (i = 0; i < ti->n_buckets; i++) {
249		struct sw_flow *flow;
250		struct hlist_head *head = flex_array_get(ti->buckets, i);
251		struct hlist_node *n;
252		int ver = ti->node_ver;
253		int ufid_ver = ufid_ti->node_ver;
254
255		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
256			hlist_del_rcu(&flow->flow_table.node[ver]);
257			if (ovs_identifier_is_ufid(&flow->id))
258				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
259			ovs_flow_free(flow, deferred);
260		}
261	}
262
263skip_flows:
264	if (deferred) {
265		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
266		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
267	} else {
268		__table_instance_destroy(ti);
269		__table_instance_destroy(ufid_ti);
270	}
271}
272
273/* No locking needed: this function is called only from an RCU callback
274 * or the error path.
275 */
276void ovs_flow_tbl_destroy(struct flow_table *table)
277{
278	struct table_instance *ti = rcu_dereference_raw(table->ti);
279	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
280
281	table_instance_destroy(ti, ufid_ti, false);
282}
283
284struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
285				       u32 *bucket, u32 *last)
286{
287	struct sw_flow *flow;
288	struct hlist_head *head;
289	int ver;
290	int i;
291
292	ver = ti->node_ver;
293	while (*bucket < ti->n_buckets) {
294		i = 0;
295		head = flex_array_get(ti->buckets, *bucket);
296		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
297			if (i < *last) {
298				i++;
299				continue;
300			}
301			*last = i + 1;
302			return flow;
303		}
304		(*bucket)++;
305		*last = 0;
306	}
307
308	return NULL;
309}
310
311static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
312{
313	hash = jhash_1word(hash, ti->hash_seed);
314	return flex_array_get(ti->buckets,
315				(hash & (ti->n_buckets - 1)));
316}
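
find_bucket() depends on n_buckets being a power of two: hash & (n_buckets - 1) then equals hash % n_buckets at a fraction of the cost. TBL_MIN_BUCKETS is 1024 and table_instance_expand() always doubles, so the invariant holds for every instance. Folding the per-instance hash_seed in via jhash_1word() also makes each instance distribute the same flows differently. A self-contained sketch of the index computation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bucket selection as in find_bucket(); valid only for power-of-two n. */
static uint32_t bucket_index(uint32_t hash, uint32_t n_buckets)
{
	assert(n_buckets != 0 && (n_buckets & (n_buckets - 1)) == 0);
	return hash & (n_buckets - 1);
}

int main(void)
{
	printf("%u\n", (unsigned int)bucket_index(0xdeadbeef, 1024));	/* 751 */
	return 0;
}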
317
318static void table_instance_insert(struct table_instance *ti,
319				  struct sw_flow *flow)
320{
321	struct hlist_head *head;
322
323	head = find_bucket(ti, flow->flow_table.hash);
324	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
325}
326
327static void ufid_table_instance_insert(struct table_instance *ti,
328				       struct sw_flow *flow)
329{
330	struct hlist_head *head;
331
332	head = find_bucket(ti, flow->ufid_table.hash);
333	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
334}
335
336static void flow_table_copy_flows(struct table_instance *old,
337				  struct table_instance *new, bool ufid)
338{
339	int old_ver;
340	int i;
341
342	old_ver = old->node_ver;
343	new->node_ver = !old_ver;
344
345	/* Insert in new table. */
346	for (i = 0; i < old->n_buckets; i++) {
347		struct sw_flow *flow;
348		struct hlist_head *head;
349
350		head = flex_array_get(old->buckets, i);
351
352		if (ufid)
353			hlist_for_each_entry(flow, head,
354					     ufid_table.node[old_ver])
355				ufid_table_instance_insert(new, flow);
356		else
357			hlist_for_each_entry(flow, head,
358					     flow_table.node[old_ver])
359				table_instance_insert(new, flow);
360	}
361
362	old->keep_flows = true;
363}
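
The node_ver flip above is what makes rehashing safe under RCU: each sw_flow embeds two link slots per table (node[0] and node[1]), the old instance keeps traversing one slot while the new instance threads flows through the other, and setting old->keep_flows marks the flows as merely migrated so table_instance_destroy() will not free them with the old instance. A hedged structural sketch of the two-slot trick, with invented names:

/* Sketch only: an entry that can sit in two hash tables at once.
 * Each table follows the link slot selected by its own node_ver, so
 * populating the new table never disturbs readers of the old one. */
struct demo_link {
	struct demo_link *next;
};

struct two_home_entry {
	struct demo_link node[2];	/* one slot per table generation */
	unsigned long key;
};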
364
365static struct table_instance *table_instance_rehash(struct table_instance *ti,
366						    int n_buckets, bool ufid)
367{
368	struct table_instance *new_ti;
369
370	new_ti = table_instance_alloc(n_buckets);
371	if (!new_ti)
372		return NULL;
373
374	flow_table_copy_flows(ti, new_ti, ufid);
375
376	return new_ti;
377}
378
379int ovs_flow_tbl_flush(struct flow_table *flow_table)
380{
381	struct table_instance *old_ti, *new_ti;
382	struct table_instance *old_ufid_ti, *new_ufid_ti;
383
384	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
385	if (!new_ti)
386		return -ENOMEM;
387	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
388	if (!new_ufid_ti)
389		goto err_free_ti;
390
391	old_ti = ovsl_dereference(flow_table->ti);
392	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
393
394	rcu_assign_pointer(flow_table->ti, new_ti);
395	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
396	flow_table->last_rehash = jiffies;
397	flow_table->count = 0;
398	flow_table->ufid_count = 0;
399
400	table_instance_destroy(old_ti, old_ufid_ti, true);
401	return 0;
402
403err_free_ti:
404	__table_instance_destroy(new_ti);
405	return -ENOMEM;
406}
407
408static u32 flow_hash(const struct sw_flow_key *key,
409		     const struct sw_flow_key_range *range)
410{
411	int key_start = range->start;
412	int key_end = range->end;
413	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
414	int hash_u32s = (key_end - key_start) >> 2;
415
 416	/* Make sure the number of hash bytes is a multiple of u32. */
417	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
418
419	return jhash2(hash_key, hash_u32s, 0);
420}
421
422static int flow_key_start(const struct sw_flow_key *key)
423{
424	if (key->tun_proto)
425		return 0;
426	else
427		return rounddown(offsetof(struct sw_flow_key, phy),
428					  sizeof(long));
429}
430
431static bool cmp_key(const struct sw_flow_key *key1,
432		    const struct sw_flow_key *key2,
433		    int key_start, int key_end)
434{
435	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
436	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
437	long diffs = 0;
438	int i;
439
440	for (i = key_start; i < key_end;  i += sizeof(long))
441		diffs |= *cp1++ ^ *cp2++;
442
443	return diffs == 0;
444}
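
cmp_key() is an equality-only replacement for memcmp(): it ORs together the XOR of every long-sized word, making a single pass with no data-dependent branch regardless of where the first difference sits. The same pattern as a standalone function:

#include <stdbool.h>
#include <stddef.h>

/* Branch-free equality over long-sized words, as in cmp_key(). */
static bool words_equal(const long *a, const long *b, size_t n_words)
{
	long diffs = 0;
	size_t i;

	for (i = 0; i < n_words; i++)
		diffs |= a[i] ^ b[i];	/* any differing bit survives */
	return diffs == 0;
}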
445
446static bool flow_cmp_masked_key(const struct sw_flow *flow,
447				const struct sw_flow_key *key,
448				const struct sw_flow_key_range *range)
449{
450	return cmp_key(&flow->key, key, range->start, range->end);
451}
452
453static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
454				      const struct sw_flow_match *match)
455{
456	struct sw_flow_key *key = match->key;
457	int key_start = flow_key_start(key);
458	int key_end = match->range.end;
459
460	BUG_ON(ovs_identifier_is_ufid(&flow->id));
461	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
462}
463
464static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
465					  const struct sw_flow_key *unmasked,
466					  const struct sw_flow_mask *mask)
467{
468	struct sw_flow *flow;
469	struct hlist_head *head;
470	u32 hash;
471	struct sw_flow_key masked_key;
472
473	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
474	hash = flow_hash(&masked_key, &mask->range);
475	head = find_bucket(ti, hash);
476	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
477		if (flow->mask == mask && flow->flow_table.hash == hash &&
478		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
479			return flow;
480	}
481	return NULL;
482}
483
484struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
485				    const struct sw_flow_key *key,
486				    u32 *n_mask_hit)
487{
488	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
489	struct sw_flow_mask *mask;
490	struct sw_flow *flow;
491
492	*n_mask_hit = 0;
493	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
494		(*n_mask_hit)++;
495		flow = masked_flow_lookup(ti, key, mask);
496		if (flow)  /* Found */
497			return flow;
498	}
499	return NULL;
500}
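
This is the megaflow lookup path: one hash table, many masks. The key has to be masked, hashed and probed once per candidate mask until something matches, so *n_mask_hit reports how expensive the lookup was (how many masks were tried). A compact userspace model of the per-mask probe loop, with invented single-word keys standing in for struct sw_flow_key:

#include <stdbool.h>
#include <stddef.h>

struct demo_mask {
	unsigned long bits;
};

/* One probe per mask until a masked comparison matches, counting
 * attempts the way *n_mask_hit does in ovs_flow_tbl_lookup_stats(). */
static bool demo_lookup(const struct demo_mask *masks, size_t n_masks,
			unsigned long stored_masked_key, unsigned long key,
			unsigned int *n_mask_hit)
{
	size_t i;

	*n_mask_hit = 0;
	for (i = 0; i < n_masks; i++) {
		(*n_mask_hit)++;
		if ((key & masks[i].bits) == stored_masked_key)
			return true;	/* found under masks[i] */
	}
	return false;
}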
501
502struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
503				    const struct sw_flow_key *key)
504{
505	u32 __always_unused n_mask_hit;
506
507	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
508}
509
510struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
511					  const struct sw_flow_match *match)
512{
513	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
514	struct sw_flow_mask *mask;
515	struct sw_flow *flow;
516
517	/* Always called under ovs-mutex. */
518	list_for_each_entry(mask, &tbl->mask_list, list) {
519		flow = masked_flow_lookup(ti, match->key, mask);
520		if (flow && ovs_identifier_is_key(&flow->id) &&
521		    ovs_flow_cmp_unmasked_key(flow, match))
522			return flow;
523	}
524	return NULL;
525}
526
527static u32 ufid_hash(const struct sw_flow_id *sfid)
528{
529	return jhash(sfid->ufid, sfid->ufid_len, 0);
530}
531
532static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
533			      const struct sw_flow_id *sfid)
534{
535	if (flow->id.ufid_len != sfid->ufid_len)
536		return false;
537
538	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
539}
540
541bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
542{
543	if (ovs_identifier_is_ufid(&flow->id))
544		return flow_cmp_masked_key(flow, match->key, &match->range);
545
546	return ovs_flow_cmp_unmasked_key(flow, match);
547}
548
549struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
550					 const struct sw_flow_id *ufid)
551{
552	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
553	struct sw_flow *flow;
554	struct hlist_head *head;
555	u32 hash;
556
557	hash = ufid_hash(ufid);
558	head = find_bucket(ti, hash);
559	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
560		if (flow->ufid_table.hash == hash &&
561		    ovs_flow_cmp_ufid(flow, ufid))
562			return flow;
563	}
564	return NULL;
565}
566
567int ovs_flow_tbl_num_masks(const struct flow_table *table)
568{
569	struct sw_flow_mask *mask;
570	int num = 0;
571
572	list_for_each_entry(mask, &table->mask_list, list)
573		num++;
574
575	return num;
576}
577
578static struct table_instance *table_instance_expand(struct table_instance *ti,
579						    bool ufid)
580{
581	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
582}
583
584/* Remove 'mask' from the mask list, if it is not needed any more. */
585static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
586{
587	if (mask) {
588		/* ovs-lock is required to protect mask-refcount and
589		 * mask list.
590		 */
591		ASSERT_OVSL();
592		BUG_ON(!mask->ref_count);
593		mask->ref_count--;
594
595		if (!mask->ref_count) {
596			list_del_rcu(&mask->list);
597			kfree_rcu(mask, rcu);
598		}
599	}
600}
601
602/* Must be called with OVS mutex held. */
603void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
604{
605	struct table_instance *ti = ovsl_dereference(table->ti);
606	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
607
608	BUG_ON(table->count == 0);
609	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
610	table->count--;
611	if (ovs_identifier_is_ufid(&flow->id)) {
612		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
613		table->ufid_count--;
614	}
615
616	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
617	 * accessible as long as the RCU read lock is held.
618	 */
619	flow_mask_remove(table, flow->mask);
620}
621
622static struct sw_flow_mask *mask_alloc(void)
623{
624	struct sw_flow_mask *mask;
625
626	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
627	if (mask)
628		mask->ref_count = 1;
629
630	return mask;
631}
632
633static bool mask_equal(const struct sw_flow_mask *a,
634		       const struct sw_flow_mask *b)
635{
636	const u8 *a_ = (const u8 *)&a->key + a->range.start;
637	const u8 *b_ = (const u8 *)&b->key + b->range.start;
638
639	return  (a->range.end == b->range.end)
640		&& (a->range.start == b->range.start)
641		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
642}
643
644static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
645					   const struct sw_flow_mask *mask)
646{
647	struct list_head *ml;
648
649	list_for_each(ml, &tbl->mask_list) {
650		struct sw_flow_mask *m;
651		m = container_of(ml, struct sw_flow_mask, list);
652		if (mask_equal(mask, m))
653			return m;
654	}
655
656	return NULL;
657}
658
659/* Add 'mask' into the mask list, if it is not already there. */
660static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
661			    const struct sw_flow_mask *new)
662{
663	struct sw_flow_mask *mask;
664	mask = flow_mask_find(tbl, new);
665	if (!mask) {
 666		/* Allocate a new mask if none exists. */
667		mask = mask_alloc();
668		if (!mask)
669			return -ENOMEM;
670		mask->key = new->key;
671		mask->range = new->range;
672		list_add_rcu(&mask->list, &tbl->mask_list);
673	} else {
674		BUG_ON(!mask->ref_count);
675		mask->ref_count++;
676	}
677
678	flow->mask = mask;
679	return 0;
680}
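
Masks are deduplicated and reference-counted: flows typically share a handful of masks, so flow_mask_insert() reuses an equal mask where one exists and only allocates otherwise. The count drops in flow_mask_remove(), and the final put frees through kfree_rcu() so readers still walking mask_list stay safe. A minimal userspace sketch of the reuse-or-allocate shape (no RCU, invented types):

#include <stdlib.h>

struct demo_mask {
	unsigned long bits;
	int ref_count;
	struct demo_mask *next;
};

/* Reuse an equal mask if present, otherwise allocate and link it in;
 * mirrors the find-else-alloc structure of flow_mask_insert(). */
static struct demo_mask *get_mask(struct demo_mask **list, unsigned long bits)
{
	struct demo_mask *m;

	for (m = *list; m; m = m->next) {
		if (m->bits == bits) {
			m->ref_count++;
			return m;
		}
	}

	m = malloc(sizeof(*m));
	if (!m)
		return NULL;
	m->bits = bits;
	m->ref_count = 1;
	m->next = *list;
	*list = m;
	return m;
}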
681
682/* Must be called with OVS mutex held. */
683static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
684{
685	struct table_instance *new_ti = NULL;
686	struct table_instance *ti;
687
688	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
689	ti = ovsl_dereference(table->ti);
690	table_instance_insert(ti, flow);
691	table->count++;
692
693	/* Expand table, if necessary, to make room. */
694	if (table->count > ti->n_buckets)
695		new_ti = table_instance_expand(ti, false);
696	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
697		new_ti = table_instance_rehash(ti, ti->n_buckets, false);
698
699	if (new_ti) {
700		rcu_assign_pointer(table->ti, new_ti);
701		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
702		table->last_rehash = jiffies;
703	}
704}
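
Two triggers retire the current instance here: the table is expanded (buckets doubled) once count exceeds n_buckets, i.e. the load factor passes 1, and even without growth it is rehashed every REHASH_INTERVAL (10 minutes) so a fresh hash_seed is chosen, bounding how long a discovered collision pattern stays useful. The old instance is retired with call_rcu(), which frees only its bucket array; the flows themselves live on in the new instance. The decision logic, reduced to a sketch with invented names:

#include <stdbool.h>

struct demo_table {
	unsigned int count, n_buckets;
	unsigned long last_rehash;
};

/* Grow on load factor > 1, otherwise re-seed periodically; unsigned
 * subtraction keeps the time check wraparound-safe, in the spirit of
 * time_after(). */
static bool needs_new_instance(const struct demo_table *t, unsigned long now,
			       unsigned long rehash_interval)
{
	return t->count > t->n_buckets ||
	       now - t->last_rehash > rehash_interval;
}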
705
706/* Must be called with OVS mutex held. */
707static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
708{
709	struct table_instance *ti;
710
711	flow->ufid_table.hash = ufid_hash(&flow->id);
712	ti = ovsl_dereference(table->ufid_ti);
713	ufid_table_instance_insert(ti, flow);
714	table->ufid_count++;
715
716	/* Expand table, if necessary, to make room. */
717	if (table->ufid_count > ti->n_buckets) {
718		struct table_instance *new_ti;
719
720		new_ti = table_instance_expand(ti, true);
721		if (new_ti) {
722			rcu_assign_pointer(table->ufid_ti, new_ti);
723			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
724		}
725	}
726}
727
728/* Must be called with OVS mutex held. */
729int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
730			const struct sw_flow_mask *mask)
731{
732	int err;
733
734	err = flow_mask_insert(table, flow, mask);
735	if (err)
736		return err;
737	flow_key_insert(table, flow);
738	if (ovs_identifier_is_ufid(&flow->id))
739		flow_ufid_insert(table, flow);
740
741	return 0;
742}
743
744/* Initializes the flow module.
745 * Returns zero if successful or a negative error code. */
746int ovs_flow_init(void)
747{
748	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
749	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
750
751	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
752				       + (nr_cpu_ids
753					  * sizeof(struct flow_stats *)),
754				       0, 0, NULL);
755	if (flow_cache == NULL)
756		return -ENOMEM;
757
758	flow_stats_cache
759		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
760				    0, SLAB_HWCACHE_ALIGN, NULL);
761	if (flow_stats_cache == NULL) {
762		kmem_cache_destroy(flow_cache);
763		flow_cache = NULL;
764		return -ENOMEM;
765	}
766
767	return 0;
768}
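
Note the object size passed to kmem_cache_create(): each sw_flow carries a trailing array of nr_cpu_ids stats pointers, filled in lazily per CPU (ovs_flow_alloc() above populates only stats[0]). A hedged sketch of the layout implied by that sizing, with invented names:

#include <stddef.h>

struct demo_flow {
	int stats_last_writer;
	/* ...other flow fields... */
	void *stats[];		/* flexible array: one slot per possible CPU */
};

/* Slab object size, as computed for the "sw_flow" cache above. */
static size_t demo_flow_size(unsigned int nr_cpu_ids)
{
	return sizeof(struct demo_flow) + nr_cpu_ids * sizeof(void *);
}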
769
770/* Uninitializes the flow module. */
771void ovs_flow_exit(void)
772{
773	kmem_cache_destroy(flow_stats_cache);
774	kmem_cache_destroy(flow_cache);
775}
net/openvswitch/flow_table.c (Linux v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2007-2014 Nicira, Inc.
  4 */
  5
  6#include "flow.h"
  7#include "datapath.h"
  8#include "flow_netlink.h"
  9#include <linux/uaccess.h>
 10#include <linux/netdevice.h>
 11#include <linux/etherdevice.h>
 12#include <linux/if_ether.h>
 13#include <linux/if_vlan.h>
 14#include <net/llc_pdu.h>
 15#include <linux/kernel.h>
 16#include <linux/jhash.h>
 17#include <linux/jiffies.h>
 18#include <linux/llc.h>
 19#include <linux/module.h>
 20#include <linux/in.h>
 21#include <linux/rcupdate.h>
 22#include <linux/cpumask.h>
 23#include <linux/if_arp.h>
 24#include <linux/ip.h>
 25#include <linux/ipv6.h>
 26#include <linux/sctp.h>
 27#include <linux/tcp.h>
 28#include <linux/udp.h>
 29#include <linux/icmp.h>
 30#include <linux/icmpv6.h>
 31#include <linux/rculist.h>
 32#include <net/ip.h>
 33#include <net/ipv6.h>
 34#include <net/ndisc.h>
 35
 36#define TBL_MIN_BUCKETS		1024
 37#define REHASH_INTERVAL		(10 * 60 * HZ)
 38
 39static struct kmem_cache *flow_cache;
 40struct kmem_cache *flow_stats_cache __read_mostly;
 41
 42static u16 range_n_bytes(const struct sw_flow_key_range *range)
 43{
 44	return range->end - range->start;
 45}
 46
 47void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 48		       bool full, const struct sw_flow_mask *mask)
 49{
 50	int start = full ? 0 : mask->range.start;
 51	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
 52	const long *m = (const long *)((const u8 *)&mask->key + start);
 53	const long *s = (const long *)((const u8 *)src + start);
 54	long *d = (long *)((u8 *)dst + start);
 55	int i;
 56
 57	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
 58	 * if 'full' is false the memory outside of the 'mask->range' is left
 59	 * uninitialized. This can be used as an optimization when further
 60	 * operations on 'dst' only use contents within 'mask->range'.
 61	 */
 62	for (i = 0; i < len; i += sizeof(long))
 63		*d++ = *s++ & *m++;
 64}
 65
 66struct sw_flow *ovs_flow_alloc(void)
 67{
 68	struct sw_flow *flow;
 69	struct sw_flow_stats *stats;
 70
 71	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
 72	if (!flow)
 73		return ERR_PTR(-ENOMEM);
 74
 75	flow->stats_last_writer = -1;
 76
 77	/* Initialize the default stat node. */
 78	stats = kmem_cache_alloc_node(flow_stats_cache,
 79				      GFP_KERNEL | __GFP_ZERO,
 80				      node_online(0) ? 0 : NUMA_NO_NODE);
 81	if (!stats)
 82		goto err;
 83
 84	spin_lock_init(&stats->lock);
 85
 86	RCU_INIT_POINTER(flow->stats[0], stats);
 87
 88	cpumask_set_cpu(0, &flow->cpu_used_mask);
 89
 90	return flow;
 91err:
 92	kmem_cache_free(flow_cache, flow);
 93	return ERR_PTR(-ENOMEM);
 94}
 95
 96int ovs_flow_tbl_count(const struct flow_table *table)
 97{
 98	return table->count;
 99}
100
101static void flow_free(struct sw_flow *flow)
102{
103	int cpu;
104
105	if (ovs_identifier_is_key(&flow->id))
106		kfree(flow->id.unmasked_key);
107	if (flow->sf_acts)
108		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
 109	/* We open-code this to make sure CPU 0 is always considered. */
110	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
111		if (flow->stats[cpu])
112			kmem_cache_free(flow_stats_cache,
113					(struct sw_flow_stats __force *)flow->stats[cpu]);
114	kmem_cache_free(flow_cache, flow);
115}
116
117static void rcu_free_flow_callback(struct rcu_head *rcu)
118{
119	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
120
121	flow_free(flow);
122}
123
124void ovs_flow_free(struct sw_flow *flow, bool deferred)
125{
126	if (!flow)
127		return;
128
129	if (deferred)
130		call_rcu(&flow->rcu, rcu_free_flow_callback);
131	else
132		flow_free(flow);
133}
134
135static void __table_instance_destroy(struct table_instance *ti)
136{
137	kvfree(ti->buckets);
138	kfree(ti);
139}
140
141static struct table_instance *table_instance_alloc(int new_size)
142{
143	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
144	int i;
145
146	if (!ti)
147		return NULL;
148
149	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
150				     GFP_KERNEL);
151	if (!ti->buckets) {
152		kfree(ti);
153		return NULL;
154	}
155
156	for (i = 0; i < new_size; i++)
157		INIT_HLIST_HEAD(&ti->buckets[i]);
158
159	ti->n_buckets = new_size;
160	ti->node_ver = 0;
161	ti->keep_flows = false;
162	get_random_bytes(&ti->hash_seed, sizeof(u32));
163
164	return ti;
165}
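
The other structural change from v4.10: the bucket array is now one kvmalloc_array() allocation, freed with kvfree() in __table_instance_destroy(), instead of a flex_array. kvmalloc() tries kmalloc() first and falls back to vmalloc() for large sizes, so big bucket arrays still avoid high-order page allocations while find_bucket() becomes plain indexing. A userspace analogue of the overflow-checked array allocation:

#include <stdint.h>
#include <stdlib.h>

/* Overflow-checked n * size allocation, the check kvmalloc_array()
 * performs before picking an allocator. Userspace sketch only. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}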
166
167int ovs_flow_tbl_init(struct flow_table *table)
168{
169	struct table_instance *ti, *ufid_ti;
170
171	ti = table_instance_alloc(TBL_MIN_BUCKETS);
172
173	if (!ti)
174		return -ENOMEM;
175
176	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
177	if (!ufid_ti)
178		goto free_ti;
179
180	rcu_assign_pointer(table->ti, ti);
181	rcu_assign_pointer(table->ufid_ti, ufid_ti);
182	INIT_LIST_HEAD(&table->mask_list);
183	table->last_rehash = jiffies;
184	table->count = 0;
185	table->ufid_count = 0;
186	return 0;
187
188free_ti:
189	__table_instance_destroy(ti);
190	return -ENOMEM;
191}
192
193static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
194{
195	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
196
197	__table_instance_destroy(ti);
198}
199
200static void table_instance_destroy(struct table_instance *ti,
201				   struct table_instance *ufid_ti,
202				   bool deferred)
203{
204	int i;
205
206	if (!ti)
207		return;
208
209	BUG_ON(!ufid_ti);
210	if (ti->keep_flows)
211		goto skip_flows;
212
213	for (i = 0; i < ti->n_buckets; i++) {
214		struct sw_flow *flow;
215		struct hlist_head *head = &ti->buckets[i];
216		struct hlist_node *n;
217		int ver = ti->node_ver;
218		int ufid_ver = ufid_ti->node_ver;
219
220		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
221			hlist_del_rcu(&flow->flow_table.node[ver]);
222			if (ovs_identifier_is_ufid(&flow->id))
223				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
224			ovs_flow_free(flow, deferred);
225		}
226	}
227
228skip_flows:
229	if (deferred) {
230		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
231		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
232	} else {
233		__table_instance_destroy(ti);
234		__table_instance_destroy(ufid_ti);
235	}
236}
237
238/* No locking needed: this function is called only from an RCU callback
239 * or the error path.
240 */
241void ovs_flow_tbl_destroy(struct flow_table *table)
242{
243	struct table_instance *ti = rcu_dereference_raw(table->ti);
244	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
245
246	table_instance_destroy(ti, ufid_ti, false);
247}
248
249struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
250				       u32 *bucket, u32 *last)
251{
252	struct sw_flow *flow;
253	struct hlist_head *head;
254	int ver;
255	int i;
256
257	ver = ti->node_ver;
258	while (*bucket < ti->n_buckets) {
259		i = 0;
260		head = &ti->buckets[*bucket];
261		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
262			if (i < *last) {
263				i++;
264				continue;
265			}
266			*last = i + 1;
267			return flow;
268		}
269		(*bucket)++;
270		*last = 0;
271	}
272
273	return NULL;
274}
275
276static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
277{
278	hash = jhash_1word(hash, ti->hash_seed);
279	return &ti->buckets[hash & (ti->n_buckets - 1)];
280}
281
282static void table_instance_insert(struct table_instance *ti,
283				  struct sw_flow *flow)
284{
285	struct hlist_head *head;
286
287	head = find_bucket(ti, flow->flow_table.hash);
288	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
289}
290
291static void ufid_table_instance_insert(struct table_instance *ti,
292				       struct sw_flow *flow)
293{
294	struct hlist_head *head;
295
296	head = find_bucket(ti, flow->ufid_table.hash);
297	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
298}
299
300static void flow_table_copy_flows(struct table_instance *old,
301				  struct table_instance *new, bool ufid)
302{
303	int old_ver;
304	int i;
305
306	old_ver = old->node_ver;
307	new->node_ver = !old_ver;
308
309	/* Insert in new table. */
310	for (i = 0; i < old->n_buckets; i++) {
311		struct sw_flow *flow;
312		struct hlist_head *head = &old->buckets[i];
313
314		if (ufid)
315			hlist_for_each_entry(flow, head,
316					     ufid_table.node[old_ver])
317				ufid_table_instance_insert(new, flow);
318		else
319			hlist_for_each_entry(flow, head,
320					     flow_table.node[old_ver])
321				table_instance_insert(new, flow);
322	}
323
324	old->keep_flows = true;
325}
326
327static struct table_instance *table_instance_rehash(struct table_instance *ti,
328						    int n_buckets, bool ufid)
329{
330	struct table_instance *new_ti;
331
332	new_ti = table_instance_alloc(n_buckets);
333	if (!new_ti)
334		return NULL;
335
336	flow_table_copy_flows(ti, new_ti, ufid);
337
338	return new_ti;
339}
340
341int ovs_flow_tbl_flush(struct flow_table *flow_table)
342{
343	struct table_instance *old_ti, *new_ti;
344	struct table_instance *old_ufid_ti, *new_ufid_ti;
345
346	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
347	if (!new_ti)
348		return -ENOMEM;
349	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
350	if (!new_ufid_ti)
351		goto err_free_ti;
352
353	old_ti = ovsl_dereference(flow_table->ti);
354	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
355
356	rcu_assign_pointer(flow_table->ti, new_ti);
357	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
358	flow_table->last_rehash = jiffies;
359	flow_table->count = 0;
360	flow_table->ufid_count = 0;
361
362	table_instance_destroy(old_ti, old_ufid_ti, true);
363	return 0;
364
365err_free_ti:
366	__table_instance_destroy(new_ti);
367	return -ENOMEM;
368}
369
370static u32 flow_hash(const struct sw_flow_key *key,
371		     const struct sw_flow_key_range *range)
372{
373	int key_start = range->start;
374	int key_end = range->end;
375	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
376	int hash_u32s = (key_end - key_start) >> 2;
377
 378	/* Make sure the number of hash bytes is a multiple of u32. */
379	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
380
381	return jhash2(hash_key, hash_u32s, 0);
382}
383
384static int flow_key_start(const struct sw_flow_key *key)
385{
386	if (key->tun_proto)
387		return 0;
388	else
389		return rounddown(offsetof(struct sw_flow_key, phy),
390					  sizeof(long));
391}
392
393static bool cmp_key(const struct sw_flow_key *key1,
394		    const struct sw_flow_key *key2,
395		    int key_start, int key_end)
396{
397	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
398	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
399	long diffs = 0;
400	int i;
401
402	for (i = key_start; i < key_end;  i += sizeof(long))
403		diffs |= *cp1++ ^ *cp2++;
404
405	return diffs == 0;
406}
407
408static bool flow_cmp_masked_key(const struct sw_flow *flow,
409				const struct sw_flow_key *key,
410				const struct sw_flow_key_range *range)
411{
412	return cmp_key(&flow->key, key, range->start, range->end);
413}
414
415static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
416				      const struct sw_flow_match *match)
417{
418	struct sw_flow_key *key = match->key;
419	int key_start = flow_key_start(key);
420	int key_end = match->range.end;
421
422	BUG_ON(ovs_identifier_is_ufid(&flow->id));
423	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
424}
425
426static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
427					  const struct sw_flow_key *unmasked,
428					  const struct sw_flow_mask *mask)
429{
430	struct sw_flow *flow;
431	struct hlist_head *head;
432	u32 hash;
433	struct sw_flow_key masked_key;
434
435	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
436	hash = flow_hash(&masked_key, &mask->range);
437	head = find_bucket(ti, hash);
438	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
439		if (flow->mask == mask && flow->flow_table.hash == hash &&
440		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
441			return flow;
442	}
443	return NULL;
444}
445
446struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
447				    const struct sw_flow_key *key,
448				    u32 *n_mask_hit)
449{
450	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
451	struct sw_flow_mask *mask;
452	struct sw_flow *flow;
453
454	*n_mask_hit = 0;
455	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
456		(*n_mask_hit)++;
457		flow = masked_flow_lookup(ti, key, mask);
458		if (flow)  /* Found */
459			return flow;
460	}
461	return NULL;
462}
463
464struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
465				    const struct sw_flow_key *key)
466{
467	u32 __always_unused n_mask_hit;
468
469	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
470}
471
472struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
473					  const struct sw_flow_match *match)
474{
475	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
476	struct sw_flow_mask *mask;
477	struct sw_flow *flow;
478
479	/* Always called under ovs-mutex. */
480	list_for_each_entry(mask, &tbl->mask_list, list) {
481		flow = masked_flow_lookup(ti, match->key, mask);
482		if (flow && ovs_identifier_is_key(&flow->id) &&
483		    ovs_flow_cmp_unmasked_key(flow, match))
484			return flow;
485	}
486	return NULL;
487}
488
489static u32 ufid_hash(const struct sw_flow_id *sfid)
490{
491	return jhash(sfid->ufid, sfid->ufid_len, 0);
492}
493
494static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
495			      const struct sw_flow_id *sfid)
496{
497	if (flow->id.ufid_len != sfid->ufid_len)
498		return false;
499
500	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
501}
502
503bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
504{
505	if (ovs_identifier_is_ufid(&flow->id))
506		return flow_cmp_masked_key(flow, match->key, &match->range);
507
508	return ovs_flow_cmp_unmasked_key(flow, match);
509}
510
511struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
512					 const struct sw_flow_id *ufid)
513{
514	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
515	struct sw_flow *flow;
516	struct hlist_head *head;
517	u32 hash;
518
519	hash = ufid_hash(ufid);
520	head = find_bucket(ti, hash);
521	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
522		if (flow->ufid_table.hash == hash &&
523		    ovs_flow_cmp_ufid(flow, ufid))
524			return flow;
525	}
526	return NULL;
527}
528
529int ovs_flow_tbl_num_masks(const struct flow_table *table)
530{
531	struct sw_flow_mask *mask;
532	int num = 0;
533
534	list_for_each_entry(mask, &table->mask_list, list)
535		num++;
536
537	return num;
538}
539
540static struct table_instance *table_instance_expand(struct table_instance *ti,
541						    bool ufid)
542{
543	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
544}
545
546/* Remove 'mask' from the mask list, if it is not needed any more. */
547static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
548{
549	if (mask) {
550		/* ovs-lock is required to protect mask-refcount and
551		 * mask list.
552		 */
553		ASSERT_OVSL();
554		BUG_ON(!mask->ref_count);
555		mask->ref_count--;
556
557		if (!mask->ref_count) {
558			list_del_rcu(&mask->list);
559			kfree_rcu(mask, rcu);
560		}
561	}
562}
563
564/* Must be called with OVS mutex held. */
565void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
566{
567	struct table_instance *ti = ovsl_dereference(table->ti);
568	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
569
570	BUG_ON(table->count == 0);
571	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
572	table->count--;
573	if (ovs_identifier_is_ufid(&flow->id)) {
574		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
575		table->ufid_count--;
576	}
577
578	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
579	 * accessible as long as the RCU read lock is held.
580	 */
581	flow_mask_remove(table, flow->mask);
582}
583
584static struct sw_flow_mask *mask_alloc(void)
585{
586	struct sw_flow_mask *mask;
587
588	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
589	if (mask)
590		mask->ref_count = 1;
591
592	return mask;
593}
594
595static bool mask_equal(const struct sw_flow_mask *a,
596		       const struct sw_flow_mask *b)
597{
598	const u8 *a_ = (const u8 *)&a->key + a->range.start;
599	const u8 *b_ = (const u8 *)&b->key + b->range.start;
600
601	return  (a->range.end == b->range.end)
602		&& (a->range.start == b->range.start)
603		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
604}
605
606static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
607					   const struct sw_flow_mask *mask)
608{
609	struct list_head *ml;
610
611	list_for_each(ml, &tbl->mask_list) {
612		struct sw_flow_mask *m;
613		m = container_of(ml, struct sw_flow_mask, list);
614		if (mask_equal(mask, m))
615			return m;
616	}
617
618	return NULL;
619}
620
621/* Add 'mask' into the mask list, if it is not already there. */
622static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
623			    const struct sw_flow_mask *new)
624{
625	struct sw_flow_mask *mask;
626	mask = flow_mask_find(tbl, new);
627	if (!mask) {
 628		/* Allocate a new mask if none exists. */
629		mask = mask_alloc();
630		if (!mask)
631			return -ENOMEM;
632		mask->key = new->key;
633		mask->range = new->range;
634		list_add_rcu(&mask->list, &tbl->mask_list);
635	} else {
636		BUG_ON(!mask->ref_count);
637		mask->ref_count++;
638	}
639
640	flow->mask = mask;
641	return 0;
642}
643
644/* Must be called with OVS mutex held. */
645static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
646{
647	struct table_instance *new_ti = NULL;
648	struct table_instance *ti;
649
650	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
651	ti = ovsl_dereference(table->ti);
652	table_instance_insert(ti, flow);
653	table->count++;
654
655	/* Expand table, if necessary, to make room. */
656	if (table->count > ti->n_buckets)
657		new_ti = table_instance_expand(ti, false);
658	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
659		new_ti = table_instance_rehash(ti, ti->n_buckets, false);
660
661	if (new_ti) {
662		rcu_assign_pointer(table->ti, new_ti);
663		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
664		table->last_rehash = jiffies;
665	}
666}
667
668/* Must be called with OVS mutex held. */
669static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
670{
671	struct table_instance *ti;
672
673	flow->ufid_table.hash = ufid_hash(&flow->id);
674	ti = ovsl_dereference(table->ufid_ti);
675	ufid_table_instance_insert(ti, flow);
676	table->ufid_count++;
677
678	/* Expand table, if necessary, to make room. */
679	if (table->ufid_count > ti->n_buckets) {
680		struct table_instance *new_ti;
681
682		new_ti = table_instance_expand(ti, true);
683		if (new_ti) {
684			rcu_assign_pointer(table->ufid_ti, new_ti);
685			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
686		}
687	}
688}
689
690/* Must be called with OVS mutex held. */
691int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
692			const struct sw_flow_mask *mask)
693{
694	int err;
695
696	err = flow_mask_insert(table, flow, mask);
697	if (err)
698		return err;
699	flow_key_insert(table, flow);
700	if (ovs_identifier_is_ufid(&flow->id))
701		flow_ufid_insert(table, flow);
702
703	return 0;
704}
705
706/* Initializes the flow module.
707 * Returns zero if successful or a negative error code. */
708int ovs_flow_init(void)
709{
710	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
711	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
712
713	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
714				       + (nr_cpu_ids
715					  * sizeof(struct sw_flow_stats *)),
716				       0, 0, NULL);
717	if (flow_cache == NULL)
718		return -ENOMEM;
719
720	flow_stats_cache
721		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
722				    0, SLAB_HWCACHE_ALIGN, NULL);
723	if (flow_stats_cache == NULL) {
724		kmem_cache_destroy(flow_cache);
725		flow_cache = NULL;
726		return -ENOMEM;
727	}
728
729	return 0;
730}
731
732/* Uninitializes the flow module. */
733void ovs_flow_exit(void)
734{
735	kmem_cache_destroy(flow_stats_cache);
736	kmem_cache_destroy(flow_cache);
737}