v4.10.11
  1/*
  2 * Copyright (c) 2007-2014 Nicira, Inc.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of version 2 of the GNU General Public
  6 * License as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but
  9 * WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public License
 14 * along with this program; if not, write to the Free Software
 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 16 * 02110-1301, USA
 17 */
 18
 19#include "flow.h"
 20#include "datapath.h"
 21#include "flow_netlink.h"
 22#include <linux/uaccess.h>
 23#include <linux/netdevice.h>
 24#include <linux/etherdevice.h>
 25#include <linux/if_ether.h>
 26#include <linux/if_vlan.h>
 27#include <net/llc_pdu.h>
 28#include <linux/kernel.h>
 29#include <linux/jhash.h>
 30#include <linux/jiffies.h>
 31#include <linux/llc.h>
 32#include <linux/module.h>
 33#include <linux/in.h>
 34#include <linux/rcupdate.h>
 35#include <linux/cpumask.h>
 36#include <linux/if_arp.h>
 37#include <linux/ip.h>
 38#include <linux/ipv6.h>
 39#include <linux/sctp.h>
 40#include <linux/tcp.h>
 41#include <linux/udp.h>
 42#include <linux/icmp.h>
 43#include <linux/icmpv6.h>
 44#include <linux/rculist.h>
 45#include <net/ip.h>
 46#include <net/ipv6.h>
 47#include <net/ndisc.h>
 48
 49#define TBL_MIN_BUCKETS		1024
 50#define REHASH_INTERVAL		(10 * 60 * HZ)
 51
 52static struct kmem_cache *flow_cache;
 53struct kmem_cache *flow_stats_cache __read_mostly;
 54
 55static u16 range_n_bytes(const struct sw_flow_key_range *range)
 56{
 57	return range->end - range->start;
 58}
 59
 60void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 61		       bool full, const struct sw_flow_mask *mask)
 62{
 63	int start = full ? 0 : mask->range.start;
 64	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
 65	const long *m = (const long *)((const u8 *)&mask->key + start);
 66	const long *s = (const long *)((const u8 *)src + start);
 67	long *d = (long *)((u8 *)dst + start);
 68	int i;
 69
 70	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
 71	 * if 'full' is false the memory outside of the 'mask->range' is left
 72	 * uninitialized. This can be used as an optimization when further
 73	 * operations on 'dst' only use contents within 'mask->range'.
 74	 */
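	/* Example: on a 64-bit build with mask->range = [8, 40) and
	 * full == false, the loop below copies (40 - 8) / 8 = 4 longs,
	 * ANDing each word of 'src' with the matching word of
	 * 'mask->key'; bytes 0-7 and 40 onward of 'dst' are untouched.
	 */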
 75	for (i = 0; i < len; i += sizeof(long))
 76		*d++ = *s++ & *m++;
 77}
 78
 79struct sw_flow *ovs_flow_alloc(void)
 80{
 81	struct sw_flow *flow;
 82	struct flow_stats *stats;
 83
 84	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
 85	if (!flow)
 86		return ERR_PTR(-ENOMEM);
 87
 88	flow->stats_last_writer = -1;
 89
 90	/* Initialize the default stat node. */
 91	stats = kmem_cache_alloc_node(flow_stats_cache,
 92				      GFP_KERNEL | __GFP_ZERO,
 93				      node_online(0) ? 0 : NUMA_NO_NODE);
 94	if (!stats)
 95		goto err;
 96
 97	spin_lock_init(&stats->lock);
 98
 99	RCU_INIT_POINTER(flow->stats[0], stats);
100
101	return flow;
102err:
103	kmem_cache_free(flow_cache, flow);
104	return ERR_PTR(-ENOMEM);
105}
106
107int ovs_flow_tbl_count(const struct flow_table *table)
108{
109	return table->count;
110}
111
112static struct flex_array *alloc_buckets(unsigned int n_buckets)
113{
114	struct flex_array *buckets;
115	int i, err;
116
117	buckets = flex_array_alloc(sizeof(struct hlist_head),
118				   n_buckets, GFP_KERNEL);
119	if (!buckets)
120		return NULL;
121
122	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
123	if (err) {
124		flex_array_free(buckets);
125		return NULL;
126	}
127
128	for (i = 0; i < n_buckets; i++)
129		INIT_HLIST_HEAD((struct hlist_head *)
130					flex_array_get(buckets, i));
131
132	return buckets;
133}
134
135static void flow_free(struct sw_flow *flow)
136{
137	int cpu;
138
139	if (ovs_identifier_is_key(&flow->id))
140		kfree(flow->id.unmasked_key);
141	if (flow->sf_acts)
142		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
143	/* We open-code this to make sure CPU 0 is always considered. */
144	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))
145		if (flow->stats[cpu])
146			kmem_cache_free(flow_stats_cache,
147					(struct flow_stats __force *)flow->stats[cpu]);
148	kmem_cache_free(flow_cache, flow);
149}
150
151static void rcu_free_flow_callback(struct rcu_head *rcu)
152{
153	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
154
155	flow_free(flow);
156}
157
158void ovs_flow_free(struct sw_flow *flow, bool deferred)
159{
160	if (!flow)
161		return;
162
163	if (deferred)
164		call_rcu(&flow->rcu, rcu_free_flow_callback);
165	else
166		flow_free(flow);
167}
168
169static void free_buckets(struct flex_array *buckets)
170{
171	flex_array_free(buckets);
172}
173
174
175static void __table_instance_destroy(struct table_instance *ti)
176{
177	free_buckets(ti->buckets);
178	kfree(ti);
179}
180
181static struct table_instance *table_instance_alloc(int new_size)
182{
183	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
184
185	if (!ti)
186		return NULL;
187
188	ti->buckets = alloc_buckets(new_size);
189
190	if (!ti->buckets) {
191		kfree(ti);
192		return NULL;
193	}
194	ti->n_buckets = new_size;
195	ti->node_ver = 0;
196	ti->keep_flows = false;
197	get_random_bytes(&ti->hash_seed, sizeof(u32));
198
199	return ti;
200}
201
202int ovs_flow_tbl_init(struct flow_table *table)
203{
204	struct table_instance *ti, *ufid_ti;
205
206	ti = table_instance_alloc(TBL_MIN_BUCKETS);
207
208	if (!ti)
209		return -ENOMEM;
210
211	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
212	if (!ufid_ti)
213		goto free_ti;
214
215	rcu_assign_pointer(table->ti, ti);
216	rcu_assign_pointer(table->ufid_ti, ufid_ti);
217	INIT_LIST_HEAD(&table->mask_list);
218	table->last_rehash = jiffies;
219	table->count = 0;
220	table->ufid_count = 0;
221	return 0;
222
223free_ti:
224	__table_instance_destroy(ti);
225	return -ENOMEM;
226}
227
228static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
229{
230	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
231
232	__table_instance_destroy(ti);
233}
234
235static void table_instance_destroy(struct table_instance *ti,
236				   struct table_instance *ufid_ti,
237				   bool deferred)
238{
239	int i;
240
241	if (!ti)
242		return;
243
244	BUG_ON(!ufid_ti);
245	if (ti->keep_flows)
246		goto skip_flows;
247
248	for (i = 0; i < ti->n_buckets; i++) {
249		struct sw_flow *flow;
250		struct hlist_head *head = flex_array_get(ti->buckets, i);
251		struct hlist_node *n;
252		int ver = ti->node_ver;
253		int ufid_ver = ufid_ti->node_ver;
254
255		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
256			hlist_del_rcu(&flow->flow_table.node[ver]);
257			if (ovs_identifier_is_ufid(&flow->id))
258				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
259			ovs_flow_free(flow, deferred);
260		}
261	}
262
263skip_flows:
264	if (deferred) {
265		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
266		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
267	} else {
268		__table_instance_destroy(ti);
269		__table_instance_destroy(ufid_ti);
270	}
271}
272
273/* No need for locking: this function is called only from an RCU
274 * callback or from the error path.
275 */
276void ovs_flow_tbl_destroy(struct flow_table *table)
277{
278	struct table_instance *ti = rcu_dereference_raw(table->ti);
279	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
280
281	table_instance_destroy(ti, ufid_ti, false);
282}
283
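/* Return the flow after the ('*bucket', '*last') cursor, or NULL when
 * the table is exhausted. The caller passes the updated cursor back in
 * on its next call, so a Netlink flow dump can resume where it stopped.
 */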
284struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
285				       u32 *bucket, u32 *last)
286{
287	struct sw_flow *flow;
288	struct hlist_head *head;
289	int ver;
290	int i;
291
292	ver = ti->node_ver;
293	while (*bucket < ti->n_buckets) {
294		i = 0;
295		head = flex_array_get(ti->buckets, *bucket);
296		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
297			if (i < *last) {
298				i++;
299				continue;
300			}
301			*last = i + 1;
302			return flow;
303		}
304		(*bucket)++;
305		*last = 0;
306	}
307
308	return NULL;
309}
310
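/* 'n_buckets' is always a power of two, so masking the jhash with
 * (n_buckets - 1) selects a bucket; 'hash_seed' re-seeds the hash per
 * table instance, so bucket placement differs across rehashes.
 */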
311static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
312{
313	hash = jhash_1word(hash, ti->hash_seed);
314	return flex_array_get(ti->buckets,
315				(hash & (ti->n_buckets - 1)));
316}
317
318static void table_instance_insert(struct table_instance *ti,
319				  struct sw_flow *flow)
320{
321	struct hlist_head *head;
322
323	head = find_bucket(ti, flow->flow_table.hash);
324	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
325}
326
327static void ufid_table_instance_insert(struct table_instance *ti,
328				       struct sw_flow *flow)
329{
330	struct hlist_head *head;
331
332	head = find_bucket(ti, flow->ufid_table.hash);
333	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
334}
335
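/* Flows carry two hlist nodes per table (node[0] and node[1]). The new
 * instance uses the complement of the old 'node_ver', so every flow can
 * be linked into both instances while the copy is in progress; setting
 * 'keep_flows' afterwards stops the old instance from freeing them.
 */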
336static void flow_table_copy_flows(struct table_instance *old,
337				  struct table_instance *new, bool ufid)
338{
339	int old_ver;
340	int i;
341
342	old_ver = old->node_ver;
343	new->node_ver = !old_ver;
344
345	/* Insert in new table. */
346	for (i = 0; i < old->n_buckets; i++) {
347		struct sw_flow *flow;
348		struct hlist_head *head;
349
350		head = flex_array_get(old->buckets, i);
351
352		if (ufid)
353			hlist_for_each_entry(flow, head,
354					     ufid_table.node[old_ver])
355				ufid_table_instance_insert(new, flow);
356		else
357			hlist_for_each_entry(flow, head,
358					     flow_table.node[old_ver])
359				table_instance_insert(new, flow);
360	}
361
362	old->keep_flows = true;
363}
364
365static struct table_instance *table_instance_rehash(struct table_instance *ti,
366						    int n_buckets, bool ufid)
367{
368	struct table_instance *new_ti;
369
370	new_ti = table_instance_alloc(n_buckets);
371	if (!new_ti)
372		return NULL;
373
374	flow_table_copy_flows(ti, new_ti, ufid);
375
376	return new_ti;
377}
378
379int ovs_flow_tbl_flush(struct flow_table *flow_table)
380{
381	struct table_instance *old_ti, *new_ti;
382	struct table_instance *old_ufid_ti, *new_ufid_ti;
383
384	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
385	if (!new_ti)
386		return -ENOMEM;
387	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
388	if (!new_ufid_ti)
389		goto err_free_ti;
390
391	old_ti = ovsl_dereference(flow_table->ti);
392	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
393
394	rcu_assign_pointer(flow_table->ti, new_ti);
395	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
396	flow_table->last_rehash = jiffies;
397	flow_table->count = 0;
398	flow_table->ufid_count = 0;
399
400	table_instance_destroy(old_ti, old_ufid_ti, true);
401	return 0;
402
403err_free_ti:
404	__table_instance_destroy(new_ti);
405	return -ENOMEM;
406}
407
408static u32 flow_hash(const struct sw_flow_key *key,
409		     const struct sw_flow_key_range *range)
410{
411	int key_start = range->start;
412	int key_end = range->end;
413	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
414	int hash_u32s = (key_end - key_start) >> 2;
415
 416	/* Make sure the number of hash bytes is a multiple of u32. */
417	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
418
419	return jhash2(hash_key, hash_u32s, 0);
420}
421
422static int flow_key_start(const struct sw_flow_key *key)
423{
424	if (key->tun_proto)
425		return 0;
426	else
427		return rounddown(offsetof(struct sw_flow_key, phy),
428					  sizeof(long));
429}
430
431static bool cmp_key(const struct sw_flow_key *key1,
432		    const struct sw_flow_key *key2,
433		    int key_start, int key_end)
434{
435	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
436	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
437	long diffs = 0;
438	int i;
439
440	for (i = key_start; i < key_end;  i += sizeof(long))
441		diffs |= *cp1++ ^ *cp2++;
442
443	return diffs == 0;
444}
445
446static bool flow_cmp_masked_key(const struct sw_flow *flow,
447				const struct sw_flow_key *key,
448				const struct sw_flow_key_range *range)
449{
450	return cmp_key(&flow->key, key, range->start, range->end);
451}
452
453static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
454				      const struct sw_flow_match *match)
455{
456	struct sw_flow_key *key = match->key;
457	int key_start = flow_key_start(key);
458	int key_end = match->range.end;
459
460	BUG_ON(ovs_identifier_is_ufid(&flow->id));
461	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
462}
463
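/* Look up 'unmasked' under a single 'mask': mask the key, hash only the
 * bytes inside 'mask->range', then walk the bucket comparing hash, mask
 * pointer and masked key. Callers iterate this over the mask list.
 */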
464static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
465					  const struct sw_flow_key *unmasked,
466					  const struct sw_flow_mask *mask)
467{
468	struct sw_flow *flow;
469	struct hlist_head *head;
470	u32 hash;
471	struct sw_flow_key masked_key;
472
473	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
474	hash = flow_hash(&masked_key, &mask->range);
475	head = find_bucket(ti, hash);
476	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
477		if (flow->mask == mask && flow->flow_table.hash == hash &&
478		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
479			return flow;
480	}
481	return NULL;
482}
483
484struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
485				    const struct sw_flow_key *key,
486				    u32 *n_mask_hit)
487{
488	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
489	struct sw_flow_mask *mask;
490	struct sw_flow *flow;
491
492	*n_mask_hit = 0;
493	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
494		(*n_mask_hit)++;
495		flow = masked_flow_lookup(ti, key, mask);
496		if (flow)  /* Found */
497			return flow;
498	}
499	return NULL;
500}
501
502struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
503				    const struct sw_flow_key *key)
504{
505	u32 __always_unused n_mask_hit;
506
507	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
508}
509
510struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
511					  const struct sw_flow_match *match)
512{
513	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
514	struct sw_flow_mask *mask;
515	struct sw_flow *flow;
516
517	/* Always called under ovs-mutex. */
518	list_for_each_entry(mask, &tbl->mask_list, list) {
519		flow = masked_flow_lookup(ti, match->key, mask);
520		if (flow && ovs_identifier_is_key(&flow->id) &&
521		    ovs_flow_cmp_unmasked_key(flow, match))
522			return flow;
523	}
524	return NULL;
525}
526
527static u32 ufid_hash(const struct sw_flow_id *sfid)
528{
529	return jhash(sfid->ufid, sfid->ufid_len, 0);
530}
531
532static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
533			      const struct sw_flow_id *sfid)
534{
535	if (flow->id.ufid_len != sfid->ufid_len)
536		return false;
537
538	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
539}
540
541bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
542{
543	if (ovs_identifier_is_ufid(&flow->id))
544		return flow_cmp_masked_key(flow, match->key, &match->range);
545
546	return ovs_flow_cmp_unmasked_key(flow, match);
547}
548
549struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
550					 const struct sw_flow_id *ufid)
551{
552	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
553	struct sw_flow *flow;
554	struct hlist_head *head;
555	u32 hash;
556
557	hash = ufid_hash(ufid);
558	head = find_bucket(ti, hash);
559	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
560		if (flow->ufid_table.hash == hash &&
561		    ovs_flow_cmp_ufid(flow, ufid))
562			return flow;
563	}
564	return NULL;
565}
566
567int ovs_flow_tbl_num_masks(const struct flow_table *table)
568{
569	struct sw_flow_mask *mask;
570	int num = 0;
571
572	list_for_each_entry(mask, &table->mask_list, list)
573		num++;
574
575	return num;
576}
577
578static struct table_instance *table_instance_expand(struct table_instance *ti,
579						    bool ufid)
580{
581	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
582}
583
584/* Remove 'mask' from the mask list, if it is not needed any more. */
585static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
586{
587	if (mask) {
588		/* ovs-lock is required to protect mask-refcount and
589		 * mask list.
590		 */
591		ASSERT_OVSL();
592		BUG_ON(!mask->ref_count);
593		mask->ref_count--;
594
595		if (!mask->ref_count) {
596			list_del_rcu(&mask->list);
597			kfree_rcu(mask, rcu);
598		}
599	}
600}
601
602/* Must be called with OVS mutex held. */
603void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
604{
605	struct table_instance *ti = ovsl_dereference(table->ti);
606	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
607
608	BUG_ON(table->count == 0);
609	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
610	table->count--;
611	if (ovs_identifier_is_ufid(&flow->id)) {
612		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
613		table->ufid_count--;
614	}
615
616	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
617	 * accessible as long as the RCU read lock is held.
618	 */
619	flow_mask_remove(table, flow->mask);
620}
621
622static struct sw_flow_mask *mask_alloc(void)
623{
624	struct sw_flow_mask *mask;
625
626	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
627	if (mask)
628		mask->ref_count = 1;
629
630	return mask;
631}
632
633static bool mask_equal(const struct sw_flow_mask *a,
634		       const struct sw_flow_mask *b)
635{
636	const u8 *a_ = (const u8 *)&a->key + a->range.start;
637	const u8 *b_ = (const u8 *)&b->key + b->range.start;
638
639	return  (a->range.end == b->range.end)
640		&& (a->range.start == b->range.start)
641		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
642}
643
644static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
645					   const struct sw_flow_mask *mask)
646{
647	struct list_head *ml;
648
649	list_for_each(ml, &tbl->mask_list) {
650		struct sw_flow_mask *m;
651		m = container_of(ml, struct sw_flow_mask, list);
652		if (mask_equal(mask, m))
653			return m;
654	}
655
656	return NULL;
657}
658
659/* Add 'mask' into the mask list, if it is not already there. */
660static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
661			    const struct sw_flow_mask *new)
662{
663	struct sw_flow_mask *mask;
664	mask = flow_mask_find(tbl, new);
665	if (!mask) {
666		/* Allocate a new mask if none exists. */
667		mask = mask_alloc();
668		if (!mask)
669			return -ENOMEM;
670		mask->key = new->key;
671		mask->range = new->range;
672		list_add_rcu(&mask->list, &tbl->mask_list);
673	} else {
674		BUG_ON(!mask->ref_count);
675		mask->ref_count++;
676	}
677
678	flow->mask = mask;
679	return 0;
680}
681
682/* Must be called with OVS mutex held. */
683static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
684{
685	struct table_instance *new_ti = NULL;
686	struct table_instance *ti;
687
688	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
689	ti = ovsl_dereference(table->ti);
690	table_instance_insert(ti, flow);
691	table->count++;
692
693	/* Expand table, if necessary, to make room. */
694	if (table->count > ti->n_buckets)
695		new_ti = table_instance_expand(ti, false);
696	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
697		new_ti = table_instance_rehash(ti, ti->n_buckets, false);
698
699	if (new_ti) {
700		rcu_assign_pointer(table->ti, new_ti);
701		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
702		table->last_rehash = jiffies;
703	}
704}
705
706/* Must be called with OVS mutex held. */
707static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
708{
709	struct table_instance *ti;
710
711	flow->ufid_table.hash = ufid_hash(&flow->id);
712	ti = ovsl_dereference(table->ufid_ti);
713	ufid_table_instance_insert(ti, flow);
714	table->ufid_count++;
715
716	/* Expand table, if necessary, to make room. */
717	if (table->ufid_count > ti->n_buckets) {
718		struct table_instance *new_ti;
719
720		new_ti = table_instance_expand(ti, true);
721		if (new_ti) {
722			rcu_assign_pointer(table->ufid_ti, new_ti);
723			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
724		}
725	}
726}
727
728/* Must be called with OVS mutex held. */
729int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
730			const struct sw_flow_mask *mask)
731{
732	int err;
733
734	err = flow_mask_insert(table, flow, mask);
735	if (err)
736		return err;
737	flow_key_insert(table, flow);
738	if (ovs_identifier_is_ufid(&flow->id))
739		flow_ufid_insert(table, flow);
740
741	return 0;
742}
743
744/* Initializes the flow module.
745 * Returns zero if successful or a negative error code. */
746int ovs_flow_init(void)
747{
748	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
749	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
750
751	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
752				       + (nr_cpu_ids
753					  * sizeof(struct flow_stats *)),
754				       0, 0, NULL);
755	if (flow_cache == NULL)
756		return -ENOMEM;
757
758	flow_stats_cache
759		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
760				    0, SLAB_HWCACHE_ALIGN, NULL);
761	if (flow_stats_cache == NULL) {
762		kmem_cache_destroy(flow_cache);
763		flow_cache = NULL;
764		return -ENOMEM;
765	}
766
767	return 0;
768}
769
770/* Uninitializes the flow module. */
771void ovs_flow_exit(void)
772{
773	kmem_cache_destroy(flow_stats_cache);
774	kmem_cache_destroy(flow_cache);
775}
v4.17
  1/*
  2 * Copyright (c) 2007-2014 Nicira, Inc.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of version 2 of the GNU General Public
  6 * License as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but
  9 * WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public License
 14 * along with this program; if not, write to the Free Software
 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 16 * 02110-1301, USA
 17 */
 18
 19#include "flow.h"
 20#include "datapath.h"
 21#include "flow_netlink.h"
 22#include <linux/uaccess.h>
 23#include <linux/netdevice.h>
 24#include <linux/etherdevice.h>
 25#include <linux/if_ether.h>
 26#include <linux/if_vlan.h>
 27#include <net/llc_pdu.h>
 28#include <linux/kernel.h>
 29#include <linux/jhash.h>
 30#include <linux/jiffies.h>
 31#include <linux/llc.h>
 32#include <linux/module.h>
 33#include <linux/in.h>
 34#include <linux/rcupdate.h>
 35#include <linux/cpumask.h>
 36#include <linux/if_arp.h>
 37#include <linux/ip.h>
 38#include <linux/ipv6.h>
 39#include <linux/sctp.h>
 40#include <linux/tcp.h>
 41#include <linux/udp.h>
 42#include <linux/icmp.h>
 43#include <linux/icmpv6.h>
 44#include <linux/rculist.h>
 45#include <net/ip.h>
 46#include <net/ipv6.h>
 47#include <net/ndisc.h>
 48
 49#define TBL_MIN_BUCKETS		1024
 50#define REHASH_INTERVAL		(10 * 60 * HZ)
 51
 52static struct kmem_cache *flow_cache;
 53struct kmem_cache *flow_stats_cache __read_mostly;
 54
 55static u16 range_n_bytes(const struct sw_flow_key_range *range)
 56{
 57	return range->end - range->start;
 58}
 59
 60void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 61		       bool full, const struct sw_flow_mask *mask)
 62{
 63	int start = full ? 0 : mask->range.start;
 64	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
 65	const long *m = (const long *)((const u8 *)&mask->key + start);
 66	const long *s = (const long *)((const u8 *)src + start);
 67	long *d = (long *)((u8 *)dst + start);
 68	int i;
 69
 70	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
 71	 * if 'full' is false the memory outside of the 'mask->range' is left
 72	 * uninitialized. This can be used as an optimization when further
 73	 * operations on 'dst' only use contents within 'mask->range'.
 74	 */
 75	for (i = 0; i < len; i += sizeof(long))
 76		*d++ = *s++ & *m++;
 77}
 78
 79struct sw_flow *ovs_flow_alloc(void)
 80{
 81	struct sw_flow *flow;
 82	struct flow_stats *stats;
 83
 84	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
 85	if (!flow)
 86		return ERR_PTR(-ENOMEM);
 87
 88	flow->stats_last_writer = -1;
 89
 90	/* Initialize the default stat node. */
 91	stats = kmem_cache_alloc_node(flow_stats_cache,
 92				      GFP_KERNEL | __GFP_ZERO,
 93				      node_online(0) ? 0 : NUMA_NO_NODE);
 94	if (!stats)
 95		goto err;
 96
 97	spin_lock_init(&stats->lock);
 98
 99	RCU_INIT_POINTER(flow->stats[0], stats);
100
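	/* Unlike the v4.10.11 copy above, this version records every CPU
	 * that owns a stats node in 'cpu_used_mask', so flow_free() can
	 * walk just those CPUs instead of all possible ones.
	 */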
101	cpumask_set_cpu(0, &flow->cpu_used_mask);
102
103	return flow;
104err:
105	kmem_cache_free(flow_cache, flow);
106	return ERR_PTR(-ENOMEM);
107}
108
109int ovs_flow_tbl_count(const struct flow_table *table)
110{
111	return table->count;
112}
113
114static struct flex_array *alloc_buckets(unsigned int n_buckets)
115{
116	struct flex_array *buckets;
117	int i, err;
118
119	buckets = flex_array_alloc(sizeof(struct hlist_head),
120				   n_buckets, GFP_KERNEL);
121	if (!buckets)
122		return NULL;
123
124	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
125	if (err) {
126		flex_array_free(buckets);
127		return NULL;
128	}
129
130	for (i = 0; i < n_buckets; i++)
131		INIT_HLIST_HEAD((struct hlist_head *)
132					flex_array_get(buckets, i));
133
134	return buckets;
135}
136
137static void flow_free(struct sw_flow *flow)
138{
139	int cpu;
140
141	if (ovs_identifier_is_key(&flow->id))
142		kfree(flow->id.unmasked_key);
143	if (flow->sf_acts)
144		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
145	/* We open-code this to make sure CPU 0 is always considered. */
146	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
147		if (flow->stats[cpu])
148			kmem_cache_free(flow_stats_cache,
149					(struct flow_stats __force *)flow->stats[cpu]);
150	kmem_cache_free(flow_cache, flow);
151}
152
153static void rcu_free_flow_callback(struct rcu_head *rcu)
154{
155	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
156
157	flow_free(flow);
158}
159
160void ovs_flow_free(struct sw_flow *flow, bool deferred)
161{
162	if (!flow)
163		return;
164
165	if (deferred)
166		call_rcu(&flow->rcu, rcu_free_flow_callback);
167	else
168		flow_free(flow);
169}
170
171static void free_buckets(struct flex_array *buckets)
172{
173	flex_array_free(buckets);
174}
175
176
177static void __table_instance_destroy(struct table_instance *ti)
178{
179	free_buckets(ti->buckets);
180	kfree(ti);
181}
182
183static struct table_instance *table_instance_alloc(int new_size)
184{
185	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
186
187	if (!ti)
188		return NULL;
189
190	ti->buckets = alloc_buckets(new_size);
191
192	if (!ti->buckets) {
193		kfree(ti);
194		return NULL;
195	}
196	ti->n_buckets = new_size;
197	ti->node_ver = 0;
198	ti->keep_flows = false;
199	get_random_bytes(&ti->hash_seed, sizeof(u32));
200
201	return ti;
202}
203
204int ovs_flow_tbl_init(struct flow_table *table)
205{
206	struct table_instance *ti, *ufid_ti;
207
208	ti = table_instance_alloc(TBL_MIN_BUCKETS);
209
210	if (!ti)
211		return -ENOMEM;
212
213	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
214	if (!ufid_ti)
215		goto free_ti;
216
217	rcu_assign_pointer(table->ti, ti);
218	rcu_assign_pointer(table->ufid_ti, ufid_ti);
219	INIT_LIST_HEAD(&table->mask_list);
220	table->last_rehash = jiffies;
221	table->count = 0;
222	table->ufid_count = 0;
223	return 0;
224
225free_ti:
226	__table_instance_destroy(ti);
227	return -ENOMEM;
228}
229
230static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
231{
232	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
233
234	__table_instance_destroy(ti);
235}
236
237static void table_instance_destroy(struct table_instance *ti,
238				   struct table_instance *ufid_ti,
239				   bool deferred)
240{
241	int i;
242
243	if (!ti)
244		return;
245
246	BUG_ON(!ufid_ti);
247	if (ti->keep_flows)
248		goto skip_flows;
249
250	for (i = 0; i < ti->n_buckets; i++) {
251		struct sw_flow *flow;
252		struct hlist_head *head = flex_array_get(ti->buckets, i);
253		struct hlist_node *n;
254		int ver = ti->node_ver;
255		int ufid_ver = ufid_ti->node_ver;
256
257		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
258			hlist_del_rcu(&flow->flow_table.node[ver]);
259			if (ovs_identifier_is_ufid(&flow->id))
260				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
261			ovs_flow_free(flow, deferred);
262		}
263	}
264
265skip_flows:
266	if (deferred) {
267		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
268		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
269	} else {
270		__table_instance_destroy(ti);
271		__table_instance_destroy(ufid_ti);
272	}
273}
274
275/* No need for locking: this function is called only from an RCU
276 * callback or from the error path.
277 */
278void ovs_flow_tbl_destroy(struct flow_table *table)
279{
280	struct table_instance *ti = rcu_dereference_raw(table->ti);
281	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
282
283	table_instance_destroy(ti, ufid_ti, false);
284}
285
286struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
287				       u32 *bucket, u32 *last)
288{
289	struct sw_flow *flow;
290	struct hlist_head *head;
291	int ver;
292	int i;
293
294	ver = ti->node_ver;
295	while (*bucket < ti->n_buckets) {
296		i = 0;
297		head = flex_array_get(ti->buckets, *bucket);
298		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
299			if (i < *last) {
300				i++;
301				continue;
302			}
303			*last = i + 1;
304			return flow;
305		}
306		(*bucket)++;
307		*last = 0;
308	}
309
310	return NULL;
311}
312
313static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
314{
315	hash = jhash_1word(hash, ti->hash_seed);
316	return flex_array_get(ti->buckets,
317				(hash & (ti->n_buckets - 1)));
318}
319
320static void table_instance_insert(struct table_instance *ti,
321				  struct sw_flow *flow)
322{
323	struct hlist_head *head;
324
325	head = find_bucket(ti, flow->flow_table.hash);
326	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
327}
328
329static void ufid_table_instance_insert(struct table_instance *ti,
330				       struct sw_flow *flow)
331{
332	struct hlist_head *head;
333
334	head = find_bucket(ti, flow->ufid_table.hash);
335	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
336}
337
338static void flow_table_copy_flows(struct table_instance *old,
339				  struct table_instance *new, bool ufid)
340{
341	int old_ver;
342	int i;
343
344	old_ver = old->node_ver;
345	new->node_ver = !old_ver;
346
347	/* Insert in new table. */
348	for (i = 0; i < old->n_buckets; i++) {
349		struct sw_flow *flow;
350		struct hlist_head *head;
351
352		head = flex_array_get(old->buckets, i);
353
354		if (ufid)
355			hlist_for_each_entry(flow, head,
356					     ufid_table.node[old_ver])
357				ufid_table_instance_insert(new, flow);
358		else
359			hlist_for_each_entry(flow, head,
360					     flow_table.node[old_ver])
361				table_instance_insert(new, flow);
362	}
363
364	old->keep_flows = true;
365}
366
367static struct table_instance *table_instance_rehash(struct table_instance *ti,
368						    int n_buckets, bool ufid)
369{
370	struct table_instance *new_ti;
371
372	new_ti = table_instance_alloc(n_buckets);
373	if (!new_ti)
374		return NULL;
375
376	flow_table_copy_flows(ti, new_ti, ufid);
377
378	return new_ti;
379}
380
381int ovs_flow_tbl_flush(struct flow_table *flow_table)
382{
383	struct table_instance *old_ti, *new_ti;
384	struct table_instance *old_ufid_ti, *new_ufid_ti;
385
386	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
387	if (!new_ti)
388		return -ENOMEM;
389	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
390	if (!new_ufid_ti)
391		goto err_free_ti;
392
393	old_ti = ovsl_dereference(flow_table->ti);
394	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
395
396	rcu_assign_pointer(flow_table->ti, new_ti);
397	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
398	flow_table->last_rehash = jiffies;
399	flow_table->count = 0;
400	flow_table->ufid_count = 0;
401
402	table_instance_destroy(old_ti, old_ufid_ti, true);
403	return 0;
404
405err_free_ti:
406	__table_instance_destroy(new_ti);
407	return -ENOMEM;
408}
409
410static u32 flow_hash(const struct sw_flow_key *key,
411		     const struct sw_flow_key_range *range)
412{
413	int key_start = range->start;
414	int key_end = range->end;
415	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
416	int hash_u32s = (key_end - key_start) >> 2;
417
 418	/* Make sure the number of hash bytes is a multiple of u32. */
419	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
420
421	return jhash2(hash_key, hash_u32s, 0);
422}
423
424static int flow_key_start(const struct sw_flow_key *key)
425{
426	if (key->tun_proto)
427		return 0;
428	else
429		return rounddown(offsetof(struct sw_flow_key, phy),
430					  sizeof(long));
431}
432
433static bool cmp_key(const struct sw_flow_key *key1,
434		    const struct sw_flow_key *key2,
435		    int key_start, int key_end)
436{
437	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
438	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
439	long diffs = 0;
440	int i;
441
442	for (i = key_start; i < key_end;  i += sizeof(long))
443		diffs |= *cp1++ ^ *cp2++;
444
445	return diffs == 0;
446}
447
448static bool flow_cmp_masked_key(const struct sw_flow *flow,
449				const struct sw_flow_key *key,
450				const struct sw_flow_key_range *range)
451{
452	return cmp_key(&flow->key, key, range->start, range->end);
453}
454
455static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
456				      const struct sw_flow_match *match)
457{
458	struct sw_flow_key *key = match->key;
459	int key_start = flow_key_start(key);
460	int key_end = match->range.end;
461
462	BUG_ON(ovs_identifier_is_ufid(&flow->id));
463	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
464}
465
466static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
467					  const struct sw_flow_key *unmasked,
468					  const struct sw_flow_mask *mask)
469{
470	struct sw_flow *flow;
471	struct hlist_head *head;
472	u32 hash;
473	struct sw_flow_key masked_key;
474
475	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
476	hash = flow_hash(&masked_key, &mask->range);
477	head = find_bucket(ti, hash);
478	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
479		if (flow->mask == mask && flow->flow_table.hash == hash &&
480		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
481			return flow;
482	}
483	return NULL;
484}
485
486struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
487				    const struct sw_flow_key *key,
488				    u32 *n_mask_hit)
489{
490	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
491	struct sw_flow_mask *mask;
492	struct sw_flow *flow;
493
494	*n_mask_hit = 0;
495	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
496		(*n_mask_hit)++;
497		flow = masked_flow_lookup(ti, key, mask);
498		if (flow)  /* Found */
499			return flow;
500	}
501	return NULL;
502}
503
504struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
505				    const struct sw_flow_key *key)
506{
507	u32 __always_unused n_mask_hit;
508
509	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
510}
511
512struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
513					  const struct sw_flow_match *match)
514{
515	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
516	struct sw_flow_mask *mask;
517	struct sw_flow *flow;
518
519	/* Always called under ovs-mutex. */
520	list_for_each_entry(mask, &tbl->mask_list, list) {
521		flow = masked_flow_lookup(ti, match->key, mask);
522		if (flow && ovs_identifier_is_key(&flow->id) &&
523		    ovs_flow_cmp_unmasked_key(flow, match))
524			return flow;
525	}
526	return NULL;
527}
528
529static u32 ufid_hash(const struct sw_flow_id *sfid)
530{
531	return jhash(sfid->ufid, sfid->ufid_len, 0);
532}
533
534static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
535			      const struct sw_flow_id *sfid)
536{
537	if (flow->id.ufid_len != sfid->ufid_len)
538		return false;
539
540	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
541}
542
543bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
544{
545	if (ovs_identifier_is_ufid(&flow->id))
546		return flow_cmp_masked_key(flow, match->key, &match->range);
547
548	return ovs_flow_cmp_unmasked_key(flow, match);
549}
550
551struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
552					 const struct sw_flow_id *ufid)
553{
554	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
555	struct sw_flow *flow;
556	struct hlist_head *head;
557	u32 hash;
558
559	hash = ufid_hash(ufid);
560	head = find_bucket(ti, hash);
561	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
562		if (flow->ufid_table.hash == hash &&
563		    ovs_flow_cmp_ufid(flow, ufid))
564			return flow;
565	}
566	return NULL;
567}
568
569int ovs_flow_tbl_num_masks(const struct flow_table *table)
570{
571	struct sw_flow_mask *mask;
572	int num = 0;
573
574	list_for_each_entry(mask, &table->mask_list, list)
575		num++;
576
577	return num;
578}
579
580static struct table_instance *table_instance_expand(struct table_instance *ti,
581						    bool ufid)
582{
583	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
584}
585
586/* Remove 'mask' from the mask list, if it is not needed any more. */
587static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
588{
589	if (mask) {
590		/* ovs-lock is required to protect mask-refcount and
591		 * mask list.
592		 */
593		ASSERT_OVSL();
594		BUG_ON(!mask->ref_count);
595		mask->ref_count--;
596
597		if (!mask->ref_count) {
598			list_del_rcu(&mask->list);
599			kfree_rcu(mask, rcu);
600		}
601	}
602}
603
604/* Must be called with OVS mutex held. */
605void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
606{
607	struct table_instance *ti = ovsl_dereference(table->ti);
608	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
609
610	BUG_ON(table->count == 0);
611	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
612	table->count--;
613	if (ovs_identifier_is_ufid(&flow->id)) {
614		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
615		table->ufid_count--;
616	}
617
618	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
619	 * accessible as long as the RCU read lock is held.
620	 */
621	flow_mask_remove(table, flow->mask);
622}
623
624static struct sw_flow_mask *mask_alloc(void)
625{
626	struct sw_flow_mask *mask;
627
628	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
629	if (mask)
630		mask->ref_count = 1;
631
632	return mask;
633}
634
635static bool mask_equal(const struct sw_flow_mask *a,
636		       const struct sw_flow_mask *b)
637{
638	const u8 *a_ = (const u8 *)&a->key + a->range.start;
639	const u8 *b_ = (const u8 *)&b->key + b->range.start;
640
641	return  (a->range.end == b->range.end)
642		&& (a->range.start == b->range.start)
643		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
644}
645
646static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
647					   const struct sw_flow_mask *mask)
648{
649	struct list_head *ml;
650
651	list_for_each(ml, &tbl->mask_list) {
652		struct sw_flow_mask *m;
653		m = container_of(ml, struct sw_flow_mask, list);
654		if (mask_equal(mask, m))
655			return m;
656	}
657
658	return NULL;
659}
660
661/* Add 'mask' into the mask list, if it is not already there. */
662static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
663			    const struct sw_flow_mask *new)
664{
665	struct sw_flow_mask *mask;
666	mask = flow_mask_find(tbl, new);
667	if (!mask) {
668		/* Allocate a new mask if none exists. */
669		mask = mask_alloc();
670		if (!mask)
671			return -ENOMEM;
672		mask->key = new->key;
673		mask->range = new->range;
674		list_add_rcu(&mask->list, &tbl->mask_list);
675	} else {
676		BUG_ON(!mask->ref_count);
677		mask->ref_count++;
678	}
679
680	flow->mask = mask;
681	return 0;
682}
683
684/* Must be called with OVS mutex held. */
685static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
686{
687	struct table_instance *new_ti = NULL;
688	struct table_instance *ti;
689
690	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
691	ti = ovsl_dereference(table->ti);
692	table_instance_insert(ti, flow);
693	table->count++;
694
695	/* Expand table, if necessary, to make room. */
696	if (table->count > ti->n_buckets)
697		new_ti = table_instance_expand(ti, false);
698	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
699		new_ti = table_instance_rehash(ti, ti->n_buckets, false);
700
701	if (new_ti) {
702		rcu_assign_pointer(table->ti, new_ti);
703		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
704		table->last_rehash = jiffies;
705	}
706}
707
708/* Must be called with OVS mutex held. */
709static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
710{
711	struct table_instance *ti;
712
713	flow->ufid_table.hash = ufid_hash(&flow->id);
714	ti = ovsl_dereference(table->ufid_ti);
715	ufid_table_instance_insert(ti, flow);
716	table->ufid_count++;
717
718	/* Expand table, if necessary, to make room. */
719	if (table->ufid_count > ti->n_buckets) {
720		struct table_instance *new_ti;
721
722		new_ti = table_instance_expand(ti, true);
723		if (new_ti) {
724			rcu_assign_pointer(table->ufid_ti, new_ti);
725			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
726		}
727	}
728}
729
730/* Must be called with OVS mutex held. */
731int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
732			const struct sw_flow_mask *mask)
733{
734	int err;
735
736	err = flow_mask_insert(table, flow, mask);
737	if (err)
738		return err;
739	flow_key_insert(table, flow);
740	if (ovs_identifier_is_ufid(&flow->id))
741		flow_ufid_insert(table, flow);
742
743	return 0;
744}
745
746/* Initializes the flow module.
747 * Returns zero if successful or a negative error code. */
748int ovs_flow_init(void)
749{
750	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
751	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
752
753	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
754				       + (nr_cpu_ids
755					  * sizeof(struct flow_stats *)),
756				       0, 0, NULL);
757	if (flow_cache == NULL)
758		return -ENOMEM;
759
760	flow_stats_cache
761		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
762				    0, SLAB_HWCACHE_ALIGN, NULL);
763	if (flow_stats_cache == NULL) {
764		kmem_cache_destroy(flow_cache);
765		flow_cache = NULL;
766		return -ENOMEM;
767	}
768
769	return 0;
770}
771
772/* Uninitializes the flow module. */
773void ovs_flow_exit(void)
774{
775	kmem_cache_destroy(flow_stats_cache);
776	kmem_cache_destroy(flow_cache);
777}
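
The two listings differ only in the per-CPU stats bookkeeping ('cpu_used_mask' in v4.17); the flow-table API is the same in both. As a closing illustration, here is a minimal sketch of how a caller might drive that API. It is not taken from the kernel tree: the function name is hypothetical, "flow_table.h" stands in for the OVS-internal headers, and flow-ID setup plus most error handling are abbreviated.

	#include <linux/err.h>
	#include "flow_table.h"

	/* Hypothetical helper: insert one flow, look it up once on the
	 * RCU read side, then tear it down. Assumes ovs_flow_init() and
	 * ovs_flow_tbl_init(table) already ran and that the caller holds
	 * the OVS mutex, which insert and remove require.
	 */
	static int flow_table_demo(struct flow_table *table,
				   const struct sw_flow_key *masked_key,
				   const struct sw_flow_mask *mask)
	{
		struct sw_flow *flow, *hit;
		int err;

		flow = ovs_flow_alloc();		/* ERR_PTR on failure */
		if (IS_ERR(flow))
			return PTR_ERR(flow);

		flow->key = *masked_key;		/* key already ANDed with 'mask' */
		err = ovs_flow_tbl_insert(table, flow, mask);
		if (err) {
			ovs_flow_free(flow, false);	/* immediate free */
			return err;
		}

		rcu_read_lock();			/* fast-path lookup */
		hit = ovs_flow_tbl_lookup(table, masked_key);
		rcu_read_unlock();

		ovs_flow_tbl_remove(table, flow);
		ovs_flow_free(flow, true);		/* freed after a grace period */
		return hit ? 0 : -ENOENT;
	}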