/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
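
/*
 * Illustrative sketch (not part of the original header): a minimal
 * hashable object and a matching parameter block. The names "test_obj"
 * and "test_params" are hypothetical and are reused by the usage
 * sketches further down.
 */
struct test_obj {
	int			value;	/* key: 4-byte aligned, hashed with jhash2 */
	struct rhash_head	node;	/* linkage owned by the hash table */
};

static const struct rhashtable_params test_params = {
	.head_offset		= offsetof(struct test_obj, node),
	.key_offset		= offsetof(struct test_obj, value),
	.key_len		= sizeof(int),
	.nelem_hint		= 1024,
	.automatic_shrinking	= true,
};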

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}
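
/*
 * Illustrative sketch (not part of the original header): how a nulls
 * marker round-trips. NULLS_MARKER(v) encodes v as ((v << 1) | 1), so
 * the low bit doubles as the end-of-chain flag that rht_is_a_nulls()
 * tests, while rht_get_nulls_value() recovers the payload.
 */
static inline bool rht_nulls_roundtrip_example(const struct rhashtable *ht,
					       u32 hash)
{
	struct rhash_head *end = (struct rhash_head *)rht_marker(ht, hash);

	/* Both conditions hold for any hash value */
	return rht_is_a_nulls(end) &&
	       rht_get_nulls_value(end) == ht->p.nulls_base + hash;
}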

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}
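
/*
 * Illustrative sketch (not part of the original header): because
 * "params" is passed by value, a compile-time-constant argument lets
 * the branches above fold away after inlining. With the hypothetical
 * 4-byte key from test_params the whole call reduces to jhash2() plus
 * a mask; a 7-byte key would fall back to jhash() instead.
 */
static inline unsigned int example_hash_key(struct rhashtable *ht,
					    const struct bucket_table *tbl,
					    const int *key)
{
	/* Equivalent to rht_bucket_index(tbl, jhash2(key, 1, tbl->hash_rnd)) */
	return rht_key_hashfn(ht, tbl, key, test_params);
}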

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
		(!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}
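
/*
 * Worked example (illustrative only): the thresholds the helpers above
 * produce for a 1024-bucket table.
 */
static inline unsigned int rht_watermark_example(void)
{
	unsigned int size = 1024;
	unsigned int grow_at = size / 4 * 3;	/* 768: grow once nelems > 768 */
	unsigned int shrink_at = size * 3 / 10;	/* 307: shrink once nelems < 307 */

	/* rht_grow_above_100() additionally fires once nelems > 1024 */
	return grow_at - shrink_at;	/* 461-element hysteresis band */
}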

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking as locking the bucket in both tables
 * during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
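
/*
 * Worked example (illustrative only): when an 8-bucket table grows to
 * 16 buckets with at most 8 bucket locks (locks_mask == 7), old bucket
 * 3 redistributes into new buckets 3 and 11. Since (3 & 7) == (11 & 7),
 * both new buckets map to the same lock, so holding that single lock
 * protects every entry that can migrate from old bucket 3.
 */
static inline bool rht_lock_covers_split_example(void)
{
	unsigned int locks_mask = 7;	/* 8 locks for the 16-bucket table */

	return (3 & locks_mask) == (11 & locks_mask);	/* always true */
}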

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
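
/*
 * Illustrative sketch (not part of the original header): counting all
 * objects with the walker API. -EAGAIN from rhashtable_walk_start() or
 * ERR_PTR(-EAGAIN) from rhashtable_walk_next() signal a concurrent
 * resize; the walk then continues but may see duplicates.
 */
static inline unsigned int example_walk_count(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;
	unsigned int n = 0;

	if (rhashtable_walk_init(ht, &iter))
		return 0;

	rhashtable_walk_start(&iter);	/* -EAGAIN: resized first, may see dups */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize mid-walk; keep going */
			break;
		}
		n++;
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	return n;
}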

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
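
/*
 * Illustrative sketch (not part of the original header): typical setup
 * and teardown, reusing the hypothetical test_params from above. Error
 * handling is deliberately minimal.
 */
static inline int example_setup_teardown(struct rhashtable *ht)
{
	int err;

	err = rhashtable_init(ht, &test_params);
	if (err)
		return err;

	/* ... insert, look up and remove objects here ... */

	rhashtable_destroy(ht);
	return 0;
}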

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member)	\
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)
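
/*
 * Illustrative sketch (not part of the original header): scanning one
 * bucket under its bucket lock with rht_for_each_entry(), using the
 * hypothetical test_obj from above.
 */
static inline unsigned int example_count_bucket(struct bucket_table *tbl,
						unsigned int hash)
{
	struct test_obj *obj;
	struct rhash_head *pos;
	spinlock_t *lock = rht_bucket_lock(tbl, hash);
	unsigned int n = 0;

	spin_lock_bh(lock);
	rht_for_each_entry(obj, pos, tbl, hash, node) {
		/* obj is stable while the bucket lock is held */
		if (obj->value >= 0)	/* arbitrary per-object predicate */
			n++;
	}
	spin_unlock_bh(lock);

	return n;
}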

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)
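
/*
 * Illustrative sketch (not part of the original header): a lockless
 * reader scanning one bucket under rcu_read_lock(), the same pattern
 * rhashtable_lookup_fast() below uses internally.
 */
static inline unsigned int example_count_bucket_rcu(struct rhashtable *ht,
						    unsigned int hash)
{
	const struct bucket_table *tbl;
	struct test_obj *obj;
	struct rhash_head *pos;
	unsigned int n = 0;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
		if (obj->value >= 0)	/* objects may vanish after unlock */
			n++;
	}
	rcu_read_unlock();

	return n;
}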

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry for which the compare function reports a match,
 * i.e. returns zero.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
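
/*
 * Illustrative sketch (not part of the original header): looking up the
 * hypothetical test_obj by key. rhashtable_lookup_fast() takes and
 * drops its own rcu_read_lock(), so a caller that dereferences the
 * result must hold its own RCU read-side critical section around the
 * call and the subsequent use.
 */
static inline struct test_obj *example_lookup(struct rhashtable *ht, int key)
{
	return rhashtable_lookup_fast(ht, &key, test_params);
}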

/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int elasticity;
	unsigned int hash;
	int err;

restart:
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, params);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
		if (!IS_ERR_OR_NULL(tbl))
			goto slow_path;

		err = PTR_ERR(tbl);
		goto out;
	}

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
		err = rhashtable_insert_rehash(ht, tbl);
		rcu_read_unlock();
		if (err)
			return err;

		goto restart;
	}

	err = -EEXIST;
	elasticity = ht->elasticity;
	rht_for_each(head, tbl, hash) {
		if (key &&
		    unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
		if (!--elasticity)
			goto slow_path;
	}

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per-bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75% (see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}
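
/*
 * Illustrative sketch (not part of the original header): inserting the
 * hypothetical test_obj. Besides 0, -E2BIG (insecure_max_entries
 * exceeded) is among the returns of the slow paths above.
 */
static inline int example_insert(struct rhashtable *ht, struct test_obj *obj)
{
	return rhashtable_insert_fast(ht, &obj->node, test_params);
}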

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75% (see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75% (see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if the automatic_shrinking
 * parameter is set and the load drops below 30% (see
 * rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees that the entry is still in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (unlikely(ht->p.automatic_shrinking &&
		     rht_shrink_below_30(ht, tbl)))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}
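
/*
 * Illustrative sketch (not part of the original header): removing the
 * hypothetical test_obj and freeing it only after a grace period, since
 * concurrent RCU readers may still hold a reference. kfree() is assumed
 * from <linux/slab.h>; kfree_rcu() would avoid the blocking wait.
 */
static inline void example_remove(struct rhashtable *ht, struct test_obj *obj)
{
	if (rhashtable_remove_fast(ht, &obj->node, test_params) == 0) {
		synchronize_rcu();	/* wait out concurrent readers */
		kfree(obj);
	}
}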

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have the same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees that the entry is still in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
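
/*
 * Illustrative sketch (not part of the original header): swapping in an
 * updated copy of a test_obj. Both objects carry the same key, so the
 * hash check in __rhashtable_replace_fast() passes.
 */
static inline int example_replace(struct rhashtable *ht,
				  struct test_obj *old_obj,
				  struct test_obj *new_obj)
{
	new_obj->value = old_obj->value;	/* identical key => identical hash */
	return rhashtable_replace_fast(ht, &old_obj->node, &new_obj->node,
				       test_params);
}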

#endif /* _LINUX_RHASHTABLE_H */