// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * User extended attribute client side cache functions.
 *
 * Author: Frank van der Linden <fllinden@amazon.com>
 */
#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <uapi/linux/xattr.h>

#include "nfs4_fs.h"
#include "internal.h"

/*
 * User extended attributes client side caching is implemented by having
 * a cache structure attached to NFS inodes. This structure is allocated
 * when needed, and freed when the cache is zapped.
 *
 * The cache structure contains a hash table of entries, and a pointer
 * to a special-cased entry for the listxattr cache.
 *
 * Accessing and allocating / freeing the caches is done via reference
 * counting. The cache entries use a similar refcounting scheme.
 *
 * This makes freeing a cache, both from the shrinker and from the
 * zap cache path, easy. It also means that, in current use cases,
 * the large majority of inodes will not waste any memory, as they
 * will never have any user extended attributes assigned to them.
 *
 * Attribute entries are hashed into a simple hash table. They are
 * also part of an LRU.
 *
 * There are three shrinkers.
 *
 * Two shrinkers deal with the cache entries themselves: one for
 * large entries (> PAGE_SIZE), and one for smaller entries. The
 * shrinker for the larger entries works more aggressively than
 * the one for the smaller entries.
 *
 * The other shrinker frees the cache structures themselves.
 */

/*
 * 64 buckets is a good default. It is unlikely that any reasonable
 * workload uses even 64 user extended attributes.
 * You can certainly add a lot more, but you get what you ask for
 * in those circumstances.
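 *
 * Note that the value must remain a power of two, since
 * nfs4_xattr_hash_bucket() masks the hash with (NFS4_XATTR_HASH_SIZE - 1).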
 */
#define NFS4_XATTR_HASH_SIZE	64

#define NFSDBG_FACILITY	NFSDBG_XATTRCACHE

struct nfs4_xattr_cache;
struct nfs4_xattr_entry;

struct nfs4_xattr_bucket {
	spinlock_t lock;
	struct hlist_head hlist;
	struct nfs4_xattr_cache *cache;
	bool draining;
};

struct nfs4_xattr_cache {
	struct kref ref;
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;
	struct list_head dispose;
	atomic_long_t nent;
	spinlock_t listxattr_lock;
	struct inode *inode;
	struct nfs4_xattr_entry *listxattr;
};

struct nfs4_xattr_entry {
	struct kref ref;
	struct hlist_node hnode;
	struct list_head lru;
	struct list_head dispose;
	char *xattr_name;
	void *xattr_value;
	size_t xattr_size;
	struct nfs4_xattr_bucket *bucket;
	uint32_t flags;
};

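/*
 * NFS4_XATTR_ENTRY_EXTVAL marks entries whose value did not fit in the
 * entry's own page-sized allocation and was kvmalloc()ed separately.
 */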
#define	NFS4_XATTR_ENTRY_EXTVAL	0x0001

/*
 * LRU lists: one for the cache structures attached to NFS inodes, and
 * two for the cache entries (small and large values are kept separate).
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static struct kmem_cache *nfs4_xattr_cache_cachep;

/*
 * Hashing helper functions.
 */
static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&cache->buckets[i].hlist);
		spin_lock_init(&cache->buckets[i].lock);
		cache->buckets[i].cache = cache;
		cache->buckets[i].draining = false;
	}
}

/*
 * Locking order:
 * 1. inode i_lock or bucket lock
 * 2. list_lru lock (taken by list_lru_* functions)
 */

/*
 * Wrapper functions to add a cache entry to the right LRU.
 */
static bool
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}

static bool
nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}

/*
 * This function allocates cache entries. An entry is normally an
 * extended attribute name/value pair, but it may also hold the listxattr
 * cache. Both uses share the same structure so that they can be
 * treated as one by the memory shrinker.
 *
 * xattr cache entries are allocated together with names. If the
 * value fits into one page with the entry structure and the name,
 * it will also be part of the same allocation (kmalloc). This is
 * expected to be the vast majority of cases. Larger allocations
 * have a value pointer that is allocated separately by kvmalloc.
 *
 * Parameters:
 *
 * @name:  Name of the extended attribute. NULL for listxattr cache
 *         entry.
 * @value: Value of attribute, or listxattr cache. NULL if the
 *         value is to be copied from pages instead.
 * @pages: Pages to copy the value from, if not NULL. Passed in to
 *         make it easier to copy the value after an RPC, even if
 *         the value will not be passed up to the application (e.g.
 *         for a 'query' getxattr with a NULL buffer).
 * @len:   Length of the value. Can be 0 for zero-length attributes.
 *         @value and @pages will be NULL if @len is 0.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	void *valp;
	char *namep;
	size_t alloclen, slen;
	char *buf;
	uint32_t flags;

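	/*
	 * An entry plus a maximum-length attribute name is guaranteed to
	 * fit in a single page; only the value can push the allocation
	 * over PAGE_SIZE and force a separate kvmalloc() buffer
	 * (NFS4_XATTR_ENTRY_EXTVAL).
	 */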
	BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
	    XATTR_NAME_MAX + 1 > PAGE_SIZE);

	alloclen = sizeof(struct nfs4_xattr_entry);
	if (name != NULL) {
		slen = strlen(name) + 1;
		alloclen += slen;
	} else
		slen = 0;

	if (alloclen + len <= PAGE_SIZE) {
		alloclen += len;
		flags = 0;
	} else {
		flags = NFS4_XATTR_ENTRY_EXTVAL;
	}

	buf = kmalloc(alloclen, GFP_KERNEL);
	if (buf == NULL)
		return NULL;
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	} else {
		namep = NULL;
	}

	if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
		valp = kvmalloc(len, GFP_KERNEL);
		if (valp == NULL) {
			kfree(buf);
			return NULL;
		}
	} else if (len != 0) {
		valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
	} else
		valp = NULL;

	if (valp != NULL) {
		if (value != NULL)
			memcpy(valp, value, len);
		else
			_copy_from_pages(valp, pages, 0, len);
	}

	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}

static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}

static void
nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);

	if (WARN_ON(!list_empty(&entry->lru)))
		return;

	nfs4_xattr_free_entry(entry);
}

static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
	struct nfs4_xattr_cache *cache;
	int i;

	cache = container_of(kref, struct nfs4_xattr_cache, ref);

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
			return;
		cache->buckets[i].draining = false;
	}

	cache->listxattr = NULL;

	kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}

static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
	struct nfs4_xattr_cache *cache;

	cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, GFP_KERNEL);
	if (cache == NULL)
		return NULL;

	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);

	return cache;
}

/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
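 *
 * Returns 1 if @new was installed, 0 if the cache is being drained; in
 * the latter case the caller is responsible for dropping its own
 * reference to @new.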
 */
static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}

/*
 * Unlink a cache from its parent inode, clearing out an invalid
 * cache. Must be called with i_lock held.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *oldcache;

	nfsi = NFS_I(inode);

	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

	return oldcache;
}

/*
 * Discard a cache. Called by get_cache() if there was an old,
 * invalid cache. Can also be called from a shrinker callback.
 *
 * The cache is dead, it has already been unlinked from its inode,
 * and no longer appears on the cache LRU list.
 *
 * Mark all buckets as draining, so that no new entries are added. This
 * could still happen in the unlikely, but possible case that another
 * thread had grabbed a reference before it was unlinked from the inode,
 * and is still holding it for an add operation.
 *
 * Remove all entries from the LRU lists, so that there is no longer
 * any way to 'find' this cache. Then, remove the entries from the hash
 * table.
 *
 * At that point, the cache will remain empty and can be freed when the final
 * reference drops, which is very likely the kref_put at the end of
 * this function, or the one called immediately afterwards in the
 * shrinker callback.
 */
static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Get a referenced copy of the cache structure. Avoid doing allocs
 * while holding i_lock, which means that we do some optimistic
 * allocation and might have to free the result in rare cases.
 *
 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
 * and acts accordingly, replacing the cache when needed. For the read case
 * (!add), this means that the caller must make sure that the cache
 * is valid before calling this function. getxattr and listxattr call
 * revalidate_inode to do this. The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * call.
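 *
 * The returned cache holds a reference that the caller must drop with
 * kref_put(&cache->ref, nfs4_xattr_free_cache_cb). NULL is returned if
 * there is no usable cache, or if allocation fails.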
 */
static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *cache, *oldcache, *newcache;

	nfsi = NFS_I(inode);

	cache = oldcache = NULL;

	spin_lock(&inode->i_lock);

	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		oldcache = nfs4_xattr_cache_unlink(inode);
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	if (add && cache == NULL) {
		newcache = NULL;

		cache = nfs4_xattr_alloc_cache();
		if (cache == NULL)
			goto out;

		spin_lock(&inode->i_lock);
		if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
			/*
			 * The cache was invalidated again. Give up,
			 * since what we want to enter is now likely
			 * outdated anyway.
			 */
			spin_unlock(&inode->i_lock);
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = NULL;
			goto out;
		}

		/*
		 * Check if someone beat us to it.
		 */
		if (nfsi->xattr_cache != NULL) {
			newcache = nfsi->xattr_cache;
			kref_get(&newcache->ref);
		} else {
			kref_get(&cache->ref);
			nfsi->xattr_cache = cache;
			cache->inode = inode;
			list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
		}

		spin_unlock(&inode->i_lock);

		/*
		 * If there was a race, throw away the cache we just
		 * allocated, and use the new one allocated by someone
		 * else.
		 */
		if (newcache != NULL) {
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = newcache;
		}
	}

out:
	/*
	 * Discard the now orphaned old cache.
	 */
	if (oldcache != NULL)
		nfs4_xattr_discard_cache(oldcache);

	return cache;
}

static inline struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}

static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry;

	entry = NULL;

	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}

	return entry;
}

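/*
 * Insert an entry into the hash table, replacing any existing entry
 * with the same name. Returns 1 on success, 0 if the bucket is
 * draining because the cache is being discarded.
 */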
static int
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *oldentry = NULL;
	int ret = 1;

	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);

	if (bucket->draining) {
		ret = 0;
		goto out;
	}

	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL) {
		hlist_del_init(&oldentry->hnode);
		nfs4_xattr_entry_lru_del(oldentry);
	} else {
		atomic_long_inc(&cache->nent);
	}

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);

out:
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);

	return ret;
}

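/*
 * Remove the entry with the given name from the hash table, if it is
 * present, and drop the reference that the hash table held on it.
 */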
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}

	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}

static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);

	spin_unlock(&bucket->lock);

	return entry;
}

/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			 ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	ret = 0;
	entry = nfs4_xattr_hash_find(cache, name);

	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
		    entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Retrieve a cached list of xattrs from the cache.
 */
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);

	entry = cache->listxattr;

	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}

	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Add an xattr to the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	dprintk("%s: add '%s' len %lu\n", __func__,
	    name, (unsigned long)buflen);

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;

	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Remove an xattr from the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	struct nfs4_xattr_cache *cache;

	dprintk("%s: remove '%s'\n", __func__, name);

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return;

	(void)nfs4_xattr_set_listcache(cache, NULL);
	nfs4_xattr_hash_remove(cache, name);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Cache listxattr output, replacing any possible old one.
 */
void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
			       ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is obviously the same for all buckets, so just
	 * use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Zap the entire cache. Called when an inode is evicted.
 */
void nfs4_xattr_cache_zap(struct inode *inode)
{
	struct nfs4_xattr_cache *oldcache;

	spin_lock(&inode->i_lock);
	oldcache = nfs4_xattr_cache_unlink(inode);
	spin_unlock(&inode->i_lock);

	if (oldcache)
		nfs4_xattr_discard_cache(oldcache);
}

/*
 * The LRU for large entries is shrunk more aggressively than the
 * other LRUs, by setting @seeks to 1 for its shrinker.
 *
 * Cache structures themselves are only freed once they are (almost)
 * empty: the cache LRU walk skips any cache that still holds more
 * than one entry.
 */

static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects	= nfs4_xattr_cache_count,
	.scan_objects	= nfs4_xattr_cache_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_entry_shrinker = {
	.count_objects	= nfs4_xattr_entry_count,
	.scan_objects	= nfs4_xattr_entry_scan,
	.seeks		= DEFAULT_SEEKS,
	.batch		= 512,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects	= nfs4_xattr_entry_count,
	.scan_objects	= nfs4_xattr_entry_scan,
	.seeks		= 1,
	.batch		= 512,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static enum lru_status
cache_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct inode *inode;
	struct nfs4_xattr_cache *cache = container_of(item,
	    struct nfs4_xattr_cache, lru);

	if (atomic_long_read(&cache->nent) > 1)
		return LRU_SKIP;

	/*
	 * If a cache structure is on the LRU list, we know that
	 * its inode is valid. Try to lock it to break the link.
	 * Since we're inverting the lock order here, only try.
	 */
	inode = cache->inode;

	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	kref_get(&cache->ref);

	cache->inode = NULL;
	NFS_I(inode)->xattr_cache = NULL;
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
	list_lru_isolate(lru, &cache->lru);

	spin_unlock(&inode->i_lock);

	list_add_tail(&cache->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_cache *cache;

	freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
	    cache_lru_isolate, &dispose);
	while (!list_empty(&dispose)) {
		cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
		    dispose);
		list_del_init(&cache->dispose);
		nfs4_xattr_discard_cache(cache);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	count = list_lru_shrink_count(&nfs4_xattr_cache_lru, sc);
	return vfs_pressure_ratio(count);
}

static enum lru_status
entry_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);

	bucket = entry->bucket;
	cache = bucket->cache;

	/*
	 * Unhook the entry from its parent (either a cache bucket
	 * or a cache structure if it's a listxattr buf), so that
	 * it's no longer found. Then add it to the isolate list,
	 * to be freed later.
	 *
	 * In both cases, we're inverting lock order, so use
	 * trylock and skip the entry if we can't get the lock.
	 */
	if (entry->xattr_name != NULL) {
		/* Regular cache entry */
		if (!spin_trylock(&bucket->lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		hlist_del_init(&entry->hnode);
		atomic_long_dec(&cache->nent);
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&bucket->lock);
	} else {
		/* Listxattr cache entry */
		if (!spin_trylock(&cache->listxattr_lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		cache->listxattr = NULL;
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&cache->listxattr_lock);
	}

	list_add_tail(&entry->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_entry *entry;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);

	while (!list_empty(&dispose)) {
		entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
		    dispose);
		list_del_init(&entry->dispose);

		/*
		 * Drop two references: the one that we just grabbed
		 * in entry_lru_isolate, and the one that was set
		 * when the entry was first allocated.
		 */
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	count = list_lru_shrink_count(lru, sc);
	return vfs_pressure_ratio(count);
}

static void nfs4_xattr_cache_init_once(void *p)
{
	struct nfs4_xattr_cache *cache = p;

	spin_lock_init(&cache->listxattr_lock);
	atomic_long_set(&cache->nent, 0);
	nfs4_xattr_hash_init(cache);
	cache->listxattr = NULL;
	INIT_LIST_HEAD(&cache->lru);
	INIT_LIST_HEAD(&cache->dispose);
}

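/*
 * Create the cache slab, initialize the three LRU lists, and register
 * the shrinkers. On any failure, undo the steps that have already
 * completed, in reverse order.
 */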
int __init nfs4_xattr_cache_init(void)
{
	int ret = 0;

	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
	    sizeof(struct nfs4_xattr_cache), 0,
	    (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
	    nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;

	ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
	    &nfs4_xattr_large_entry_shrinker);
	if (ret)
		goto out4;

	ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
	    &nfs4_xattr_entry_shrinker);
	if (ret)
		goto out3;

	ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
	    &nfs4_xattr_cache_shrinker);
	if (ret)
		goto out2;

	ret = register_shrinker(&nfs4_xattr_cache_shrinker, "nfs-xattr_cache");
	if (ret)
		goto out1;

	ret = register_shrinker(&nfs4_xattr_entry_shrinker, "nfs-xattr_entry");
	if (ret)
		goto out;

	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker,
				"nfs-xattr_large_entry");
	if (!ret)
		return 0;

	unregister_shrinker(&nfs4_xattr_entry_shrinker);
out:
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
out1:
	list_lru_destroy(&nfs4_xattr_cache_lru);
out2:
	list_lru_destroy(&nfs4_xattr_entry_lru);
out3:
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
out4:
	kmem_cache_destroy(nfs4_xattr_cache_cachep);

	return ret;
}

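/*
 * Tear down in the reverse order of nfs4_xattr_cache_init(): unregister
 * the shrinkers first, so that no scan can run against LRU lists that
 * are about to be destroyed.
 */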
void nfs4_xattr_cache_exit(void)
{
	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
	list_lru_destroy(&nfs4_xattr_entry_lru);
	list_lru_destroy(&nfs4_xattr_cache_lru);
	kmem_cache_destroy(nfs4_xattr_cache_cachep);
}