/* FS-Cache cache handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

LIST_HEAD(fscache_cache_list);
DECLARE_RWSEM(fscache_addremove_sem);
DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq);
EXPORT_SYMBOL(fscache_cache_cleared_wq);

static LIST_HEAD(fscache_cache_tag_list);

/*
 * look up a cache tag
 */
struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name)
{
	struct fscache_cache_tag *tag, *xtag;

	/* firstly check for the existence of the tag under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(tag, &fscache_cache_tag_list, link) {
		if (strcmp(tag->name, name) == 0) {
			atomic_inc(&tag->usage);
			up_read(&fscache_addremove_sem);
			return tag;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the tag does not exist - create a candidate */
	xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL);
	if (!xtag)
		/* return a dummy tag if out of memory */
		return ERR_PTR(-ENOMEM);

	atomic_set(&xtag->usage, 1);
	strcpy(xtag->name, name);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(tag, &fscache_cache_tag_list, link) {
		if (strcmp(tag->name, name) == 0) {
			atomic_inc(&tag->usage);
			up_write(&fscache_addremove_sem);
			kfree(xtag);
			return tag;
		}
	}

	list_add_tail(&xtag->link, &fscache_cache_tag_list);
	up_write(&fscache_addremove_sem);
	return xtag;
}

/*
 * release a reference to a cache tag
 */
void __fscache_release_cache_tag(struct fscache_cache_tag *tag)
{
	if (tag != ERR_PTR(-ENOMEM)) {
		down_write(&fscache_addremove_sem);

		if (atomic_dec_and_test(&tag->usage))
			list_del_init(&tag->link);
		else
			tag = NULL;

		up_write(&fscache_addremove_sem);

		kfree(tag);
	}
}

/*
 * select a cache in which to store an object
 * - the cache addremove semaphore must be at least read-locked by the caller
 * - the object will never be an index
 */
struct fscache_cache *fscache_select_cache_for_object(
	struct fscache_cookie *cookie)
{
	struct fscache_cache_tag *tag;
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("");

	if (list_empty(&fscache_cache_list)) {
		_leave(" = NULL [no cache]");
		return NULL;
	}

	/* we check the parent to determine the cache to use */
	spin_lock(&cookie->lock);

	/* the first in the parent's backing list should be the preferred
	 * cache */
	if (!hlist_empty(&cookie->backing_objects)) {
		object = hlist_entry(cookie->backing_objects.first,
				     struct fscache_object, cookie_link);

		cache = object->cache;
		if (object->state >= FSCACHE_OBJECT_DYING ||
		    test_bit(FSCACHE_IOERROR, &cache->flags))
			cache = NULL;

		spin_unlock(&cookie->lock);
		_leave(" = %p [parent]", cache);
		return cache;
	}

	/* the parent is unbacked */
	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
		/* cookie not an index and is unbacked */
		spin_unlock(&cookie->lock);
		_leave(" = NULL [cookie ub,ni]");
		return NULL;
	}

	spin_unlock(&cookie->lock);

	if (!cookie->def->select_cache)
		goto no_preference;

	/* ask the netfs for its preference */
	tag = cookie->def->select_cache(cookie->parent->netfs_data,
					cookie->netfs_data);
	if (!tag)
		goto no_preference;

	if (tag == ERR_PTR(-ENOMEM)) {
		_leave(" = NULL [nomem tag]");
		return NULL;
	}

	if (!tag->cache) {
		_leave(" = NULL [unbacked tag]");
		return NULL;
	}

	if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
		return NULL;

	_leave(" = %p [specific]", tag->cache);
	return tag->cache;

no_preference:
	/* netfs has no preference - just select first cache */
	cache = list_entry(fscache_cache_list.next,
			   struct fscache_cache, link);
	_leave(" = %p [first]", cache);
	return cache;
}
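
/*
 * Illustrative sketch (assumption, not part of the original file): a netfs
 * ->select_cache() hook that steers objects at a particular cache tag, which
 * the function above would then honour.  The function name, the assumed hook
 * prototype and the tag name "ssd0" are hypothetical;
 * fscache_lookup_cache_tag() is the netfs-facing wrapper around
 * __fscache_lookup_cache_tag() defined earlier in this file.
 */
#if 0
static struct fscache_cache_tag *example_select_cache(
	const void *parent_netfs_data,
	const void *cookie_netfs_data)
{
	/* Prefer the cache registered under the "ssd0" tag, if any. */
	return fscache_lookup_cache_tag("ssd0");
}
#endif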

/**
 * fscache_init_cache - Initialise a cache record
 * @cache: The cache record to be initialised
 * @ops: The cache operations to be installed in that record
 * @idfmt: Format string to define identifier
 * @...: sprintf-style arguments
 *
 * Initialise a record of a cache and fill in the name.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_init_cache(struct fscache_cache *cache,
			const struct fscache_cache_ops *ops,
			const char *idfmt,
			...)
{
	va_list va;

	memset(cache, 0, sizeof(*cache));

	cache->ops = ops;

	va_start(va, idfmt);
	vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
	va_end(va);

	INIT_WORK(&cache->op_gc, fscache_operation_gc);
	INIT_LIST_HEAD(&cache->link);
	INIT_LIST_HEAD(&cache->object_list);
	INIT_LIST_HEAD(&cache->op_gc_list);
	spin_lock_init(&cache->object_list_lock);
	spin_lock_init(&cache->op_gc_list_lock);
}
EXPORT_SYMBOL(fscache_init_cache);

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The record describing the cache
 * @ifsdef: The record of the cache object describing the top-level index
 * @tagname: The tag describing this cache
 *
 * Add a cache to the system, making it available for netfs's to use.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      struct fscache_object *ifsdef,
		      const char *tagname)
{
	struct fscache_cache_tag *tag;

	BUG_ON(!cache->ops);
	BUG_ON(!ifsdef);

	cache->flags = 0;
	ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
	ifsdef->state = FSCACHE_OBJECT_ACTIVE;

	if (!tagname)
		tagname = cache->identifier;

	BUG_ON(!tagname[0]);

	_enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);

	/* we use the cache tag to uniquely identify caches */
	tag = __fscache_lookup_cache_tag(tagname);
	if (IS_ERR(tag))
		goto nomem;

	if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags))
		goto tag_in_use;

	cache->kobj = kobject_create_and_add(tagname, fscache_root);
	if (!cache->kobj)
		goto error;

	ifsdef->cookie = &fscache_fsdef_index;
	ifsdef->cache = cache;
	cache->fsdef = ifsdef;

	down_write(&fscache_addremove_sem);

	tag->cache = cache;
	cache->tag = tag;

	/* add the cache to the list */
	list_add(&cache->link, &fscache_cache_list);

	/* add the cache's netfs definition index object to the cache's
	 * list */
	spin_lock(&cache->object_list_lock);
	list_add_tail(&ifsdef->cache_link, &cache->object_list);
	spin_unlock(&cache->object_list_lock);
	fscache_objlist_add(ifsdef);

	/* add the cache's netfs definition index object to the top level index
	 * cookie as a known backing object */
	spin_lock(&fscache_fsdef_index.lock);

	hlist_add_head(&ifsdef->cookie_link,
		       &fscache_fsdef_index.backing_objects);

	atomic_inc(&fscache_fsdef_index.usage);

	/* done */
	spin_unlock(&fscache_fsdef_index.lock);
	up_write(&fscache_addremove_sem);

	printk(KERN_NOTICE "FS-Cache: Cache \"%s\" added (type %s)\n",
	       cache->tag->name, cache->ops->name);
	kobject_uevent(cache->kobj, KOBJ_ADD);

	_leave(" = 0 [%s]", cache->identifier);
	return 0;

tag_in_use:
	printk(KERN_ERR "FS-Cache: Cache tag '%s' already in use\n", tagname);
	__fscache_release_cache_tag(tag);
	_leave(" = -EEXIST");
	return -EEXIST;

error:
	__fscache_release_cache_tag(tag);
	_leave(" = -EINVAL");
	return -EINVAL;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(fscache_add_cache);
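
/*
 * Illustrative sketch (assumption, not part of the original file): how a
 * cache backend might register itself with the two calls above.  "struct
 * example_cache", "example_cache_ops" and the index field are hypothetical
 * stand-ins for the backend's own definitions.
 */
#if 0
struct example_cache {
	struct fscache_cache	cache;	/* embeds the common cache record */
	unsigned int		index;
};

static int example_bring_cache_online(struct example_cache *ecache,
				      struct fscache_object *fsdef)
{
	/* Fill in the common record and format its identifier string. */
	fscache_init_cache(&ecache->cache, &example_cache_ops,
			   "example-%u", ecache->index);

	/* Make the cache available; a NULL tag name falls back to the identifier. */
	return fscache_add_cache(&ecache->cache, fsdef, NULL);
}
#endif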

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything. This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	set_bit(FSCACHE_IOERROR, &cache->flags);

	printk(KERN_ERR "FS-Cache: Cache %s stopped due to I/O error\n",
	       cache->ops->name);
}
EXPORT_SYMBOL(fscache_io_error);
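
/*
 * Illustrative sketch (assumption, not part of the original file): a backend
 * marking the cache dead when its backing store fails.  "example_sync_store()"
 * is a hypothetical backend helper.
 */
#if 0
static void example_sync_or_fail(struct fscache_cache *cache)
{
	if (example_sync_store(cache) < 0)
		fscache_io_error(cache);	/* no further use will be made of this cache */
}
#endif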

/*
 * request withdrawal of all the objects in a cache
 * - all the objects being withdrawn are moved onto the supplied list
 */
static void fscache_withdraw_all_objects(struct fscache_cache *cache,
					 struct list_head *dying_objects)
{
	struct fscache_object *object;

	spin_lock(&cache->object_list_lock);

	while (!list_empty(&cache->object_list)) {
		object = list_entry(cache->object_list.next,
				    struct fscache_object, cache_link);
		list_move_tail(&object->cache_link, dying_objects);

		_debug("withdraw %p", object->cookie);

		spin_lock(&object->lock);
		spin_unlock(&cache->object_list_lock);
		fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW);
		spin_unlock(&object->lock);

		cond_resched();
		spin_lock(&cache->object_list_lock);
	}

	spin_unlock(&cache->object_list_lock);
}

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The record describing the cache
 *
 * Withdraw a cache from service, unbinding all its cache objects from the
 * netfs cookies they're currently representing.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	LIST_HEAD(dying_objects);

	_enter("");

	printk(KERN_NOTICE "FS-Cache: Withdrawing cache \"%s\"\n",
	       cache->tag->name);

	/* make the cache unavailable for cookie acquisition */
	if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
		BUG();

	down_write(&fscache_addremove_sem);
	list_del_init(&cache->link);
	cache->tag->cache = NULL;
	up_write(&fscache_addremove_sem);

	/* make sure all pages pinned by operations on behalf of the netfs are
	 * written to disk */
	fscache_stat(&fscache_n_cop_sync_cache);
	cache->ops->sync_cache(cache);
	fscache_stat_d(&fscache_n_cop_sync_cache);

	/* dissociate all the netfs pages backed by this cache from the block
	 * mappings in the cache */
	fscache_stat(&fscache_n_cop_dissociate_pages);
	cache->ops->dissociate_pages(cache);
	fscache_stat_d(&fscache_n_cop_dissociate_pages);

	/* we now have to destroy all the active objects pertaining to this
	 * cache - which we do by passing them off to thread pool to be
	 * disposed of */
	_debug("destroy");

	fscache_withdraw_all_objects(cache, &dying_objects);

	/* wait for all extant objects to finish their outstanding operations
	 * and go away */
	_debug("wait for finish");
	wait_event(fscache_cache_cleared_wq,
		   atomic_read(&cache->object_count) == 0);
	_debug("wait for clearance");
	wait_event(fscache_cache_cleared_wq,
		   list_empty(&cache->object_list));
	_debug("cleared");
	ASSERT(list_empty(&dying_objects));

	kobject_put(cache->kobj);

	clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
	fscache_release_cache_tag(cache->tag);
	cache->tag = NULL;

	_leave("");
}
EXPORT_SYMBOL(fscache_withdraw_cache);
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"

static LIST_HEAD(fscache_caches);
DECLARE_RWSEM(fscache_addremove_sem);
EXPORT_SYMBOL(fscache_addremove_sem);
DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
EXPORT_SYMBOL(fscache_clearance_waiters);

static atomic_t fscache_cache_debug_id;

/*
 * Allocate a cache cookie.
 */
static struct fscache_cache *fscache_alloc_cache(const char *name)
{
	struct fscache_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache) {
		if (name) {
			cache->name = kstrdup(name, GFP_KERNEL);
			if (!cache->name) {
				kfree(cache);
				return NULL;
			}
		}
		refcount_set(&cache->ref, 1);
		INIT_LIST_HEAD(&cache->cache_link);
		cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
	}
	return cache;
}

static bool fscache_get_cache_maybe(struct fscache_cache *cache,
				    enum fscache_cache_trace where)
{
	bool success;
	int ref;

	success = __refcount_inc_not_zero(&cache->ref, &ref);
	if (success)
		trace_fscache_cache(cache->debug_id, ref + 1, where);
	return success;
}

/*
 * Look up a cache cookie.
 */
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
	struct fscache_cache *candidate, *cache, *unnamed = NULL;

	/* firstly check for the existence of the cache under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
		if (!cache->name && !name &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
	}

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_r;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the cache does not exist - create a candidate */
	candidate = fscache_alloc_cache(name);
	if (!candidate)
		return ERR_PTR(-ENOMEM);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_w;
		if (!cache->name) {
			unnamed = cache;
			if (!name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	if (unnamed && is_cache &&
	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
		goto use_unnamed_cache;

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	list_add_tail(&candidate->cache_link, &fscache_caches);
	trace_fscache_cache(candidate->debug_id,
			    refcount_read(&candidate->ref),
			    fscache_cache_new_acquire);
	up_write(&fscache_addremove_sem);
	return candidate;

got_cache_r:
	up_read(&fscache_addremove_sem);
	return cache;
use_unnamed_cache:
	cache = unnamed;
	cache->name = candidate->name;
	candidate->name = NULL;
got_cache_w:
	up_write(&fscache_addremove_sem);
	kfree(candidate->name);
	kfree(candidate);
	return cache;
}

/**
 * fscache_acquire_cache - Acquire a cache-level cookie.
 * @name: The name of the cache.
 *
 * Get a cookie to represent an actual cache. If a name is given and there is
 * a nameless cache record available, this will acquire that and set its name,
 * directing all the volumes using it to this cache.
 *
 * The cache will be switched over to the preparing state if not currently in
 * use, otherwise -EBUSY will be returned.
 */
struct fscache_cache *fscache_acquire_cache(const char *name)
{
	struct fscache_cache *cache;

	ASSERT(name);
	cache = fscache_lookup_cache(name, true);
	if (IS_ERR(cache))
		return cache;

	if (!fscache_set_cache_state_maybe(cache,
					   FSCACHE_CACHE_IS_NOT_PRESENT,
					   FSCACHE_CACHE_IS_PREPARING)) {
		pr_warn("Cache tag %s in use\n", name);
		fscache_put_cache(cache, fscache_cache_put_cache);
		return ERR_PTR(-EBUSY);
	}

	return cache;
}
EXPORT_SYMBOL(fscache_acquire_cache);

/**
 * fscache_put_cache - Release a cache-level cookie.
 * @cache: The cache cookie to be released
 * @where: An indication of where the release happened
 *
 * Release the caller's reference on a cache-level cookie. The @where
 * indication should give information about the circumstances in which the call
 * occurs and will be logged through a tracepoint.
 */
void fscache_put_cache(struct fscache_cache *cache,
		       enum fscache_cache_trace where)
{
	unsigned int debug_id;
	bool zero;
	int ref;

	if (IS_ERR_OR_NULL(cache))
		return;

	debug_id = cache->debug_id;
	zero = __refcount_dec_and_test(&cache->ref, &ref);
	trace_fscache_cache(debug_id, ref - 1, where);

	if (zero) {
		down_write(&fscache_addremove_sem);
		list_del_init(&cache->cache_link);
		up_write(&fscache_addremove_sem);
		kfree(cache->name);
		kfree(cache);
	}
}

/**
 * fscache_relinquish_cache - Reset cache state and release cookie
 * @cache: The cache cookie to be released
 *
 * Reset the state of a cache and release the caller's reference on a cache
 * cookie.
 */
void fscache_relinquish_cache(struct fscache_cache *cache)
{
	enum fscache_cache_trace where =
		(cache->state == FSCACHE_CACHE_IS_PREPARING) ?
		fscache_cache_put_prep_failed :
		fscache_cache_put_relinquish;

	cache->ops = NULL;
	cache->cache_priv = NULL;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
	fscache_put_cache(cache, where);
}
EXPORT_SYMBOL(fscache_relinquish_cache);

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The cache-level cookie representing the cache
 * @ops: Table of cache operations to use
 * @cache_priv: Private data for the cache record
 *
 * Add a cache to the system, making it available for netfs's to use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      const struct fscache_cache_ops *ops,
		      void *cache_priv)
{
	int n_accesses;

	_enter("{%s,%s}", ops->name, cache->name);

	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

	/* Get a ref on the cache cookie and keep its n_accesses counter raised
	 * by 1 to prevent wakeups from transitioning it to 0 until we're
	 * withdrawing caching services from it.
	 */
	n_accesses = atomic_inc_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_pin);

	down_write(&fscache_addremove_sem);

	cache->ops = ops;
	cache->cache_priv = cache_priv;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);

	up_write(&fscache_addremove_sem);
	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
	_leave(" = 0 [%s]", cache->name);
	return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
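
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * cache-cookie lifecycle a backend might follow around fscache_add_cache().
 * "example_cache_ops" and "example_prepare_store()" are hypothetical backend
 * definitions.
 */
#if 0
static int example_bind_cache(const char *name, void *priv)
{
	struct fscache_cache *cache;
	int ret;

	cache = fscache_acquire_cache(name);	/* -> FSCACHE_CACHE_IS_PREPARING */
	if (IS_ERR(cache))
		return PTR_ERR(cache);

	ret = example_prepare_store(priv);	/* set up the backing store */
	if (ret < 0) {
		fscache_relinquish_cache(cache);	/* reset state, drop our ref */
		return ret;
	}

	/* Declare the cache live; state becomes FSCACHE_CACHE_IS_ACTIVE. */
	return fscache_add_cache(cache, &example_cache_ops, priv);
}
#endif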

/**
 * fscache_begin_cache_access - Pin a cache so it can be accessed
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing it, returning true if successful. This works as follows:
 *
 * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *     then we return false to indicate access was not permitted.
 *
 * (2) If the cache tests as live, then we increment the n_accesses count and
 *     then recheck the liveness, ending the access if it ceased to be live.
 *
 * (3) When we end the access, we decrement n_accesses and wake up any
 *     waiters if it reaches 0.
 *
 * (4) Whilst the cache is caching, n_accesses is kept artificially
 *     incremented to prevent wakeups from happening.
 *
 * (5) When the cache is taken offline, the state is changed to prevent new
 *     accesses, n_accesses is decremented and we wait for n_accesses to
 *     become 0.
 */
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	if (!fscache_cache_is_live(cache))
		return false;

	n_accesses = atomic_inc_return(&cache->n_accesses);
	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (!fscache_cache_is_live(cache)) {
		fscache_end_cache_access(cache, fscache_access_unlive);
		return false;
	}
	return true;
}

/**
 * fscache_end_cache_access - Unpin a cache at the end of an access.
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache after we've accessed it. The @why indicator is merely
 * provided for tracing purposes.
 */
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (n_accesses == 0)
		wake_up_var(&cache->n_accesses);
}
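
/*
 * Illustrative sketch (assumption, not part of the original file): bracketing
 * a cache operation with the access-pinning helpers above.
 * "example_submit_read()" is hypothetical and the trace value passed is only
 * an example.
 */
#if 0
static int example_read_from_cache(struct fscache_cache *cache)
{
	int ret;

	/* Steps (1)/(2) above: refuse the access if the cache isn't live. */
	if (!fscache_begin_cache_access(cache, fscache_access_io_read))
		return -ENOBUFS;

	ret = example_submit_read(cache);

	/* Step (3): drop the pin; may wake a waiting withdrawal. */
	fscache_end_cache_access(cache, fscache_access_io_read);
	return ret;
}
#endif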

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything. This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	if (fscache_set_cache_state_maybe(cache,
					  FSCACHE_CACHE_IS_ACTIVE,
					  FSCACHE_CACHE_GOT_IOERROR))
		pr_err("Cache '%s' stopped due to I/O error\n",
		       cache->name);
}
EXPORT_SYMBOL(fscache_io_error);

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The cache cookie
 *
 * Begin the process of withdrawing a cache from service. This stops new
 * cache-level and volume-level accesses from taking place and waits for
 * currently ongoing cache-level accesses to end.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	int n_accesses;

	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
		  cache->name, atomic_read(&cache->object_count));

	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);

	/* Allow wakeups on dec-to-0 */
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_unpin);

	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_cache);
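
/*
 * Illustrative sketch (assumption, not part of the original file): how a
 * backend's unbind path might pair withdrawal with relinquishment.  The
 * intervening teardown of volumes, objects and the backing store is omitted
 * and backend-specific.
 */
#if 0
static void example_unbind_cache(struct fscache_cache *cache)
{
	/* Stop new cache-level accesses and wait for current ones to end. */
	fscache_withdraw_cache(cache);

	/* ... withdraw volumes/objects and flush the backing store here ... */

	/* Reset the cookie state and drop the backend's reference. */
	fscache_relinquish_cache(cache);
}
#endif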

#ifdef CONFIG_PROC_FS
static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";

/*
 * Generate a list of caches in /proc/fs/fscache/caches
 */
static int fscache_caches_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cache *cache;

	if (v == &fscache_caches) {
		seq_puts(m,
			 "CACHE    REF   VOLS  OBJS  ACCES S NAME\n"
			 "======== ===== ===== ===== ===== = ===============\n"
			 );
		return 0;
	}

	cache = list_entry(v, struct fscache_cache, cache_link);
	seq_printf(m,
		   "%08x %5d %5d %5d %5d %c %s\n",
		   cache->debug_id,
		   refcount_read(&cache->ref),
		   atomic_read(&cache->n_volumes),
		   atomic_read(&cache->object_count),
		   atomic_read(&cache->n_accesses),
		   fscache_cache_states[cache->state],
		   cache->name ?: "-");
	return 0;
}

static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_addremove_sem)
{
	down_read(&fscache_addremove_sem);
	return seq_list_start_head(&fscache_caches, *_pos);
}

static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_caches, _pos);
}

static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
{
	up_read(&fscache_addremove_sem);
}

const struct seq_operations fscache_caches_seq_ops = {
	.start	= fscache_caches_seq_start,
	.next	= fscache_caches_seq_next,
	.stop	= fscache_caches_seq_stop,
	.show	= fscache_caches_seq_show,
};
#endif /* CONFIG_PROC_FS */