1/* FS-Cache object state machine handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * See Documentation/filesystems/caching/object.txt for a description of the
12 * object state machine and the in-kernel representations.
13 */
14
15#define FSCACHE_DEBUG_LEVEL COOKIE
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/prefetch.h>
19#include "internal.h"
20
21static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
22static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
23static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
24static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
25static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
26static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
27static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
28static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
29static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
30static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
31static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
32static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
33
/* Build the C identifier for a state object, and take its address. */
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event processing
 * is performed by them.  The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition.  Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = f \
	}

/*
 * Returns from work states.  transit_to() prefetches the next state's work
 * pointer since the dispatcher will dereference it almost immediately.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

/* "Stay in the current state" sentinel returned by work functions. */
#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No execution
 * is performed by them.  Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.  The table is terminated by a { 0, NULL } entry.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = NULL, \
		.transitions = { __VA_ARGS__, { 0, NULL } } \
	}

/* One entry in a transition table: event mask -> destination state. */
#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }
75
/*
 * The object state machine.
 *
 * Normal lifecycle: INIT_OBJECT -> (WAIT_FOR_PARENT ->) PARENT_READY ->
 * LOOK_UP_OBJECT -> OBJECT_AVAILABLE -> JUMPSTART_DEPS -> WAIT_FOR_CMD,
 * then eventually through KILL_OBJECT/DROP_OBJECT to OBJECT_DEAD.
 */
static WORK_STATE(INIT_OBJECT,		"INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY,		"PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT,		"ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT,	"LOOK", fscache_look_up_object);
/* Creation reuses the lookup routine; the backend distinguishes the cases. */
static WORK_STATE(CREATE_OBJECT,	"CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE,	"AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS,	"JUMP", fscache_jumpstart_dependents);

static WORK_STATE(INVALIDATE_OBJECT,	"INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT,	"UPDT", fscache_update_object);

static WORK_STATE(LOOKUP_FAILURE,	"LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
/* (void *)2UL is a magic terminal-state marker; the dispatcher compares
 * state->work against it and stops rather than calling through it. */
static WORK_STATE(OBJECT_DEAD,		"DEAD", (void *)2UL);

static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT,	"?PRN",
		  TRANSIT_TO(PARENT_READY,	1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD,		"?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT,	1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT,	1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE,	"?CLR",
		  TRANSIT_TO(KILL_OBJECT,	1 << FSCACHE_OBJECT_EV_CLEARED));
109
110/*
111 * Out-of-band event transition tables. These are for handling unexpected
112 * events, such as an I/O error. If an OOB event occurs, the state machine
113 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
115 *
116 * In such a situation, object->state remembers the state the machine should
117 * have been in/gone to and returning NO_TRANSIT returns to that.
118 */
/* OOB table while initialising: error/kill aborts initialisation. */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

/* OOB table during lookup/creation: error/kill becomes a lookup failure. */
static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

/* OOB table once available: error/kill kills the object. */
static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};
139
140static int fscache_get_object(struct fscache_object *);
141static void fscache_put_object(struct fscache_object *);
142static bool fscache_enqueue_dependents(struct fscache_object *, int);
143static void fscache_dequeue_object(struct fscache_object *);
144
/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 *
 * Drops one object-op and one generic op from the parent's counts; when the
 * parent's op count hits zero, raises EV_CLEARED so a parent waiting in
 * WAIT_FOR_CLEARANCE can proceed.
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

	/* Nested-class lock: the child's lock may already be held by the
	 * caller, so the parent's lock must be taken at subclass 1 to keep
	 * lockdep happy. */
	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}
163
/*
 * Object state machine dispatcher.
 *
 * Runs the object's state machine until it either parks in a wait state with
 * no interesting events pending, returns NO_TRANSIT from a work state (which
 * requeues the object), or reaches the dead state.  Out-of-band events
 * (errors, kill requests) are checked first on every pass and force a
 * transition to the nominated OOB work state.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				/* Disable this OOB event (non-atomic is OK:
				 * only the dispatcher touches the OOB mask)
				 * and consume the pending event bit. */
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		/* Work state wants to run again later: requeue and park. */
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		/* (void *)2UL is the terminal-state marker set on
		 * OBJECT_DEAD; never call through it. */
		if (unlikely(state->work == ((void *)2UL))) {
			_leave(" [dead]");
			return;
		}
		goto restart_masked;
	}

	/* Transited to wait state */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
	/* Pair the mask update with a re-read of the event word so that an
	 * event raised concurrently with unmasking is not lost. */
	smp_mb();
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}
262
263/*
264 * execute an object
265 */
266static void fscache_object_work_func(struct work_struct *work)
267{
268 struct fscache_object *object =
269 container_of(work, struct fscache_object, work);
270 unsigned long start;
271
272 _enter("{OBJ%x}", object->debug_id);
273
274 start = jiffies;
275 fscache_object_sm_dispatcher(object);
276 fscache_hist(fscache_objs_histogram, start);
277 fscache_put_object(object);
278}
279
/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
			 struct fscache_cookie *cookie,
			 struct fscache_cache *cache)
{
	const struct fscache_transition *t;

	atomic_inc(&cache->object_count);

	/* Start parked in WAIT_FOR_INIT with the init-phase OOB table. */
	object->state = STATE(WAIT_FOR_INIT);
	object->oob_table = fscache_osm_init_oob;
	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
	spin_lock_init(&object->lock);
	INIT_LIST_HEAD(&object->cache_link);
	INIT_HLIST_NODE(&object->cookie_link);
	INIT_WORK(&object->work, fscache_object_work_func);
	INIT_LIST_HEAD(&object->dependents);
	INIT_LIST_HEAD(&object->dep_link);
	INIT_LIST_HEAD(&object->pending_ops);
	object->n_children = 0;
	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
	object->events = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	RB_CLEAR_NODE(&object->objlist_link);
#endif

	/* The event mask starts as the OOB events plus whatever the initial
	 * wait state's transition table is interested in. */
	object->oob_event_mask = 0;
	for (t = object->oob_table; t->events; t++)
		object->oob_event_mask |= t->events;
	object->event_mask = object->oob_event_mask;
	for (t = object->state->transitions; t->events; t++)
		object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);
329
/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 *
 * The empty lock/unlock of object->lock acts as a barrier against anyone who
 * checked the IS_LIVE flag whilst holding the lock.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}
340
/*
 * Abort object initialisation before we start it.
 *
 * Clears the OOB mask (no further OOB transitions wanted), detaches the
 * object from its parent's dependents list and heads for KILL_OBJECT.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}
353
354/*
355 * initialise an object
356 * - check the specified object's parent to see if we can make use of it
357 * immediately to do a creation
358 * - we may need to start the process of creating a parent and we need to wait
359 * for the parent's lookup and creation to complete if it's not there yet
360 */
361static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
362 int event)
363{
364 struct fscache_object *parent;
365 bool success;
366
367 _enter("{OBJ%x},%d", object->debug_id, event);
368
369 ASSERT(list_empty(&object->dep_link));
370
371 parent = object->parent;
372 if (!parent) {
373 _leave(" [no parent]");
374 return transit_to(DROP_OBJECT);
375 }
376
377 _debug("parent: %s of:%lx", parent->state->name, parent->flags);
378
379 if (fscache_object_is_dying(parent)) {
380 _leave(" [bad parent]");
381 return transit_to(DROP_OBJECT);
382 }
383
384 if (fscache_object_is_available(parent)) {
385 _leave(" [ready]");
386 return transit_to(PARENT_READY);
387 }
388
389 _debug("wait");
390
391 spin_lock(&parent->lock);
392 fscache_stat(&fscache_n_cop_grab_object);
393 success = false;
394 if (fscache_object_is_live(parent) &&
395 object->cache->ops->grab_object(object)) {
396 list_add(&object->dep_link, &parent->dependents);
397 success = true;
398 }
399 fscache_stat_d(&fscache_n_cop_grab_object);
400 spin_unlock(&parent->lock);
401 if (!success) {
402 _leave(" [grab failed]");
403 return transit_to(DROP_OBJECT);
404 }
405
406 /* fscache_acquire_non_index_cookie() uses this
407 * to wake the chain up */
408 fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
409 _leave(" [wait]");
410 return transit_to(WAIT_FOR_PARENT);
411}
412
/*
 * Once the parent object is ready, we should kick off our lookup op.
 *
 * Takes an op count and an object-op count on the parent, pinning it until
 * fscache_done_parent_op() releases them, and stamps the lookup start time.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}
434
/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* Switch to the lookup-phase OOB table: errors now mean a lookup
	 * failure rather than an initialisation abort. */
	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	/* The lookup requires a usable cookie; fscache_use_cookie() pins it
	 * and is paired with fscache_unuse_cookie() below. */
	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}
491
/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* Only act on the first transition to "looked up". */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests to begin
		 * returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		/* clear_bit_unlock() orders the flag updates above before the
		 * wakeup so waiters see a consistent cookie state. */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);
521
/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		/* Already looked up negatively, so this is object creation. */
		fscache_stat(&fscache_n_object_created);
	}

	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
560
/*
 * handle an object that has just become available
 *
 * Switches to the running-phase OOB table, releases the op counts held on
 * the parent, starts any queued operations, lets the backend finish its
 * lookup state and records statistics, then goes to wake the dependents.
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							    int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}
594
595/*
596 * Wake up this object's dependent objects now that we've become available.
597 */
598static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
599 int event)
600{
601 _enter("{OBJ%x},%d", object->debug_id, event);
602
603 if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
604 return NO_TRANSIT; /* Not finished; requeue */
605 return transit_to(WAIT_FOR_CMD);
606}
607
608/*
 * Handle lookup or creation failure.
610 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* No more OOB transitions wanted; we're already on the failure path. */
	object->oob_event_mask = 0;

	/* Let the backend tidy up its half-done lookup state. */
	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

	/* Mark the cookie unavailable and wake anyone waiting on the lookup. */
	cookie = object->cookie;
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	/* Release the op counts we held on the parent during lookup. */
	fscache_done_parent_op(object);
	return transit_to(KILL_OBJECT);
}
634
/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
						       int event)
{
	_enter("{OBJ%x,%d,%d},%d",
	       object->debug_id, object->n_ops, object->n_children, event);

	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	/* Fast path: nothing outstanding at all -> drop immediately.
	 * (Unlocked check; a stale read just means we wait a bit longer.) */
	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)
		return transit_to(DROP_OBJECT);

	/* Kick any queued-but-not-started ops so they can run to completion
	 * (they'll be cancelled/aborted now the object is dead). */
	if (object->n_in_progress == 0) {
		spin_lock(&object->lock);
		if (object->n_ops > 0 && object->n_in_progress == 0)
			fscache_start_operations(object);
		spin_unlock(&object->lock);
	}

	if (!list_empty(&object->dependents))
		return transit_to(KILL_DEPENDENTS);

	return transit_to(WAIT_FOR_CLEARANCE);
}
665
666/*
667 * Kill dependent objects.
668 */
669static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
670 int event)
671{
672 _enter("{OBJ%x},%d", object->debug_id, event);
673
674 if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
675 return NO_TRANSIT; /* Not finished */
676 return transit_to(WAIT_FOR_CLEARANCE);
677}
678
/*
 * Drop an object's attachments
 *
 * Detaches the object from its cookie, the cache's object list and its
 * parent, waking up anyone waiting on invalidation or child clearance, and
 * finally hands the object back to the backend for release.
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all its dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}
743
744/*
745 * get a ref on an object
746 */
747static int fscache_get_object(struct fscache_object *object)
748{
749 int ret;
750
751 fscache_stat(&fscache_n_cop_grab_object);
752 ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
753 fscache_stat_d(&fscache_n_cop_grab_object);
754 return ret;
755}
756
/*
 * Discard a ref on an object
 *
 * The actual release, including freeing, is the cache backend's business.
 */
static void fscache_put_object(struct fscache_object *object)
{
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
}
766
/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 * Removes the object from the object list and drops its ref on the cookie.
 */
void fscache_object_destroy(struct fscache_object *object)
{
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie);
	object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);
782
/*
 * enqueue an object for metadata-type processing
 *
 * Takes a ref on the object (dropped by the work function, or immediately if
 * the object was already queued) and queues it on the object workqueue.  The
 * get_cpu_var/put_cpu_var pair disables preemption across the congestion
 * wakeup so the per-CPU wait queue stays valid.
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (fscache_get_object(object) >= 0) {
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			/* Already queued; drop the ref we just took. */
			fscache_put_object(object);

		put_cpu_var(fscache_object_cong_wait);
	}
}
803
/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	if (fscache_object_congested())
		return true;

	/* Re-test after joining the queue to close the race with a
	 * congestion wakeup that happened in between. */
	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
832
833/*
834 * Enqueue the dependents of an object for metadata-type processing.
835 *
836 * If we don't manage to finish the list before the scheduler wants to run
837 * again then return false immediately. We return true if the list was
838 * cleared.
839 */
840static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
841{
842 struct fscache_object *dep;
843 bool ret = true;
844
845 _enter("{OBJ%x}", object->debug_id);
846
847 if (list_empty(&object->dependents))
848 return true;
849
850 spin_lock(&object->lock);
851
852 while (!list_empty(&object->dependents)) {
853 dep = list_entry(object->dependents.next,
854 struct fscache_object, dep_link);
855 list_del_init(&dep->dep_link);
856
857 fscache_raise_event(dep, event);
858 fscache_put_object(dep);
859
860 if (!list_empty(&object->dependents) && need_resched()) {
861 ret = false;
862 break;
863 }
864 }
865
866 spin_unlock(&object->lock);
867 return ret;
868}
869
870/*
871 * remove an object from whatever queue it's waiting on
872 */
873static void fscache_dequeue_object(struct fscache_object *object)
874{
875 _enter("{OBJ%x}", object->debug_id);
876
877 if (!list_empty(&object->dep_link)) {
878 spin_lock(&object->parent->lock);
879 list_del_init(&object->dep_link);
880 spin_unlock(&object->parent->lock);
881 }
882
883 _leave("");
884}
885
886/**
887 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
888 * @object: The object to ask about
889 * @data: The auxiliary data for the object
890 * @datalen: The size of the auxiliary data
891 *
892 * This function consults the netfs about the coherency state of an object.
893 * The caller must be holding a ref on cookie->n_active (held by
894 * fscache_look_up_object() on behalf of the cache backend during object lookup
895 * and creation).
896 */
897enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
898 const void *data, uint16_t datalen)
899{
900 enum fscache_checkaux result;
901
902 if (!object->cookie->def->check_aux) {
903 fscache_stat(&fscache_n_checkaux_none);
904 return FSCACHE_CHECKAUX_OKAY;
905 }
906
907 result = object->cookie->def->check_aux(object->cookie->netfs_data,
908 data, datalen);
909 switch (result) {
910 /* entry okay as is */
911 case FSCACHE_CHECKAUX_OKAY:
912 fscache_stat(&fscache_n_checkaux_okay);
913 break;
914
915 /* entry requires update */
916 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
917 fscache_stat(&fscache_n_checkaux_update);
918 break;
919
920 /* entry requires deletion */
921 case FSCACHE_CHECKAUX_OBSOLETE:
922 fscache_stat(&fscache_n_checkaux_obsolete);
923 break;
924
925 default:
926 BUG();
927 }
928
929 return result;
930}
931EXPORT_SYMBOL(fscache_check_aux);
932
/*
 * Asynchronously invalidate an object.
 *
 * Cancels pending reads/writes and queues an exclusive backend op to do the
 * actual invalidation; uses goto-based cleanup for the failure paths.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(object->cookie->stores.rnode == NULL);
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	/* The op runs exclusively of other ops and drops the cookie use
	 * taken above when it completes. */
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}
1004
1005static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
1006 int event)
1007{
1008 const struct fscache_state *s;
1009
1010 fscache_stat(&fscache_n_invalidates_run);
1011 fscache_stat(&fscache_n_cop_invalidate_object);
1012 s = _fscache_invalidate_object(object, event);
1013 fscache_stat_d(&fscache_n_cop_invalidate_object);
1014 return s;
1015}
1016
1017/*
1018 * Asynchronously update an object.
1019 */
1020static const struct fscache_state *fscache_update_object(struct fscache_object *object,
1021 int event)
1022{
1023 _enter("{OBJ%x},%d", object->debug_id, event);
1024
1025 fscache_stat(&fscache_n_updates_run);
1026 fscache_stat(&fscache_n_cop_update_object);
1027 object->cache->ops->update_object(object);
1028 fscache_stat_d(&fscache_n_cop_update_object);
1029
1030 _leave("");
1031 return transit_to(WAIT_FOR_CMD);
1032}
1033
1034/**
1035 * fscache_object_retrying_stale - Note retrying stale object
1036 * @object: The object that will be retried
1037 *
1038 * Note that an object lookup found an on-disk object that was adjudged to be
1039 * stale and has been deleted. The lookup will be retried.
1040 */
1041void fscache_object_retrying_stale(struct fscache_object *object)
1042{
1043 fscache_stat(&fscache_n_cache_no_space_reject);
1044}
1045EXPORT_SYMBOL(fscache_object_retrying_stale);
1046
1047/**
1048 * fscache_object_mark_killed - Note that an object was killed
1049 * @object: The object that was culled
1050 * @why: The reason the object was killed.
1051 *
1052 * Note that an object was killed. Returns true if the object was
1053 * already marked killed, false if it wasn't.
1054 */
1055void fscache_object_mark_killed(struct fscache_object *object,
1056 enum fscache_why_object_killed why)
1057{
1058 if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
1059 pr_err("Error: Object already killed by cache [%s]\n",
1060 object->cache->identifier);
1061 return;
1062 }
1063
1064 switch (why) {
1065 case FSCACHE_OBJECT_NO_SPACE:
1066 fscache_stat(&fscache_n_cache_no_space_reject);
1067 break;
1068 case FSCACHE_OBJECT_IS_STALE:
1069 fscache_stat(&fscache_n_cache_stale_objects);
1070 break;
1071 case FSCACHE_OBJECT_WAS_RETIRED:
1072 fscache_stat(&fscache_n_cache_retired_objects);
1073 break;
1074 case FSCACHE_OBJECT_WAS_CULLED:
1075 fscache_stat(&fscache_n_cache_culled_objects);
1076 break;
1077 }
1078}
1079EXPORT_SYMBOL(fscache_object_mark_killed);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* FS-Cache object state machine handler
3 *
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 *
7 * See Documentation/filesystems/caching/object.rst for a description of the
8 * object state machine and the in-kernel representations.
9 */
10
11#define FSCACHE_DEBUG_LEVEL COOKIE
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/prefetch.h>
15#include "internal.h"
16
17static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
18static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
19static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
20static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
21static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
22static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
23static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
24static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
25static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
26static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
27static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
28static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
29static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
30
31#define __STATE_NAME(n) fscache_osm_##n
32#define STATE(n) (&__STATE_NAME(n))
33
34/*
35 * Define a work state. Work states are execution states. No event processing
36 * is performed by them. The function attached to a work state returns a
37 * pointer indicating the next state to which the state machine should
38 * transition. Returning NO_TRANSIT repeats the current state, but goes back
39 * to the scheduler first.
40 */
41#define WORK_STATE(n, sn, f) \
42 const struct fscache_state __STATE_NAME(n) = { \
43 .name = #n, \
44 .short_name = sn, \
45 .work = f \
46 }
47
48/*
49 * Returns from work states.
50 */
51#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
52
53#define NO_TRANSIT ((struct fscache_state *)NULL)
54
55/*
56 * Define a wait state. Wait states are event processing states. No execution
57 * is performed by them. Wait states are just tables of "if event X occurs,
58 * clear it and transition to state Y". The dispatcher returns to the
59 * scheduler if none of the events in which the wait state has an interest are
60 * currently pending.
61 */
62#define WAIT_STATE(n, sn, ...) \
63 const struct fscache_state __STATE_NAME(n) = { \
64 .name = #n, \
65 .short_name = sn, \
66 .work = NULL, \
67 .transitions = { __VA_ARGS__, { 0, NULL } } \
68 }
69
70#define TRANSIT_TO(state, emask) \
71 { .events = (emask), .transit_to = STATE(state) }
72
73/*
74 * The object state machine.
75 */
76static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
77static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
78static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
79static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
80static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
81static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
82static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
83
84static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
85static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);
86
87static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
88static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
89static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
90static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
91static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
92
93static WAIT_STATE(WAIT_FOR_INIT, "?INI",
94 TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
95
96static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
97 TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));
98
99static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
100 TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
101 TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
102 TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
103
104static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
105 TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
106
107/*
108 * Out-of-band event transition tables. These are for handling unexpected
109 * events, such as an I/O error. If an OOB event occurs, the state machine
110 * clears and disables the event and forces a transition to the nominated work
 * state (any currently executing work states will complete first).
112 *
113 * In such a situation, object->state remembers the state the machine should
114 * have been in/gone to and returning NO_TRANSIT returns to that.
115 */
116static const struct fscache_transition fscache_osm_init_oob[] = {
117 TRANSIT_TO(ABORT_INIT,
118 (1 << FSCACHE_OBJECT_EV_ERROR) |
119 (1 << FSCACHE_OBJECT_EV_KILL)),
120 { 0, NULL }
121};
122
123static const struct fscache_transition fscache_osm_lookup_oob[] = {
124 TRANSIT_TO(LOOKUP_FAILURE,
125 (1 << FSCACHE_OBJECT_EV_ERROR) |
126 (1 << FSCACHE_OBJECT_EV_KILL)),
127 { 0, NULL }
128};
129
130static const struct fscache_transition fscache_osm_run_oob[] = {
131 TRANSIT_TO(KILL_OBJECT,
132 (1 << FSCACHE_OBJECT_EV_ERROR) |
133 (1 << FSCACHE_OBJECT_EV_KILL)),
134 { 0, NULL }
135};
136
137static int fscache_get_object(struct fscache_object *,
138 enum fscache_obj_ref_trace);
139static void fscache_put_object(struct fscache_object *,
140 enum fscache_obj_ref_trace);
141static bool fscache_enqueue_dependents(struct fscache_object *, int);
142static void fscache_dequeue_object(struct fscache_object *);
143static void fscache_update_aux_data(struct fscache_object *);
144
145/*
146 * we need to notify the parent when an op completes that we had outstanding
147 * upon it
148 */
149static inline void fscache_done_parent_op(struct fscache_object *object)
150{
151 struct fscache_object *parent = object->parent;
152
153 _enter("OBJ%x {OBJ%x,%x}",
154 object->debug_id, parent->debug_id, parent->n_ops);
155
156 spin_lock_nested(&parent->lock, 1);
157 parent->n_obj_ops--;
158 parent->n_ops--;
159 if (parent->n_ops == 0)
160 fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
161 spin_unlock(&parent->lock);
162}
163
164/*
165 * Object state machine dispatcher.
166 */
167static void fscache_object_sm_dispatcher(struct fscache_object *object)
168{
169 const struct fscache_transition *t;
170 const struct fscache_state *state, *new_state;
171 unsigned long events, event_mask;
172 bool oob;
173 int event = -1;
174
175 ASSERT(object != NULL);
176
177 _enter("{OBJ%x,%s,%lx}",
178 object->debug_id, object->state->name, object->events);
179
180 event_mask = object->event_mask;
181restart:
182 object->event_mask = 0; /* Mask normal event handling */
183 state = object->state;
184restart_masked:
185 events = object->events;
186
187 /* Handle any out-of-band events (typically an error) */
188 if (events & object->oob_event_mask) {
189 _debug("{OBJ%x} oob %lx",
190 object->debug_id, events & object->oob_event_mask);
191 oob = true;
192 for (t = object->oob_table; t->events; t++) {
193 if (events & t->events) {
194 state = t->transit_to;
195 ASSERT(state->work != NULL);
196 event = fls(events & t->events) - 1;
197 __clear_bit(event, &object->oob_event_mask);
198 clear_bit(event, &object->events);
199 goto execute_work_state;
200 }
201 }
202 }
203 oob = false;
204
205 /* Wait states are just transition tables */
206 if (!state->work) {
207 if (events & event_mask) {
208 for (t = state->transitions; t->events; t++) {
209 if (events & t->events) {
210 new_state = t->transit_to;
211 event = fls(events & t->events) - 1;
212 trace_fscache_osm(object, state,
213 true, false, event);
214 clear_bit(event, &object->events);
215 _debug("{OBJ%x} ev %d: %s -> %s",
216 object->debug_id, event,
217 state->name, new_state->name);
218 object->state = state = new_state;
219 goto execute_work_state;
220 }
221 }
222
223 /* The event mask didn't include all the tabled bits */
224 BUG();
225 }
226 /* Randomly woke up */
227 goto unmask_events;
228 }
229
230execute_work_state:
231 _debug("{OBJ%x} exec %s", object->debug_id, state->name);
232
233 trace_fscache_osm(object, state, false, oob, event);
234 new_state = state->work(object, event);
235 event = -1;
236 if (new_state == NO_TRANSIT) {
237 _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
238 if (unlikely(state == STATE(OBJECT_DEAD))) {
239 _leave(" [dead]");
240 return;
241 }
242 fscache_enqueue_object(object);
243 event_mask = object->oob_event_mask;
244 goto unmask_events;
245 }
246
247 _debug("{OBJ%x} %s -> %s",
248 object->debug_id, state->name, new_state->name);
249 object->state = state = new_state;
250
251 if (state->work) {
252 if (unlikely(state == STATE(OBJECT_DEAD))) {
253 _leave(" [dead]");
254 return;
255 }
256 goto restart_masked;
257 }
258
259 /* Transited to wait state */
260 event_mask = object->oob_event_mask;
261 for (t = state->transitions; t->events; t++)
262 event_mask |= t->events;
263
264unmask_events:
265 object->event_mask = event_mask;
266 smp_mb();
267 events = object->events;
268 if (events & event_mask)
269 goto restart;
270 _leave(" [msk %lx]", event_mask);
271}
272
273/*
274 * execute an object
275 */
276static void fscache_object_work_func(struct work_struct *work)
277{
278 struct fscache_object *object =
279 container_of(work, struct fscache_object, work);
280 unsigned long start;
281
282 _enter("{OBJ%x}", object->debug_id);
283
284 start = jiffies;
285 fscache_object_sm_dispatcher(object);
286 fscache_hist(fscache_objs_histogram, start);
287 fscache_put_object(object, fscache_obj_put_work);
288}
289
290/**
291 * fscache_object_init - Initialise a cache object description
292 * @object: Object description
293 * @cookie: Cookie object will be attached to
294 * @cache: Cache in which backing object will be found
295 *
296 * Initialise a cache object description to its basic values.
297 *
298 * See Documentation/filesystems/caching/backend-api.rst for a complete
299 * description.
300 */
301void fscache_object_init(struct fscache_object *object,
302 struct fscache_cookie *cookie,
303 struct fscache_cache *cache)
304{
305 const struct fscache_transition *t;
306
307 atomic_inc(&cache->object_count);
308
309 object->state = STATE(WAIT_FOR_INIT);
310 object->oob_table = fscache_osm_init_oob;
311 object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
312 spin_lock_init(&object->lock);
313 INIT_LIST_HEAD(&object->cache_link);
314 INIT_HLIST_NODE(&object->cookie_link);
315 INIT_WORK(&object->work, fscache_object_work_func);
316 INIT_LIST_HEAD(&object->dependents);
317 INIT_LIST_HEAD(&object->dep_link);
318 INIT_LIST_HEAD(&object->pending_ops);
319 object->n_children = 0;
320 object->n_ops = object->n_in_progress = object->n_exclusive = 0;
321 object->events = 0;
322 object->store_limit = 0;
323 object->store_limit_l = 0;
324 object->cache = cache;
325 object->cookie = cookie;
326 fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
327 object->parent = NULL;
328#ifdef CONFIG_FSCACHE_OBJECT_LIST
329 RB_CLEAR_NODE(&object->objlist_link);
330#endif
331
332 object->oob_event_mask = 0;
333 for (t = object->oob_table; t->events; t++)
334 object->oob_event_mask |= t->events;
335 object->event_mask = object->oob_event_mask;
336 for (t = object->state->transitions; t->events; t++)
337 object->event_mask |= t->events;
338}
339EXPORT_SYMBOL(fscache_object_init);
340
341/*
342 * Mark the object as no longer being live, making sure that we synchronise
343 * against op submission.
344 */
345static inline void fscache_mark_object_dead(struct fscache_object *object)
346{
347 spin_lock(&object->lock);
348 clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
349 spin_unlock(&object->lock);
350}
351
352/*
353 * Abort object initialisation before we start it.
354 */
355static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
356 int event)
357{
358 _enter("{OBJ%x},%d", object->debug_id, event);
359
360 object->oob_event_mask = 0;
361 fscache_dequeue_object(object);
362 return transit_to(KILL_OBJECT);
363}
364
365/*
366 * initialise an object
367 * - check the specified object's parent to see if we can make use of it
368 * immediately to do a creation
369 * - we may need to start the process of creating a parent and we need to wait
370 * for the parent's lookup and creation to complete if it's not there yet
371 */
372static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
373 int event)
374{
375 struct fscache_object *parent;
376 bool success;
377
378 _enter("{OBJ%x},%d", object->debug_id, event);
379
380 ASSERT(list_empty(&object->dep_link));
381
382 parent = object->parent;
383 if (!parent) {
384 _leave(" [no parent]");
385 return transit_to(DROP_OBJECT);
386 }
387
388 _debug("parent: %s of:%lx", parent->state->name, parent->flags);
389
390 if (fscache_object_is_dying(parent)) {
391 _leave(" [bad parent]");
392 return transit_to(DROP_OBJECT);
393 }
394
395 if (fscache_object_is_available(parent)) {
396 _leave(" [ready]");
397 return transit_to(PARENT_READY);
398 }
399
400 _debug("wait");
401
402 spin_lock(&parent->lock);
403 fscache_stat(&fscache_n_cop_grab_object);
404 success = false;
405 if (fscache_object_is_live(parent) &&
406 object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
407 list_add(&object->dep_link, &parent->dependents);
408 success = true;
409 }
410 fscache_stat_d(&fscache_n_cop_grab_object);
411 spin_unlock(&parent->lock);
412 if (!success) {
413 _leave(" [grab failed]");
414 return transit_to(DROP_OBJECT);
415 }
416
417 /* fscache_acquire_non_index_cookie() uses this
418 * to wake the chain up */
419 fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
420 _leave(" [wait]");
421 return transit_to(WAIT_FOR_PARENT);
422}
423
424/*
425 * Once the parent object is ready, we should kick off our lookup op.
426 */
427static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
428 int event)
429{
430 struct fscache_object *parent = object->parent;
431
432 _enter("{OBJ%x},%d", object->debug_id, event);
433
434 ASSERT(parent != NULL);
435
436 spin_lock(&parent->lock);
437 parent->n_ops++;
438 parent->n_obj_ops++;
439 object->lookup_jif = jiffies;
440 spin_unlock(&parent->lock);
441
442 _leave("");
443 return transit_to(LOOK_UP_OBJECT);
444}
445
446/*
447 * look an object up in the cache from which it was allocated
448 * - we hold an "access lock" on the parent object, so the parent object cannot
449 * be withdrawn by either party till we've finished
450 */
451static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
452 int event)
453{
454 struct fscache_cookie *cookie = object->cookie;
455 struct fscache_object *parent = object->parent;
456 int ret;
457
458 _enter("{OBJ%x},%d", object->debug_id, event);
459
460 object->oob_table = fscache_osm_lookup_oob;
461
462 ASSERT(parent != NULL);
463 ASSERTCMP(parent->n_ops, >, 0);
464 ASSERTCMP(parent->n_obj_ops, >, 0);
465
466 /* make sure the parent is still available */
467 ASSERT(fscache_object_is_available(parent));
468
469 if (fscache_object_is_dying(parent) ||
470 test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
471 !fscache_use_cookie(object)) {
472 _leave(" [unavailable]");
473 return transit_to(LOOKUP_FAILURE);
474 }
475
476 _debug("LOOKUP \"%s\" in \"%s\"",
477 cookie->def->name, object->cache->tag->name);
478
479 fscache_stat(&fscache_n_object_lookups);
480 fscache_stat(&fscache_n_cop_lookup_object);
481 ret = object->cache->ops->lookup_object(object);
482 fscache_stat_d(&fscache_n_cop_lookup_object);
483
484 fscache_unuse_cookie(object);
485
486 if (ret == -ETIMEDOUT) {
487 /* probably stuck behind another object, so move this one to
488 * the back of the queue */
489 fscache_stat(&fscache_n_object_lookups_timed_out);
490 _leave(" [timeout]");
491 return NO_TRANSIT;
492 }
493
494 if (ret < 0) {
495 _leave(" [error]");
496 return transit_to(LOOKUP_FAILURE);
497 }
498
499 _leave(" [ok]");
500 return transit_to(OBJECT_AVAILABLE);
501}
502
503/**
504 * fscache_object_lookup_negative - Note negative cookie lookup
505 * @object: Object pointing to cookie to mark
506 *
507 * Note negative lookup, permitting those waiting to read data from an already
508 * existing backing object to continue as there's no data for them to read.
509 */
510void fscache_object_lookup_negative(struct fscache_object *object)
511{
512 struct fscache_cookie *cookie = object->cookie;
513
514 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
515
516 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
517 fscache_stat(&fscache_n_object_lookups_negative);
518
519 /* Allow write requests to begin stacking up and read requests to begin
520 * returning ENODATA.
521 */
522 set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
523 clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
524
525 _debug("wake up lookup %p", &cookie->flags);
526 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
527 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
528 }
529 _leave("");
530}
531EXPORT_SYMBOL(fscache_object_lookup_negative);
532
533/**
534 * fscache_obtained_object - Note successful object lookup or creation
535 * @object: Object pointing to cookie to mark
536 *
537 * Note successful lookup and/or creation, permitting those waiting to write
538 * data to a backing object to continue.
539 *
540 * Note that after calling this, an object's cookie may be relinquished by the
541 * netfs, and so must be accessed with object lock held.
542 */
543void fscache_obtained_object(struct fscache_object *object)
544{
545 struct fscache_cookie *cookie = object->cookie;
546
547 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
548
549 /* if we were still looking up, then we must have a positive lookup
550 * result, in which case there may be data available */
551 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
552 fscache_stat(&fscache_n_object_lookups_positive);
553
554 /* We do (presumably) have data */
555 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
556 clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
557
558 /* Allow write requests to begin stacking up and read requests
559 * to begin shovelling data.
560 */
561 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
562 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
563 } else {
564 fscache_stat(&fscache_n_object_created);
565 }
566
567 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
568 _leave("");
569}
570EXPORT_SYMBOL(fscache_obtained_object);
571
572/*
573 * handle an object that has just become available
574 */
575static const struct fscache_state *fscache_object_available(struct fscache_object *object,
576 int event)
577{
578 _enter("{OBJ%x},%d", object->debug_id, event);
579
580 object->oob_table = fscache_osm_run_oob;
581
582 spin_lock(&object->lock);
583
584 fscache_done_parent_op(object);
585 if (object->n_in_progress == 0) {
586 if (object->n_ops > 0) {
587 ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
588 fscache_start_operations(object);
589 } else {
590 ASSERT(list_empty(&object->pending_ops));
591 }
592 }
593 spin_unlock(&object->lock);
594
595 fscache_stat(&fscache_n_cop_lookup_complete);
596 object->cache->ops->lookup_complete(object);
597 fscache_stat_d(&fscache_n_cop_lookup_complete);
598
599 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
600 fscache_stat(&fscache_n_object_avail);
601
602 _leave("");
603 return transit_to(JUMPSTART_DEPS);
604}
605
606/*
607 * Wake up this object's dependent objects now that we've become available.
608 */
609static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
610 int event)
611{
612 _enter("{OBJ%x},%d", object->debug_id, event);
613
614 if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
615 return NO_TRANSIT; /* Not finished; requeue */
616 return transit_to(WAIT_FOR_CMD);
617}
618
619/*
620 * Handle lookup or creation failute.
621 */
622static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
623 int event)
624{
625 struct fscache_cookie *cookie;
626
627 _enter("{OBJ%x},%d", object->debug_id, event);
628
629 object->oob_event_mask = 0;
630
631 fscache_stat(&fscache_n_cop_lookup_complete);
632 object->cache->ops->lookup_complete(object);
633 fscache_stat_d(&fscache_n_cop_lookup_complete);
634
635 set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
636
637 cookie = object->cookie;
638 set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
639 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
640 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
641
642 fscache_done_parent_op(object);
643 return transit_to(KILL_OBJECT);
644}
645
646/*
647 * Wait for completion of all active operations on this object and the death of
648 * all child objects of this object.
649 */
650static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
651 int event)
652{
653 _enter("{OBJ%x,%d,%d},%d",
654 object->debug_id, object->n_ops, object->n_children, event);
655
656 fscache_mark_object_dead(object);
657 object->oob_event_mask = 0;
658
659 if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
660 /* Reject any new read/write ops and abort any that are pending. */
661 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
662 fscache_cancel_all_ops(object);
663 }
664
665 if (list_empty(&object->dependents) &&
666 object->n_ops == 0 &&
667 object->n_children == 0)
668 return transit_to(DROP_OBJECT);
669
670 if (object->n_in_progress == 0) {
671 spin_lock(&object->lock);
672 if (object->n_ops > 0 && object->n_in_progress == 0)
673 fscache_start_operations(object);
674 spin_unlock(&object->lock);
675 }
676
677 if (!list_empty(&object->dependents))
678 return transit_to(KILL_DEPENDENTS);
679
680 return transit_to(WAIT_FOR_CLEARANCE);
681}
682
683/*
684 * Kill dependent objects.
685 */
686static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
687 int event)
688{
689 _enter("{OBJ%x},%d", object->debug_id, event);
690
691 if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
692 return NO_TRANSIT; /* Not finished */
693 return transit_to(WAIT_FOR_CLEARANCE);
694}
695
696/*
697 * Drop an object's attachments
698 */
699static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
700 int event)
701{
702 struct fscache_object *parent = object->parent;
703 struct fscache_cookie *cookie = object->cookie;
704 struct fscache_cache *cache = object->cache;
705 bool awaken = false;
706
707 _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);
708
709 ASSERT(cookie != NULL);
710 ASSERT(!hlist_unhashed(&object->cookie_link));
711
712 if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
713 _debug("final update");
714 fscache_update_aux_data(object);
715 }
716
717 /* Make sure the cookie no longer points here and that the netfs isn't
718 * waiting for us.
719 */
720 spin_lock(&cookie->lock);
721 hlist_del_init(&object->cookie_link);
722 if (hlist_empty(&cookie->backing_objects) &&
723 test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
724 awaken = true;
725 spin_unlock(&cookie->lock);
726
727 if (awaken)
728 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
729 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
730 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
731
732
733 /* Prevent a race with our last child, which has to signal EV_CLEARED
734 * before dropping our spinlock.
735 */
736 spin_lock(&object->lock);
737 spin_unlock(&object->lock);
738
739 /* Discard from the cache's collection of objects */
740 spin_lock(&cache->object_list_lock);
741 list_del_init(&object->cache_link);
742 spin_unlock(&cache->object_list_lock);
743
744 fscache_stat(&fscache_n_cop_drop_object);
745 cache->ops->drop_object(object);
746 fscache_stat_d(&fscache_n_cop_drop_object);
747
748 /* The parent object wants to know when all it dependents have gone */
749 if (parent) {
750 _debug("release parent OBJ%x {%d}",
751 parent->debug_id, parent->n_children);
752
753 spin_lock(&parent->lock);
754 parent->n_children--;
755 if (parent->n_children == 0)
756 fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
757 spin_unlock(&parent->lock);
758 object->parent = NULL;
759 }
760
761 /* this just shifts the object release to the work processor */
762 fscache_put_object(object, fscache_obj_put_drop_obj);
763 fscache_stat(&fscache_n_object_dead);
764
765 _leave("");
766 return transit_to(OBJECT_DEAD);
767}
768
769/*
770 * get a ref on an object
771 */
772static int fscache_get_object(struct fscache_object *object,
773 enum fscache_obj_ref_trace why)
774{
775 int ret;
776
777 fscache_stat(&fscache_n_cop_grab_object);
778 ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
779 fscache_stat_d(&fscache_n_cop_grab_object);
780 return ret;
781}
782
783/*
784 * Discard a ref on an object
785 */
786static void fscache_put_object(struct fscache_object *object,
787 enum fscache_obj_ref_trace why)
788{
789 fscache_stat(&fscache_n_cop_put_object);
790 object->cache->ops->put_object(object, why);
791 fscache_stat_d(&fscache_n_cop_put_object);
792}
793
794/**
795 * fscache_object_destroy - Note that a cache object is about to be destroyed
796 * @object: The object to be destroyed
797 *
798 * Note the imminent destruction and deallocation of a cache object record.
799 */
800void fscache_object_destroy(struct fscache_object *object)
801{
802 fscache_objlist_remove(object);
803
804 /* We can get rid of the cookie now */
805 fscache_cookie_put(object->cookie, fscache_cookie_put_object);
806 object->cookie = NULL;
807}
808EXPORT_SYMBOL(fscache_object_destroy);
809
810/*
811 * enqueue an object for metadata-type processing
812 */
813void fscache_enqueue_object(struct fscache_object *object)
814{
815 _enter("{OBJ%x}", object->debug_id);
816
817 if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
818 wait_queue_head_t *cong_wq =
819 &get_cpu_var(fscache_object_cong_wait);
820
821 if (queue_work(fscache_object_wq, &object->work)) {
822 if (fscache_object_congested())
823 wake_up(cong_wq);
824 } else
825 fscache_put_object(object, fscache_obj_put_queue);
826
827 put_cpu_var(fscache_object_cong_wait);
828 }
829}
830
831/**
832 * fscache_object_sleep_till_congested - Sleep until object wq is congested
833 * @timeoutp: Scheduler sleep timeout
834 *
835 * Allow an object handler to sleep until the object workqueue is congested.
836 *
837 * The caller must set up a wake up event before calling this and must have set
838 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
839 * condition before calling this function as no test is made here.
840 *
841 * %true is returned if the object wq is congested, %false otherwise.
842 */
843bool fscache_object_sleep_till_congested(signed long *timeoutp)
844{
845 wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
846 DEFINE_WAIT(wait);
847
848 if (fscache_object_congested())
849 return true;
850
851 add_wait_queue_exclusive(cong_wq, &wait);
852 if (!fscache_object_congested())
853 *timeoutp = schedule_timeout(*timeoutp);
854 finish_wait(cong_wq, &wait);
855
856 return fscache_object_congested();
857}
858EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
859
860/*
861 * Enqueue the dependents of an object for metadata-type processing.
862 *
863 * If we don't manage to finish the list before the scheduler wants to run
864 * again then return false immediately. We return true if the list was
865 * cleared.
866 */
867static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
868{
869 struct fscache_object *dep;
870 bool ret = true;
871
872 _enter("{OBJ%x}", object->debug_id);
873
874 if (list_empty(&object->dependents))
875 return true;
876
877 spin_lock(&object->lock);
878
879 while (!list_empty(&object->dependents)) {
880 dep = list_entry(object->dependents.next,
881 struct fscache_object, dep_link);
882 list_del_init(&dep->dep_link);
883
884 fscache_raise_event(dep, event);
885 fscache_put_object(dep, fscache_obj_put_enq_dep);
886
887 if (!list_empty(&object->dependents) && need_resched()) {
888 ret = false;
889 break;
890 }
891 }
892
893 spin_unlock(&object->lock);
894 return ret;
895}
896
897/*
898 * remove an object from whatever queue it's waiting on
899 */
900static void fscache_dequeue_object(struct fscache_object *object)
901{
902 _enter("{OBJ%x}", object->debug_id);
903
904 if (!list_empty(&object->dep_link)) {
905 spin_lock(&object->parent->lock);
906 list_del_init(&object->dep_link);
907 spin_unlock(&object->parent->lock);
908 }
909
910 _leave("");
911}
912
913/**
914 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
915 * @object: The object to ask about
916 * @data: The auxiliary data for the object
917 * @datalen: The size of the auxiliary data
918 *
919 * This function consults the netfs about the coherency state of an object.
920 * The caller must be holding a ref on cookie->n_active (held by
921 * fscache_look_up_object() on behalf of the cache backend during object lookup
922 * and creation).
923 */
924enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
925 const void *data, uint16_t datalen,
926 loff_t object_size)
927{
928 enum fscache_checkaux result;
929
930 if (!object->cookie->def->check_aux) {
931 fscache_stat(&fscache_n_checkaux_none);
932 return FSCACHE_CHECKAUX_OKAY;
933 }
934
935 result = object->cookie->def->check_aux(object->cookie->netfs_data,
936 data, datalen, object_size);
937 switch (result) {
938 /* entry okay as is */
939 case FSCACHE_CHECKAUX_OKAY:
940 fscache_stat(&fscache_n_checkaux_okay);
941 break;
942
943 /* entry requires update */
944 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
945 fscache_stat(&fscache_n_checkaux_update);
946 break;
947
948 /* entry requires deletion */
949 case FSCACHE_CHECKAUX_OBSOLETE:
950 fscache_stat(&fscache_n_checkaux_obsolete);
951 break;
952
953 default:
954 BUG();
955 }
956
957 return result;
958}
959EXPORT_SYMBOL(fscache_check_aux);
960
961/*
962 * Asynchronously invalidate an object.
963 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(radix_tree_empty(&object->cookie->stores));
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	/* The exclusive op runs the backend's invalidate_object method and
	 * drops its use of the cookie when it completes.
	 */
	fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	/* Allocation failure: mark the object dead and give back the cookie
	 * use taken above before killing the object.
	 */
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	/* Submission failed, so the op never ran and never consumed the
	 * cookie use - free it and drop the use ourselves.
	 */
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}
1033
1034static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
1035 int event)
1036{
1037 const struct fscache_state *s;
1038
1039 fscache_stat(&fscache_n_invalidates_run);
1040 fscache_stat(&fscache_n_cop_invalidate_object);
1041 s = _fscache_invalidate_object(object, event);
1042 fscache_stat_d(&fscache_n_cop_invalidate_object);
1043 return s;
1044}
1045
1046/*
1047 * Update auxiliary data.
1048 */
1049static void fscache_update_aux_data(struct fscache_object *object)
1050{
1051 fscache_stat(&fscache_n_updates_run);
1052 fscache_stat(&fscache_n_cop_update_object);
1053 object->cache->ops->update_object(object);
1054 fscache_stat_d(&fscache_n_cop_update_object);
1055}
1056
1057/*
1058 * Asynchronously update an object.
1059 */
1060static const struct fscache_state *fscache_update_object(struct fscache_object *object,
1061 int event)
1062{
1063 _enter("{OBJ%x},%d", object->debug_id, event);
1064
1065 fscache_update_aux_data(object);
1066
1067 _leave("");
1068 return transit_to(WAIT_FOR_CMD);
1069}
1070
1071/**
1072 * fscache_object_retrying_stale - Note retrying stale object
1073 * @object: The object that will be retried
1074 *
1075 * Note that an object lookup found an on-disk object that was adjudged to be
1076 * stale and has been deleted. The lookup will be retried.
1077 */
1078void fscache_object_retrying_stale(struct fscache_object *object)
1079{
1080 fscache_stat(&fscache_n_cache_no_space_reject);
1081}
1082EXPORT_SYMBOL(fscache_object_retrying_stale);
1083
1084/**
1085 * fscache_object_mark_killed - Note that an object was killed
1086 * @object: The object that was culled
1087 * @why: The reason the object was killed.
1088 *
1089 * Note that an object was killed. Returns true if the object was
1090 * already marked killed, false if it wasn't.
1091 */
1092void fscache_object_mark_killed(struct fscache_object *object,
1093 enum fscache_why_object_killed why)
1094{
1095 if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
1096 pr_err("Error: Object already killed by cache [%s]\n",
1097 object->cache->identifier);
1098 return;
1099 }
1100
1101 switch (why) {
1102 case FSCACHE_OBJECT_NO_SPACE:
1103 fscache_stat(&fscache_n_cache_no_space_reject);
1104 break;
1105 case FSCACHE_OBJECT_IS_STALE:
1106 fscache_stat(&fscache_n_cache_stale_objects);
1107 break;
1108 case FSCACHE_OBJECT_WAS_RETIRED:
1109 fscache_stat(&fscache_n_cache_retired_objects);
1110 break;
1111 case FSCACHE_OBJECT_WAS_CULLED:
1112 fscache_stat(&fscache_n_cache_culled_objects);
1113 break;
1114 }
1115}
1116EXPORT_SYMBOL(fscache_object_mark_killed);
1117
1118/*
1119 * The object is dead. We can get here if an object gets queued by an event
1120 * that would lead to its death (such as EV_KILL) when the dispatcher is
1121 * already running (and so can be requeued) but hasn't yet cleared the event
1122 * mask.
1123 */
1124static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
1125 int event)
1126{
1127 if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
1128 &object->flags))
1129 return NO_TRANSIT;
1130
1131 WARN(true, "FS-Cache object redispatched after death");
1132 return NO_TRANSIT;
1133}