v3.1: fs/fscache/operation.c
 
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
		       op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
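
/*
 * Illustrative sketch, not taken from the kernel tree: a caller wanting the
 * thread pool to process an operation marks it FSCACHE_OP_ASYNC and supplies
 * a processor callback before enqueueing.  my_processor is hypothetical; the
 * ref taken at enqueue time is dropped by fscache_op_work_func() after the
 * processor has run.
 *
 *	static void my_processor(struct fscache_operation *op)
 *	{
 *		... do the deferred work ...
 *	}
 *
 *	op->flags = FSCACHE_OP_ASYNC;
 *	op->processor = my_processor;
 *	fscache_enqueue_operation(op);
 */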

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ret = -ENOBUFS;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_ops > 1) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return ret;
}
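
/*
 * Illustrative sketch, an assumption rather than kernel code: exclusive ops
 * are used where nothing else may run against the object at the same time,
 * e.g. an attribute-change path.  The caller sets the exclusive flag before
 * submitting; if submission is refused, the op was never attached to the
 * object and the caller disposes of it itself:
 *
 *	set_bit(FSCACHE_OP_EXCLUSIVE, &op->flags);
 *	if (fscache_submit_exclusive_op(object, op) < 0)
 *		kfree(op);
 */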

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 unsigned long ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id,
	       fscache_object_states[object->state]);
	kdebug("objstate=%s [%s]",
	       fscache_object_states[object->state],
	       fscache_object_states[ostate]);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_DYING ||
		   object->state == FSCACHE_OBJECT_LC_DYING ||
		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
		fscache_stat(&fscache_n_op_rejected);
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		ret = -ENOBUFS;
	} else {
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
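
/*
 * Illustrative sketch with hypothetical callbacks, not taken from this file:
 * a read or write path typically initialises an op, submits it, and treats
 * -ENOBUFS as a cache miss to be satisfied from the backing store instead,
 * assuming the three-argument fscache_operation_init() of this era:
 *
 *	fscache_operation_init(op, my_processor, my_release);
 *	op->flags = FSCACHE_OP_ASYNC;
 *	if (fscache_submit_op(object, op) < 0) {
 *		kfree(op);
 *		return -ENOBUFS;
 *	}
 */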

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (!list_empty(&op->pend_link)) {
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		object->n_ops--;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
		BUG();

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
		ASSERTCMP(object->n_exclusive, >, 0);
		object->n_exclusive--;
	}

	ASSERTCMP(object->n_in_progress, >, 0);
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
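
/*
 * Sketch of the usual ref discipline (illustrative, not from the source):
 * every ref taken on an op, including the initial one held by the caller,
 * is balanced by one fscache_put_operation() call; the final put runs
 * ->release() and frees the op, so it must not be touched afterwards:
 *
 *	atomic_inc(&op->usage);		take an extra ref before handing off
 *	...
 *	fscache_put_operation(op);	may free op
 */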

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);

		spin_lock(&object->lock);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			ASSERTCMP(object->n_exclusive, >, 0);
			object->n_exclusive--;
		}

		ASSERTCMP(object->n_in_progress, >, 0);
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fs_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}
v5.9: fs/fscache/operation.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/operations.rst
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @cookie: The cookie the operation relates to
 * @op: The operation to initialise
 * @processor: The function to run the operation
 * @cancel: The cancellation handler, or NULL for a no-op
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object as needed.
 */
void fscache_operation_init(struct fscache_cookie *cookie,
			    struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
	trace_fscache_op(cookie, op, fscache_op_init);
}
EXPORT_SYMBOL(fscache_operation_init);
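
/*
 * Illustrative sketch (hypothetical callback names): a caller embeds a
 * struct fscache_operation in its own op structure, initialises it with its
 * callbacks, then sets the type flags before submission.  The initial usage
 * count of 1 belongs to the caller, so a refused submission is followed by
 * a put:
 *
 *	fscache_operation_init(cookie, op, my_processor, my_cancel, my_release);
 *	op->flags = FSCACHE_OP_MYTHREAD;
 *	if (fscache_submit_op(object, op) < 0)
 *		fscache_put_operation(op);
 */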

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	struct fscache_cookie *cookie = op->object->cookie;

	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		trace_fscache_op(cookie, op, fscache_op_enqueue_async);
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	else
		trace_fscache_op(object->cookie, op, fscache_op_run);
	fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_submit_ex);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(object->cookie, op, fscache_op_submit);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_cancel);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
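
/*
 * Illustrative sketch (hypothetical waiter): a caller that gives up waiting
 * for its op may try to cancel it.  0 means the op was cancelled while still
 * pending (or, with cancel_in_progress_op set, while running); -EBUSY means
 * it got too far to be cancelled.  Either way the waiter still drops its own
 * ref:
 *
 *	if (fscache_cancel_op(op, false) == 0)
 *		_debug("cancelled before it ran");
 *	fscache_put_operation(op);
 */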

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		trace_fscache_op(object->cookie, op, fscache_op_cancel_all);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		trace_fscache_op(object->cookie, op, fscache_op_completed);
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		trace_fscache_op(object->cookie, op, fscache_op_cancelled);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
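
/*
 * Illustrative sketch (my_processor and do_the_io are hypothetical): an op's
 * processor reports its outcome with fscache_op_complete() before
 * fscache_op_work_func() drops the workqueue's ref, marking the op either
 * complete or cancelled:
 *
 *	static void my_processor(struct fscache_operation *op)
 *	{
 *		int ret = do_the_io(op);
 *
 *		fscache_op_complete(op, ret < 0);
 *	}
 */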

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		trace_fscache_op(object->cookie, op, fscache_op_gc);

		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fs_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(op->object->cookie, op, fscache_op_work);

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}