v4.6
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @op: The operation to initialise
 * @processor: The function the thread pool should run to process the op
 * @cancel: The function to call if the op is cancelled (NULL for a no-op)
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object, if needed.
 */
void fscache_operation_init(struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
}
EXPORT_SYMBOL(fscache_operation_init);
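
/*
 * Example (editor's sketch, not part of the original source): a typical
 * caller embeds struct fscache_operation in a larger record and supplies
 * the function pointers at init time.  All "example_" names below are
 * hypothetical.
 *
 *	struct example_op {
 *		struct fscache_operation op;
 *		void *private;
 *	};
 *
 *	static void example_processor(struct fscache_operation *_op)
 *	{
 *		struct example_op *xop =
 *			container_of(_op, struct example_op, op);
 *
 *		... do the I/O described by xop->private, then:
 *		fscache_op_complete(_op, false);
 *	}
 *
 *	static void example_release(struct fscache_operation *_op)
 *	{
 *		... free anything hung off the containing record ...
 *	}
 *
 *	xop = kzalloc(sizeof(struct example_op), GFP_KERNEL);
 *	if (xop) {
 *		fscache_operation_init(&xop->op, example_processor,
 *				       NULL, example_release);
 *		xop->op.flags = FSCACHE_OP_ASYNC;
 *	}
 *
 * Passing NULL for @cancel selects fscache_operation_dummy_cancel().
 */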

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
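
/*
 * Example (editor's sketch): how the two op types behave once submitted
 * and in progress.  An FSCACHE_OP_ASYNC op is run by the thread pool on
 * fscache_op_wq; an FSCACHE_OP_MYTHREAD op is left for the submitting
 * thread, which typically waits for the FSCACHE_OP_WAITING bit to clear,
 * as the retrieval code elsewhere in fscache does:
 *
 *	if (test_bit(FSCACHE_OP_WAITING, &op->flags))
 *		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			    TASK_UNINTERRUPTIBLE);
 */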

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
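
/*
 * Example (editor's sketch): error handling for the submission above.  The
 * submission paths take any extra refs they need, so on failure the caller
 * just drops its own ref; "xop" is a hypothetical op set up with
 * fscache_operation_init():
 *
 *	ret = fscache_submit_exclusive_op(object, &xop->op);
 *	if (ret < 0) {
 *		-ENOBUFS means the object is dying or was killed by the
 *		cache; -EIO means the backing cache is broken.  The op has
 *		already been cancelled, so just put the caller's ref:
 *		fscache_put_operation(&xop->op);
 *	}
 */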

/*
 * submit an operation for an object
 * - ops may be submitted only whilst the object is in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
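
/*
 * Example (editor's sketch): the usual caller pattern around the return
 * value of fscache_submit_op().  On -ENOBUFS a netfs would fall back to
 * fetching from the server; 0 means the op is now pending or in progress.
 * "xop" is hypothetical, as above:
 *
 *	ret = fscache_submit_op(object, &xop->op);
 *	if (ret == 0)
 *		return 0;
 *	fscache_put_operation(&xop->op);
 *	return ret;
 */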

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the operation */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
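
/*
 * Example (editor's sketch): abandoning an op that may still be queued,
 * e.g. because the caller no longer wants the result.  -EBUSY means the
 * op could not be cancelled here (it is already running, or already
 * finished) and must be left to complete through fscache_op_complete().
 * "xop" is hypothetical, as above:
 *
 *	if (fscache_cancel_op(&xop->op, false) == 0)
 *		_debug("op was still pending and is now cancelled");
 *	fscache_put_operation(&xop->op);
 */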

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
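
/*
 * Example (editor's sketch): a processor routine must end by reporting its
 * outcome exactly once so that the object's n_in_progress/n_exclusive
 * accounting is unwound and the next pending op can run.  The "example_"
 * names are hypothetical:
 *
 *	static void example_processor(struct fscache_operation *op)
 *	{
 *		bool failed = example_do_io(op);
 *
 *		fscache_op_complete(op, failed);
 *	}
 */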

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
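
/*
 * Example (editor's sketch): the reference lifecycle implied by the code
 * above.  Initialisation gives the caller one ref; submission and the work
 * queue take their own; the final put runs op->release and frees the op.
 * "xop" is hypothetical, as above:
 *
 *	fscache_operation_init(&xop->op, ...);	usage = 1, ST_INITIALISED
 *	fscache_submit_op(object, &xop->op);	may take a ref for pend_link
 *	...
 *	fscache_put_operation(&xop->op);	drops the caller's ref;
 *						the last put kfrees the op
 */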

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}
v3.1
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
		       op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ret = -ENOBUFS;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_ops > 1) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 unsigned long ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id,
	       fscache_object_states[object->state]);
	kdebug("objstate=%s [%s]",
	       fscache_object_states[object->state],
	       fscache_object_states[ostate]);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an operation for an object
 * - ops may be submitted only whilst the object is in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_DYING ||
		   object->state == FSCACHE_OBJECT_LC_DYING ||
		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
		fscache_stat(&fscache_n_op_rejected);
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		ret = -ENOBUFS;
	} else {
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the operation */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (!list_empty(&op->pend_link)) {
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		object->n_ops--;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
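
/*
 * Example (editor's sketch): note the older interface here - this version
 * of fscache_cancel_op() takes no cancel_in_progress_op argument and can
 * only cancel an op still sitting on the pending list.  "xop" is
 * hypothetical, as in the v4.6 examples above:
 *
 *	ret = fscache_cancel_op(&xop->op);
 *	if (ret == -EBUSY) {
 *		the op is already running or finished; it must be
 *		left to complete normally
 *	}
 */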

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
		BUG();

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
		ASSERTCMP(object->n_exclusive, >, 0);
		object->n_exclusive--;
	}

	ASSERTCMP(object->n_in_progress, >, 0);
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);

		spin_lock(&object->lock);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			ASSERTCMP(object->n_exclusive, >, 0);
			object->n_exclusive--;
		}

		ASSERTCMP(object->n_in_progress, >, 0);
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}