v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* Storage object read/write
  3 *
  4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  5 * Written by David Howells (dhowells@redhat.com)
  6 */
  7
  8#include <linux/mount.h>
  9#include <linux/slab.h>
 10#include <linux/file.h>
 11#include <linux/swap.h>
 12#include "internal.h"
 13
 14/*
 15 * detect wake up events generated by the unlocking of pages in which we're
 16 * interested
 17 * - we use this to detect read completion of backing pages
 18 * - the caller holds the waitqueue lock
 19 */
 20static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 21				  int sync, void *_key)
 22{
 23	struct cachefiles_one_read *monitor =
 24		container_of(wait, struct cachefiles_one_read, monitor);
 25	struct cachefiles_object *object;
 26	struct fscache_retrieval *op = monitor->op;
 27	struct wait_bit_key *key = _key;
 28	struct page *page = wait->private;
 29
 30	ASSERT(key);
 31
 32	_enter("{%lu},%u,%d,{%p,%u}",
 33	       monitor->netfs_page->index, mode, sync,
 34	       key->flags, key->bit_nr);
 35
 36	if (key->flags != &page->flags ||
 37	    key->bit_nr != PG_locked)
 38		return 0;
 39
 40	_debug("--- monitor %p %lx ---", page, page->flags);
 41
 42	if (!PageUptodate(page) && !PageError(page)) {
  43		/* unlocked, not uptodate and not erroneous? */
 44		_debug("page probably truncated");
 45	}
 46
 47	/* remove from the waitqueue */
 48	list_del(&wait->entry);
 49
 50	/* move onto the action list and queue for FS-Cache thread pool */
 51	ASSERT(op);
 52
 53	/* We need to temporarily bump the usage count as we don't own a ref
 54	 * here otherwise cachefiles_read_copier() may free the op between the
 55	 * monitor being enqueued on the op->to_do list and the op getting
 56	 * enqueued on the work queue.
 57	 */
 58	fscache_get_retrieval(op);
 59
 60	object = container_of(op->op.object, struct cachefiles_object, fscache);
 61	spin_lock(&object->work_lock);
 62	list_add_tail(&monitor->op_link, &op->to_do);
 63	fscache_enqueue_retrieval(op);
 64	spin_unlock(&object->work_lock);
 65
 66	fscache_put_retrieval(op);
 67	return 0;
 68}
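/* Illustrative sketch (not from this file): roughly how a monitor is
 * wired up so that cachefiles_read_waiter() above fires when a backing
 * page is unlocked.  example_install_monitor() is a hypothetical name.
 */
#if 0
static void example_install_monitor(struct cachefiles_one_read *monitor,
				    struct page *backpage)
{
	/* have the waitqueue entry call cachefiles_read_waiter() on wake-up */
	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
	monitor->monitor.private = backpage;	/* the page being watched */

	/* hang it on the page's waitqueue; unlock_page() delivers the
	 * PG_locked wake-up that the waiter filters on */
	add_page_wait_queue(backpage, &monitor->monitor);
}
#endif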
 69
 70/*
 71 * handle a probably truncated page
 72 * - check to see if the page is still relevant and reissue the read if
 73 *   possible
 74 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 75 *   must wait again and 0 if successful
 76 */
 77static int cachefiles_read_reissue(struct cachefiles_object *object,
 78				   struct cachefiles_one_read *monitor)
 79{
 80	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
 81	struct page *backpage = monitor->back_page, *backpage2;
 82	int ret;
 83
 84	_enter("{ino=%lx},{%lx,%lx}",
 85	       d_backing_inode(object->backer)->i_ino,
 86	       backpage->index, backpage->flags);
 87
 88	/* skip if the page was truncated away completely */
 89	if (backpage->mapping != bmapping) {
 90		_leave(" = -ENODATA [mapping]");
 91		return -ENODATA;
 92	}
 93
 94	backpage2 = find_get_page(bmapping, backpage->index);
 95	if (!backpage2) {
 96		_leave(" = -ENODATA [gone]");
 97		return -ENODATA;
 98	}
 99
100	if (backpage != backpage2) {
101		put_page(backpage2);
102		_leave(" = -ENODATA [different]");
103		return -ENODATA;
104	}
105
106	/* the page is still there and we already have a ref on it, so we don't
107	 * need a second */
108	put_page(backpage2);
109
110	INIT_LIST_HEAD(&monitor->op_link);
111	add_page_wait_queue(backpage, &monitor->monitor);
112
113	if (trylock_page(backpage)) {
114		ret = -EIO;
115		if (PageError(backpage))
116			goto unlock_discard;
117		ret = 0;
118		if (PageUptodate(backpage))
119			goto unlock_discard;
120
121		_debug("reissue read");
122		ret = bmapping->a_ops->readpage(NULL, backpage);
123		if (ret < 0)
124			goto unlock_discard;
125	}
126
127	/* but the page may have been read before the monitor was installed, so
128	 * the monitor may miss the event - so we have to ensure that we do get
129	 * one in such a case */
130	if (trylock_page(backpage)) {
131		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
132		unlock_page(backpage);
133	}
134
135	/* it'll reappear on the todo list */
136	_leave(" = -EINPROGRESS");
137	return -EINPROGRESS;
138
139unlock_discard:
140	unlock_page(backpage);
141	spin_lock_irq(&object->work_lock);
142	list_del(&monitor->op_link);
143	spin_unlock_irq(&object->work_lock);
144	_leave(" = %d", ret);
145	return ret;
146}
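/* Note on the trylock "jumpstart" above: if the read completed before
 * add_page_wait_queue() installed the monitor, no PG_locked wake-up
 * will ever arrive, so the page is briefly locked and unlocked to
 * replay the event and push the monitor onto the op->to_do list. */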
147
148/*
149 * copy data from backing pages to netfs pages to complete a read operation
150 * - driven by FS-Cache's thread pool
151 */
152static void cachefiles_read_copier(struct fscache_operation *_op)
153{
154	struct cachefiles_one_read *monitor;
155	struct cachefiles_object *object;
156	struct fscache_retrieval *op;
157	int error, max;
158
159	op = container_of(_op, struct fscache_retrieval, op);
160	object = container_of(op->op.object,
161			      struct cachefiles_object, fscache);
162
163	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);
164
165	max = 8;
166	spin_lock_irq(&object->work_lock);
167
168	while (!list_empty(&op->to_do)) {
169		monitor = list_entry(op->to_do.next,
170				     struct cachefiles_one_read, op_link);
171		list_del(&monitor->op_link);
172
173		spin_unlock_irq(&object->work_lock);
174
175		_debug("- copy {%lu}", monitor->back_page->index);
176
177	recheck:
178		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
179			     &object->fscache.cookie->flags)) {
180			error = -ESTALE;
181		} else if (PageUptodate(monitor->back_page)) {
182			copy_highpage(monitor->netfs_page, monitor->back_page);
183			fscache_mark_page_cached(monitor->op,
184						 monitor->netfs_page);
185			error = 0;
186		} else if (!PageError(monitor->back_page)) {
187			/* the page has probably been truncated */
188			error = cachefiles_read_reissue(object, monitor);
189			if (error == -EINPROGRESS)
190				goto next;
191			goto recheck;
192		} else {
193			cachefiles_io_error_obj(
194				object,
195				"Readpage failed on backing file %lx",
196				(unsigned long) monitor->back_page->flags);
197			error = -EIO;
198		}
199
200		put_page(monitor->back_page);
201
202		fscache_end_io(op, monitor->netfs_page, error);
203		put_page(monitor->netfs_page);
204		fscache_retrieval_complete(op, 1);
205		fscache_put_retrieval(op);
206		kfree(monitor);
207
208	next:
209		/* let the thread pool have some air occasionally */
210		max--;
211		if (max < 0 || need_resched()) {
212			if (!list_empty(&op->to_do))
213				fscache_enqueue_retrieval(op);
214			_leave(" [maxed out]");
215			return;
216		}
217
218		spin_lock_irq(&object->work_lock);
219	}
220
221	spin_unlock_irq(&object->work_lock);
222	_leave("");
223}
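/* The copier runs in FS-Cache's operation thread pool: it drains the
 * op->to_do list that cachefiles_read_waiter() fills, handling at most
 * eight pages per pass (see "max = 8") before requeueing itself so one
 * large retrieval can't monopolise the pool. */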
224
225/*
226 * read the corresponding page to the given set from the backing file
227 * - an uncertain page is simply discarded, to be tried again another time
228 */
229static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
230					    struct fscache_retrieval *op,
231					    struct page *netpage)
232{
233	struct cachefiles_one_read *monitor;
234	struct address_space *bmapping;
235	struct page *newpage, *backpage;
236	int ret;
237
238	_enter("");
239
240	_debug("read back %p{%lu,%d}",
241	       netpage, netpage->index, page_count(netpage));
242
243	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
244	if (!monitor)
245		goto nomem;
246
247	monitor->netfs_page = netpage;
248	monitor->op = fscache_get_retrieval(op);
249
250	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
251
252	/* attempt to get hold of the backing page */
253	bmapping = d_backing_inode(object->backer)->i_mapping;
254	newpage = NULL;
255
256	for (;;) {
257		backpage = find_get_page(bmapping, netpage->index);
258		if (backpage)
259			goto backing_page_already_present;
260
261		if (!newpage) {
262			newpage = __page_cache_alloc(cachefiles_gfp);
263			if (!newpage)
264				goto nomem_monitor;
265		}
266
267		ret = add_to_page_cache_lru(newpage, bmapping,
268					    netpage->index, cachefiles_gfp);
269		if (ret == 0)
270			goto installed_new_backing_page;
271		if (ret != -EEXIST)
272			goto nomem_page;
273	}
274
275	/* we've installed a new backing page, so now we need to start
276	 * it reading */
277installed_new_backing_page:
278	_debug("- new %p", newpage);
279
280	backpage = newpage;
281	newpage = NULL;
282
283read_backing_page:
284	ret = bmapping->a_ops->readpage(NULL, backpage);
285	if (ret < 0)
286		goto read_error;
287
288	/* set the monitor to transfer the data across */
289monitor_backing_page:
290	_debug("- monitor add");
291
292	/* install the monitor */
293	get_page(monitor->netfs_page);
294	get_page(backpage);
295	monitor->back_page = backpage;
296	monitor->monitor.private = backpage;
297	add_page_wait_queue(backpage, &monitor->monitor);
298	monitor = NULL;
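	/* ownership of the monitor, and of the page refs taken just above,
	 * now passes to the waiter/copier; the cleanup at "out:" must not
	 * free them, hence monitor is cleared first */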
299
300	/* but the page may have been read before the monitor was installed, so
301	 * the monitor may miss the event - so we have to ensure that we do get
302	 * one in such a case */
303	if (trylock_page(backpage)) {
304		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
305		unlock_page(backpage);
306	}
307	goto success;
308
309	/* if the backing page is already present, it can be in one of
310	 * three states: read in progress, read failed or read okay */
311backing_page_already_present:
312	_debug("- present");
313
314	if (newpage) {
315		put_page(newpage);
316		newpage = NULL;
317	}
318
319	if (PageError(backpage))
320		goto io_error;
321
322	if (PageUptodate(backpage))
323		goto backing_page_already_uptodate;
324
325	if (!trylock_page(backpage))
326		goto monitor_backing_page;
327	_debug("read %p {%lx}", backpage, backpage->flags);
328	goto read_backing_page;
329
330	/* the backing page is already up to date, attach the netfs
331	 * page to the pagecache and LRU and copy the data across */
332backing_page_already_uptodate:
333	_debug("- uptodate");
334
335	fscache_mark_page_cached(op, netpage);
336
337	copy_highpage(netpage, backpage);
338	fscache_end_io(op, netpage, 0);
339	fscache_retrieval_complete(op, 1);
340
341success:
342	_debug("success");
343	ret = 0;
344
345out:
346	if (backpage)
347		put_page(backpage);
348	if (monitor) {
349		fscache_put_retrieval(monitor->op);
350		kfree(monitor);
351	}
352	_leave(" = %d", ret);
353	return ret;
354
355read_error:
356	_debug("read error %d", ret);
357	if (ret == -ENOMEM) {
358		fscache_retrieval_complete(op, 1);
359		goto out;
360	}
361io_error:
362	cachefiles_io_error_obj(object, "Page read error on backing file");
363	fscache_retrieval_complete(op, 1);
364	ret = -ENOBUFS;
365	goto out;
366
367nomem_page:
368	put_page(newpage);
369nomem_monitor:
370	fscache_put_retrieval(monitor->op);
371	kfree(monitor);
372nomem:
373	fscache_retrieval_complete(op, 1);
374	_leave(" = -ENOMEM");
375	return -ENOMEM;
376}
377
378/*
379 * read a page from the cache or allocate a block in which to store it
380 * - cache withdrawal is prevented by the caller
381 * - returns -EINTR if interrupted
382 * - returns -ENOMEM if ran out of memory
383 * - returns -ENOBUFS if no buffers can be made available
384 * - returns -ENOBUFS if page is beyond EOF
385 * - if the page is backed by a block in the cache:
386 *   - a read will be started which will call the callback on completion
387 *   - 0 will be returned
388 * - else if the page is unbacked:
389 *   - the metadata will be retained
390 *   - -ENODATA will be returned
391 */
392int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
393				  struct page *page,
394				  gfp_t gfp)
395{
396	struct cachefiles_object *object;
397	struct cachefiles_cache *cache;
398	struct inode *inode;
399	sector_t block;
400	unsigned shift;
401	int ret, ret2;
402
403	object = container_of(op->op.object,
404			      struct cachefiles_object, fscache);
405	cache = container_of(object->fscache.cache,
406			     struct cachefiles_cache, cache);
407
408	_enter("{%p},{%lx},,,", object, page->index);
409
410	if (!object->backer)
411		goto enobufs;
412
413	inode = d_backing_inode(object->backer);
414	ASSERT(S_ISREG(inode->i_mode));
415	ASSERT(inode->i_mapping->a_ops->readpages);
416
417	/* calculate the shift required to use bmap */
418	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
419
420	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
421	op->op.flags |= FSCACHE_OP_ASYNC;
422	op->op.processor = cachefiles_read_copier;
423
424	/* we assume the absence or presence of the first block is a good
425	 * enough indication for the page as a whole
426	 * - TODO: don't use bmap() for this as it is _not_ actually good
427	 *   enough for this as it doesn't indicate errors, but it's all we've
428	 *   got for the moment
429	 */
430	block = page->index;
431	block <<= shift;
432
433	ret2 = bmap(inode, &block);
434	ASSERT(ret2 == 0);
435
436	_debug("%llx -> %llx",
437	       (unsigned long long) (page->index << shift),
438	       (unsigned long long) block);
439
440	if (block) {
441		/* submit the apparently valid page to the backing fs to be
442		 * read from disk */
443		ret = cachefiles_read_backing_file_one(object, op, page);
444	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
445		/* there's space in the cache we can use */
446		fscache_mark_page_cached(op, page);
447		fscache_retrieval_complete(op, 1);
448		ret = -ENODATA;
449	} else {
450		goto enobufs;
451	}
452
453	_leave(" = %d", ret);
454	return ret;
455
456enobufs:
457	fscache_retrieval_complete(op, 1);
458	_leave(" = -ENOBUFS");
459	return -ENOBUFS;
460}
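/* Illustrative sketch (not from this file): the v5.9 bmap() probe used
 * above.  bmap() now fills *block in place and returns an error code;
 * compare the v3.1 listing below, which calls a_ops->bmap() directly
 * and receives the block as the return value.  A block of 0 is a hole,
 * i.e. nothing is cached for that page.
 */
#if 0
	sector_t block = page->index;

	block <<= PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
	if (bmap(inode, &block) == 0 && block != 0) {
		/* first backing block mapped: treat the page as present */
	}
#endif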
461
462/*
463 * read the corresponding pages to the given set from the backing file
464 * - any uncertain pages are simply discarded, to be tried again another time
465 */
466static int cachefiles_read_backing_file(struct cachefiles_object *object,
467					struct fscache_retrieval *op,
468					struct list_head *list)
469{
470	struct cachefiles_one_read *monitor = NULL;
471	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
472	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
473	int ret = 0;
474
475	_enter("");
476
477	list_for_each_entry_safe(netpage, _n, list, lru) {
478		list_del(&netpage->lru);
479
480		_debug("read back %p{%lu,%d}",
481		       netpage, netpage->index, page_count(netpage));
482
483		if (!monitor) {
484			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
485			if (!monitor)
486				goto nomem;
487
488			monitor->op = fscache_get_retrieval(op);
489			init_waitqueue_func_entry(&monitor->monitor,
490						  cachefiles_read_waiter);
491		}
492
493		for (;;) {
494			backpage = find_get_page(bmapping, netpage->index);
495			if (backpage)
496				goto backing_page_already_present;
497
498			if (!newpage) {
499				newpage = __page_cache_alloc(cachefiles_gfp);
500				if (!newpage)
501					goto nomem;
502			}
503
504			ret = add_to_page_cache_lru(newpage, bmapping,
505						    netpage->index,
506						    cachefiles_gfp);
507			if (ret == 0)
508				goto installed_new_backing_page;
509			if (ret != -EEXIST)
510				goto nomem;
511		}
512
513		/* we've installed a new backing page, so now we need
514		 * to start it reading */
515	installed_new_backing_page:
516		_debug("- new %p", newpage);
517
518		backpage = newpage;
519		newpage = NULL;
520
521	reread_backing_page:
522		ret = bmapping->a_ops->readpage(NULL, backpage);
523		if (ret < 0)
524			goto read_error;
525
526		/* add the netfs page to the pagecache and LRU, and set the
527		 * monitor to transfer the data across */
528	monitor_backing_page:
529		_debug("- monitor add");
530
531		ret = add_to_page_cache_lru(netpage, op->mapping,
532					    netpage->index, cachefiles_gfp);
533		if (ret < 0) {
534			if (ret == -EEXIST) {
535				put_page(backpage);
536				backpage = NULL;
537				put_page(netpage);
538				netpage = NULL;
539				fscache_retrieval_complete(op, 1);
540				continue;
541			}
542			goto nomem;
543		}
544
545		/* install a monitor */
546		get_page(netpage);
547		monitor->netfs_page = netpage;
548
549		get_page(backpage);
550		monitor->back_page = backpage;
551		monitor->monitor.private = backpage;
552		add_page_wait_queue(backpage, &monitor->monitor);
553		monitor = NULL;
554
555		/* but the page may have been read before the monitor was
556		 * installed, so the monitor may miss the event - so we have to
557		 * ensure that we do get one in such a case */
558		if (trylock_page(backpage)) {
559			_debug("2unlock %p {%lx}", backpage, backpage->flags);
560			unlock_page(backpage);
561		}
562
563		put_page(backpage);
564		backpage = NULL;
565
566		put_page(netpage);
567		netpage = NULL;
568		continue;
569
570		/* if the backing page is already present, it can be in one of
571		 * three states: read in progress, read failed or read okay */
572	backing_page_already_present:
573		_debug("- present %p", backpage);
574
575		if (PageError(backpage))
576			goto io_error;
577
578		if (PageUptodate(backpage))
579			goto backing_page_already_uptodate;
580
581		_debug("- not ready %p{%lx}", backpage, backpage->flags);
582
583		if (!trylock_page(backpage))
584			goto monitor_backing_page;
585
586		if (PageError(backpage)) {
587			_debug("error %lx", backpage->flags);
588			unlock_page(backpage);
589			goto io_error;
590		}
591
592		if (PageUptodate(backpage))
593			goto backing_page_already_uptodate_unlock;
594
595		/* we've locked a page that's neither up to date nor erroneous,
596		 * so we need to attempt to read it again */
597		goto reread_backing_page;
598
599		/* the backing page is already up to date, attach the netfs
600		 * page to the pagecache and LRU and copy the data across */
601	backing_page_already_uptodate_unlock:
602		_debug("uptodate %lx", backpage->flags);
603		unlock_page(backpage);
604	backing_page_already_uptodate:
605		_debug("- uptodate");
606
607		ret = add_to_page_cache_lru(netpage, op->mapping,
608					    netpage->index, cachefiles_gfp);
609		if (ret < 0) {
610			if (ret == -EEXIST) {
611				put_page(backpage);
612				backpage = NULL;
613				put_page(netpage);
614				netpage = NULL;
615				fscache_retrieval_complete(op, 1);
616				continue;
617			}
618			goto nomem;
619		}
620
621		copy_highpage(netpage, backpage);
622
623		put_page(backpage);
624		backpage = NULL;
625
626		fscache_mark_page_cached(op, netpage);
627
628		/* the netpage is unlocked and marked up to date here */
629		fscache_end_io(op, netpage, 0);
630		put_page(netpage);
631		netpage = NULL;
632		fscache_retrieval_complete(op, 1);
633		continue;
634	}
635
636	netpage = NULL;
637
638	_debug("out");
639
640out:
641	/* tidy up */
642	if (newpage)
643		put_page(newpage);
644	if (netpage)
645		put_page(netpage);
646	if (backpage)
647		put_page(backpage);
648	if (monitor) {
649		fscache_put_retrieval(op);
650		kfree(monitor);
651	}
652
653	list_for_each_entry_safe(netpage, _n, list, lru) {
654		list_del(&netpage->lru);
655		put_page(netpage);
656		fscache_retrieval_complete(op, 1);
657	}
658
659	_leave(" = %d", ret);
660	return ret;
661
662nomem:
663	_debug("nomem");
664	ret = -ENOMEM;
665	goto record_page_complete;
666
667read_error:
668	_debug("read error %d", ret);
669	if (ret == -ENOMEM)
670		goto record_page_complete;
671io_error:
672	cachefiles_io_error_obj(object, "Page read error on backing file");
673	ret = -ENOBUFS;
674record_page_complete:
675	fscache_retrieval_complete(op, 1);
676	goto out;
677}
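/* Note on the -EEXIST cases above: if the netfs page is already in the
 * pagecache, another actor has dealt with it, so both page refs are
 * dropped and the page is counted as done via
 * fscache_retrieval_complete() before moving on to the next one. */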
678
679/*
680 * read a list of pages from the cache or allocate blocks in which to store
681 * them
682 */
683int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
684				   struct list_head *pages,
685				   unsigned *nr_pages,
686				   gfp_t gfp)
687{
688	struct cachefiles_object *object;
689	struct cachefiles_cache *cache;
690	struct list_head backpages;
691	struct pagevec pagevec;
692	struct inode *inode;
693	struct page *page, *_n;
694	unsigned shift, nrbackpages;
695	int ret, ret2, space;
696
697	object = container_of(op->op.object,
698			      struct cachefiles_object, fscache);
699	cache = container_of(object->fscache.cache,
700			     struct cachefiles_cache, cache);
701
702	_enter("{OBJ%x,%d},,%d,,",
703	       object->fscache.debug_id, atomic_read(&op->op.usage),
704	       *nr_pages);
705
706	if (!object->backer)
707		goto all_enobufs;
708
709	space = 1;
710	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
711		space = 0;
712
713	inode = d_backing_inode(object->backer);
714	ASSERT(S_ISREG(inode->i_mode));
715	ASSERT(inode->i_mapping->a_ops->readpages);
716
717	/* calculate the shift required to use bmap */
718	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
719
720	pagevec_init(&pagevec);
721
722	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
723	op->op.flags |= FSCACHE_OP_ASYNC;
724	op->op.processor = cachefiles_read_copier;
725
726	INIT_LIST_HEAD(&backpages);
727	nrbackpages = 0;
728
729	ret = space ? -ENODATA : -ENOBUFS;
730	list_for_each_entry_safe(page, _n, pages, lru) {
731		sector_t block;
732
733		/* we assume the absence or presence of the first block is a
734		 * good enough indication for the page as a whole
735		 * - TODO: don't use bmap() for this as it is _not_ actually
736		 *   good enough for this as it doesn't indicate errors, but
737		 *   it's all we've got for the moment
738		 */
739		block = page->index;
740		block <<= shift;
741
742		ret2 = bmap(inode, &block);
743		ASSERT(ret2 == 0);
744
745		_debug("%llx -> %llx",
746		       (unsigned long long) (page->index << shift),
747		       (unsigned long long) block);
748
749		if (block) {
750			/* we have data - add it to the list to give to the
751			 * backing fs */
752			list_move(&page->lru, &backpages);
753			(*nr_pages)--;
754			nrbackpages++;
755		} else if (space && pagevec_add(&pagevec, page) == 0) {
756			fscache_mark_pages_cached(op, &pagevec);
757			fscache_retrieval_complete(op, 1);
758			ret = -ENODATA;
759		} else {
760			fscache_retrieval_complete(op, 1);
761		}
762	}
763
764	if (pagevec_count(&pagevec) > 0)
765		fscache_mark_pages_cached(op, &pagevec);
766
767	if (list_empty(pages))
768		ret = 0;
769
770	/* submit the apparently valid pages to the backing fs to be read from
771	 * disk */
772	if (nrbackpages > 0) {
773		ret2 = cachefiles_read_backing_file(object, op, &backpages);
774		if (ret2 == -ENOMEM || ret2 == -EINTR)
775			ret = ret2;
776	}
777
778	_leave(" = %d [nr=%u%s]",
779	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
780	return ret;
781
782all_enobufs:
783	fscache_retrieval_complete(op, *nr_pages);
784	return -ENOBUFS;
785}
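/* Return-value summary for the function above: 0 if every page had a
 * backing block and was dispatched for reading, -ENODATA if space was
 * reserved for pages the netfs must still fetch itself, -ENOBUFS if the
 * cache can't help at all, with -ENOMEM/-EINTR propagated from
 * cachefiles_read_backing_file(). */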
786
787/*
788 * allocate a block in the cache in which to store a page
789 * - cache withdrawal is prevented by the caller
790 * - returns -EINTR if interrupted
791 * - returns -ENOMEM if ran out of memory
792 * - returns -ENOBUFS if no buffers can be made available
793 * - returns -ENOBUFS if page is beyond EOF
794 * - otherwise:
795 *   - the metadata will be retained
796 *   - 0 will be returned
797 */
798int cachefiles_allocate_page(struct fscache_retrieval *op,
799			     struct page *page,
800			     gfp_t gfp)
801{
802	struct cachefiles_object *object;
803	struct cachefiles_cache *cache;
804	int ret;
805
806	object = container_of(op->op.object,
807			      struct cachefiles_object, fscache);
808	cache = container_of(object->fscache.cache,
809			     struct cachefiles_cache, cache);
810
811	_enter("%p,{%lx},", object, page->index);
812
813	ret = cachefiles_has_space(cache, 0, 1);
814	if (ret == 0)
815		fscache_mark_page_cached(op, page);
816	else
817		ret = -ENOBUFS;
818
819	fscache_retrieval_complete(op, 1);
820	_leave(" = %d", ret);
821	return ret;
822}
823
824/*
825 * allocate blocks in the cache in which to store a set of pages
826 * - cache withdrawal is prevented by the caller
827 * - returns -EINTR if interrupted
828 * - returns -ENOMEM if ran out of memory
829 * - returns -ENOBUFS if some buffers couldn't be made available
830 * - returns -ENOBUFS if some pages are beyond EOF
831 * - otherwise:
832 *   - -ENODATA will be returned
833 * - metadata will be retained for any page marked
834 */
835int cachefiles_allocate_pages(struct fscache_retrieval *op,
836			      struct list_head *pages,
837			      unsigned *nr_pages,
838			      gfp_t gfp)
839{
840	struct cachefiles_object *object;
841	struct cachefiles_cache *cache;
842	struct pagevec pagevec;
843	struct page *page;
844	int ret;
845
846	object = container_of(op->op.object,
847			      struct cachefiles_object, fscache);
848	cache = container_of(object->fscache.cache,
849			     struct cachefiles_cache, cache);
850
851	_enter("%p,,,%d,", object, *nr_pages);
852
853	ret = cachefiles_has_space(cache, 0, *nr_pages);
854	if (ret == 0) {
855		pagevec_init(&pagevec);
856
857		list_for_each_entry(page, pages, lru) {
858			if (pagevec_add(&pagevec, page) == 0)
859				fscache_mark_pages_cached(op, &pagevec);
860		}
861
862		if (pagevec_count(&pagevec) > 0)
863			fscache_mark_pages_cached(op, &pagevec);
864		ret = -ENODATA;
865	} else {
866		ret = -ENOBUFS;
867	}
868
869	fscache_retrieval_complete(op, *nr_pages);
870	_leave(" = %d", ret);
871	return ret;
872}
873
874/*
875 * request a page be stored in the cache
876 * - cache withdrawal is prevented by the caller
877 * - this request may be ignored if there's no cache block available, in which
878 *   case -ENOBUFS will be returned
879 * - if the op is in progress, 0 will be returned
880 */
881int cachefiles_write_page(struct fscache_storage *op, struct page *page)
882{
883	struct cachefiles_object *object;
884	struct cachefiles_cache *cache;
885	struct file *file;
886	struct path path;
887	loff_t pos, eof;
888	size_t len;
889	void *data;
890	int ret = -ENOBUFS;
891
892	ASSERT(op != NULL);
893	ASSERT(page != NULL);
894
895	object = container_of(op->op.object,
896			      struct cachefiles_object, fscache);
897
898	_enter("%p,%p{%lx},,,", object, page, page->index);
899
900	if (!object->backer) {
901		_leave(" = -ENOBUFS");
902		return -ENOBUFS;
903	}
904
905	ASSERT(d_is_reg(object->backer));
906
907	cache = container_of(object->fscache.cache,
908			     struct cachefiles_cache, cache);
909
910	pos = (loff_t)page->index << PAGE_SHIFT;
911
912	/* We mustn't write more data than we have, so we have to beware of a
913	 * partial page at EOF.
914	 */
915	eof = object->fscache.store_limit_l;
916	if (pos >= eof)
917		goto error;
918
919	/* write the page to the backing filesystem and let it store it in its
920	 * own time */
921	path.mnt = cache->mnt;
922	path.dentry = object->backer;
923	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
924	if (IS_ERR(file)) {
925		ret = PTR_ERR(file);
926		goto error_2;
927	}
928
929	len = PAGE_SIZE;
930	if (eof & ~PAGE_MASK) {
931		if (eof - pos < PAGE_SIZE) {
932			_debug("cut short %llx to %llx",
933			       pos, eof);
934			len = eof - pos;
935			ASSERTCMP(pos + len, ==, eof);
936		}
937	}
938
939	data = kmap(page);
940	ret = kernel_write(file, data, len, &pos);
941	kunmap(page);
942	fput(file);
943	if (ret != len)
944		goto error_eio;
945
946	_leave(" = 0");
947	return 0;
948
949error_eio:
950	ret = -EIO;
951error_2:
952	if (ret == -EIO)
953		cachefiles_io_error_obj(object,
954					"Write page to backing file failed");
955error:
956	_leave(" = -ENOBUFS [%d]", ret);
957	return -ENOBUFS;
958}
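/* Illustrative sketch (not from this file): kernel_write() as used
 * above writes from a kernel buffer and advances *pos itself; it is
 * what replaced the set_fs(KERNEL_DS) + file->f_op->write() sequence
 * still visible in the v3.1 listing below.
 */
#if 0
	void *data = kmap(page);	/* kernel mapping of the page */
	ssize_t n = kernel_write(file, data, len, &pos);

	kunmap(page);
	if (n != len)			/* short write => I/O error */
		n = -EIO;
#endif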
959
960/*
961 * detach a backing block from a page
962 * - cache withdrawal is prevented by the caller
963 */
964void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
965	__releases(&object->fscache.cookie->lock)
966{
967	struct cachefiles_object *object;
968
969	object = container_of(_object, struct cachefiles_object, fscache);
970
971	_enter("%p,{%lu}", object, page->index);
972
973	spin_unlock(&object->fscache.cookie->lock);
974}
v3.1
 
  1/* Storage object read/write
  2 *
  3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4 * Written by David Howells (dhowells@redhat.com)
  5 *
  6 * This program is free software; you can redistribute it and/or
  7 * modify it under the terms of the GNU General Public Licence
  8 * as published by the Free Software Foundation; either version
  9 * 2 of the Licence, or (at your option) any later version.
 10 */
 11
 12#include <linux/mount.h>
 13#include <linux/slab.h>
 14#include <linux/file.h>
 15#include "internal.h"
 16
 17/*
 18 * detect wake up events generated by the unlocking of pages in which we're
 19 * interested
 20 * - we use this to detect read completion of backing pages
 21 * - the caller holds the waitqueue lock
 22 */
 23static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
 24				  int sync, void *_key)
 25{
 26	struct cachefiles_one_read *monitor =
 27		container_of(wait, struct cachefiles_one_read, monitor);
 28	struct cachefiles_object *object;
 29	struct wait_bit_key *key = _key;
 30	struct page *page = wait->private;
 31
 32	ASSERT(key);
 33
 34	_enter("{%lu},%u,%d,{%p,%u}",
 35	       monitor->netfs_page->index, mode, sync,
 36	       key->flags, key->bit_nr);
 37
 38	if (key->flags != &page->flags ||
 39	    key->bit_nr != PG_locked)
 40		return 0;
 41
 42	_debug("--- monitor %p %lx ---", page, page->flags);
 43
 44	if (!PageUptodate(page) && !PageError(page)) {
  45		/* unlocked, not uptodate and not erroneous? */
 46		_debug("page probably truncated");
 47	}
 48
 49	/* remove from the waitqueue */
 50	list_del(&wait->task_list);
 51
 52	/* move onto the action list and queue for FS-Cache thread pool */
 53	ASSERT(monitor->op);
 54
 55	object = container_of(monitor->op->op.object,
 56			      struct cachefiles_object, fscache);
 57
 58	spin_lock(&object->work_lock);
 59	list_add_tail(&monitor->op_link, &monitor->op->to_do);
 60	spin_unlock(&object->work_lock);
 61
 62	fscache_enqueue_retrieval(monitor->op);
 63	return 0;
 64}
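/* Note: unlike the v5.9 waiter above, this version enqueues the monitor
 * without temporarily bumping the op's usage count, so nothing stops
 * cachefiles_read_copier() freeing the op between the to_do enqueue and
 * the work-queue enqueue; the newer code takes fscache_get_retrieval()
 * around that window. */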
 65
 66/*
 67 * handle a probably truncated page
 68 * - check to see if the page is still relevant and reissue the read if
 69 *   possible
 70 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 71 *   must wait again and 0 if successful
 72 */
 73static int cachefiles_read_reissue(struct cachefiles_object *object,
 74				   struct cachefiles_one_read *monitor)
 75{
 76	struct address_space *bmapping = object->backer->d_inode->i_mapping;
 77	struct page *backpage = monitor->back_page, *backpage2;
 78	int ret;
 79
 80	kenter("{ino=%lx},{%lx,%lx}",
 81	       object->backer->d_inode->i_ino,
 82	       backpage->index, backpage->flags);
 83
 84	/* skip if the page was truncated away completely */
 85	if (backpage->mapping != bmapping) {
 86		kleave(" = -ENODATA [mapping]");
 87		return -ENODATA;
 88	}
 89
 90	backpage2 = find_get_page(bmapping, backpage->index);
 91	if (!backpage2) {
 92		kleave(" = -ENODATA [gone]");
 93		return -ENODATA;
 94	}
 95
 96	if (backpage != backpage2) {
 97		put_page(backpage2);
 98		kleave(" = -ENODATA [different]");
 99		return -ENODATA;
100	}
101
102	/* the page is still there and we already have a ref on it, so we don't
103	 * need a second */
104	put_page(backpage2);
105
106	INIT_LIST_HEAD(&monitor->op_link);
107	add_page_wait_queue(backpage, &monitor->monitor);
108
109	if (trylock_page(backpage)) {
110		ret = -EIO;
111		if (PageError(backpage))
112			goto unlock_discard;
113		ret = 0;
114		if (PageUptodate(backpage))
115			goto unlock_discard;
116
117		kdebug("reissue read");
118		ret = bmapping->a_ops->readpage(NULL, backpage);
119		if (ret < 0)
120			goto unlock_discard;
121	}
122
123	/* but the page may have been read before the monitor was installed, so
124	 * the monitor may miss the event - so we have to ensure that we do get
125	 * one in such a case */
126	if (trylock_page(backpage)) {
127		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
128		unlock_page(backpage);
129	}
130
131	/* it'll reappear on the todo list */
132	kleave(" = -EINPROGRESS");
133	return -EINPROGRESS;
134
135unlock_discard:
136	unlock_page(backpage);
137	spin_lock_irq(&object->work_lock);
138	list_del(&monitor->op_link);
139	spin_unlock_irq(&object->work_lock);
140	kleave(" = %d", ret);
141	return ret;
142}
143
144/*
145 * copy data from backing pages to netfs pages to complete a read operation
146 * - driven by FS-Cache's thread pool
147 */
148static void cachefiles_read_copier(struct fscache_operation *_op)
149{
150	struct cachefiles_one_read *monitor;
151	struct cachefiles_object *object;
152	struct fscache_retrieval *op;
153	struct pagevec pagevec;
154	int error, max;
155
156	op = container_of(_op, struct fscache_retrieval, op);
157	object = container_of(op->op.object,
158			      struct cachefiles_object, fscache);
159
160	_enter("{ino=%lu}", object->backer->d_inode->i_ino);
161
162	pagevec_init(&pagevec, 0);
163
164	max = 8;
165	spin_lock_irq(&object->work_lock);
166
167	while (!list_empty(&op->to_do)) {
168		monitor = list_entry(op->to_do.next,
169				     struct cachefiles_one_read, op_link);
170		list_del(&monitor->op_link);
171
172		spin_unlock_irq(&object->work_lock);
173
174		_debug("- copy {%lu}", monitor->back_page->index);
175
176	recheck:
177		if (PageUptodate(monitor->back_page)) {
178			copy_highpage(monitor->netfs_page, monitor->back_page);
179
180			pagevec_add(&pagevec, monitor->netfs_page);
181			fscache_mark_pages_cached(monitor->op, &pagevec);
182			error = 0;
183		} else if (!PageError(monitor->back_page)) {
184			/* the page has probably been truncated */
185			error = cachefiles_read_reissue(object, monitor);
186			if (error == -EINPROGRESS)
187				goto next;
188			goto recheck;
189		} else {
190			cachefiles_io_error_obj(
191				object,
192				"Readpage failed on backing file %lx",
193				(unsigned long) monitor->back_page->flags);
194			error = -EIO;
195		}
196
197		page_cache_release(monitor->back_page);
198
199		fscache_end_io(op, monitor->netfs_page, error);
200		page_cache_release(monitor->netfs_page);
201		fscache_put_retrieval(op);
202		kfree(monitor);
203
204	next:
205		/* let the thread pool have some air occasionally */
206		max--;
207		if (max < 0 || need_resched()) {
208			if (!list_empty(&op->to_do))
209				fscache_enqueue_retrieval(op);
210			_leave(" [maxed out]");
211			return;
212		}
213
214		spin_lock_irq(&object->work_lock);
215	}
216
217	spin_unlock_irq(&object->work_lock);
218	_leave("");
219}
220
221/*
222 * read the corresponding page to the given set from the backing file
223 * - an uncertain page is simply discarded, to be tried again another time
224 */
225static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
226					    struct fscache_retrieval *op,
227					    struct page *netpage,
228					    struct pagevec *pagevec)
229{
230	struct cachefiles_one_read *monitor;
231	struct address_space *bmapping;
232	struct page *newpage, *backpage;
233	int ret;
234
235	_enter("");
236
237	pagevec_reinit(pagevec);
238
239	_debug("read back %p{%lu,%d}",
240	       netpage, netpage->index, page_count(netpage));
241
242	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
243	if (!monitor)
244		goto nomem;
245
246	monitor->netfs_page = netpage;
247	monitor->op = fscache_get_retrieval(op);
248
249	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
250
251	/* attempt to get hold of the backing page */
252	bmapping = object->backer->d_inode->i_mapping;
253	newpage = NULL;
254
255	for (;;) {
256		backpage = find_get_page(bmapping, netpage->index);
257		if (backpage)
258			goto backing_page_already_present;
259
260		if (!newpage) {
261			newpage = page_cache_alloc_cold(bmapping);
262			if (!newpage)
263				goto nomem_monitor;
264		}
265
266		ret = add_to_page_cache(newpage, bmapping,
267					netpage->index, GFP_KERNEL);
268		if (ret == 0)
269			goto installed_new_backing_page;
270		if (ret != -EEXIST)
271			goto nomem_page;
272	}
273
274	/* we've installed a new backing page, so now we need to add it
275	 * to the LRU list and start it reading */
276installed_new_backing_page:
277	_debug("- new %p", newpage);
278
279	backpage = newpage;
280	newpage = NULL;
281
282	page_cache_get(backpage);
283	pagevec_add(pagevec, backpage);
284	__pagevec_lru_add_file(pagevec);
285
286read_backing_page:
287	ret = bmapping->a_ops->readpage(NULL, backpage);
288	if (ret < 0)
289		goto read_error;
290
291	/* set the monitor to transfer the data across */
292monitor_backing_page:
293	_debug("- monitor add");
294
295	/* install the monitor */
296	page_cache_get(monitor->netfs_page);
297	page_cache_get(backpage);
298	monitor->back_page = backpage;
299	monitor->monitor.private = backpage;
300	add_page_wait_queue(backpage, &monitor->monitor);
301	monitor = NULL;
302
303	/* but the page may have been read before the monitor was installed, so
304	 * the monitor may miss the event - so we have to ensure that we do get
305	 * one in such a case */
306	if (trylock_page(backpage)) {
307		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
308		unlock_page(backpage);
309	}
310	goto success;
311
312	/* if the backing page is already present, it can be in one of
313	 * three states: read in progress, read failed or read okay */
314backing_page_already_present:
315	_debug("- present");
316
317	if (newpage) {
318		page_cache_release(newpage);
319		newpage = NULL;
320	}
321
322	if (PageError(backpage))
323		goto io_error;
324
325	if (PageUptodate(backpage))
326		goto backing_page_already_uptodate;
327
328	if (!trylock_page(backpage))
329		goto monitor_backing_page;
330	_debug("read %p {%lx}", backpage, backpage->flags);
331	goto read_backing_page;
332
333	/* the backing page is already up to date, attach the netfs
334	 * page to the pagecache and LRU and copy the data across */
335backing_page_already_uptodate:
336	_debug("- uptodate");
337
338	pagevec_add(pagevec, netpage);
339	fscache_mark_pages_cached(op, pagevec);
340
341	copy_highpage(netpage, backpage);
342	fscache_end_io(op, netpage, 0);
343
344success:
345	_debug("success");
346	ret = 0;
347
348out:
349	if (backpage)
350		page_cache_release(backpage);
351	if (monitor) {
352		fscache_put_retrieval(monitor->op);
353		kfree(monitor);
354	}
355	_leave(" = %d", ret);
356	return ret;
357
358read_error:
359	_debug("read error %d", ret);
360	if (ret == -ENOMEM)
361		goto out;
362io_error:
363	cachefiles_io_error_obj(object, "Page read error on backing file");
364	ret = -ENOBUFS;
365	goto out;
366
367nomem_page:
368	page_cache_release(newpage);
369nomem_monitor:
370	fscache_put_retrieval(monitor->op);
371	kfree(monitor);
372nomem:
373	_leave(" = -ENOMEM");
374	return -ENOMEM;
375}
376
377/*
378 * read a page from the cache or allocate a block in which to store it
379 * - cache withdrawal is prevented by the caller
380 * - returns -EINTR if interrupted
381 * - returns -ENOMEM if ran out of memory
382 * - returns -ENOBUFS if no buffers can be made available
383 * - returns -ENOBUFS if page is beyond EOF
384 * - if the page is backed by a block in the cache:
385 *   - a read will be started which will call the callback on completion
386 *   - 0 will be returned
387 * - else if the page is unbacked:
388 *   - the metadata will be retained
389 *   - -ENODATA will be returned
390 */
391int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
392				  struct page *page,
393				  gfp_t gfp)
394{
395	struct cachefiles_object *object;
396	struct cachefiles_cache *cache;
397	struct pagevec pagevec;
398	struct inode *inode;
399	sector_t block0, block;
400	unsigned shift;
401	int ret;
402
403	object = container_of(op->op.object,
404			      struct cachefiles_object, fscache);
405	cache = container_of(object->fscache.cache,
406			     struct cachefiles_cache, cache);
407
408	_enter("{%p},{%lx},,,", object, page->index);
409
410	if (!object->backer)
411		return -ENOBUFS;
412
413	inode = object->backer->d_inode;
414	ASSERT(S_ISREG(inode->i_mode));
415	ASSERT(inode->i_mapping->a_ops->bmap);
416	ASSERT(inode->i_mapping->a_ops->readpages);
417
418	/* calculate the shift required to use bmap */
419	if (inode->i_sb->s_blocksize > PAGE_SIZE)
420		return -ENOBUFS;
421
422	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
423
424	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
425	op->op.flags |= FSCACHE_OP_ASYNC;
426	op->op.processor = cachefiles_read_copier;
427
428	pagevec_init(&pagevec, 0);
429
430	/* we assume the absence or presence of the first block is a good
431	 * enough indication for the page as a whole
432	 * - TODO: don't use bmap() for this as it is _not_ actually good
433	 *   enough for this as it doesn't indicate errors, but it's all we've
434	 *   got for the moment
435	 */
436	block0 = page->index;
437	block0 <<= shift;
438
439	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
440	_debug("%llx -> %llx",
441	       (unsigned long long) block0,
442	       (unsigned long long) block);
443
444	if (block) {
445		/* submit the apparently valid page to the backing fs to be
446		 * read from disk */
447		ret = cachefiles_read_backing_file_one(object, op, page,
448						       &pagevec);
449	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
450		/* there's space in the cache we can use */
451		pagevec_add(&pagevec, page);
452		fscache_mark_pages_cached(op, &pagevec);
453		ret = -ENODATA;
454	} else {
455		ret = -ENOBUFS;
456	}
457
458	_leave(" = %d", ret);
459	return ret;
460}
461
462/*
463 * read the corresponding pages to the given set from the backing file
464 * - any uncertain pages are simply discarded, to be tried again another time
465 */
466static int cachefiles_read_backing_file(struct cachefiles_object *object,
467					struct fscache_retrieval *op,
468					struct list_head *list,
469					struct pagevec *mark_pvec)
470{
471	struct cachefiles_one_read *monitor = NULL;
472	struct address_space *bmapping = object->backer->d_inode->i_mapping;
473	struct pagevec lru_pvec;
474	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
475	int ret = 0;
476
477	_enter("");
478
479	pagevec_init(&lru_pvec, 0);
480
481	list_for_each_entry_safe(netpage, _n, list, lru) {
482		list_del(&netpage->lru);
483
484		_debug("read back %p{%lu,%d}",
485		       netpage, netpage->index, page_count(netpage));
486
487		if (!monitor) {
488			monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
489			if (!monitor)
490				goto nomem;
491
492			monitor->op = fscache_get_retrieval(op);
493			init_waitqueue_func_entry(&monitor->monitor,
494						  cachefiles_read_waiter);
495		}
496
497		for (;;) {
498			backpage = find_get_page(bmapping, netpage->index);
499			if (backpage)
500				goto backing_page_already_present;
501
502			if (!newpage) {
503				newpage = page_cache_alloc_cold(bmapping);
504				if (!newpage)
505					goto nomem;
506			}
507
508			ret = add_to_page_cache(newpage, bmapping,
509						netpage->index, GFP_KERNEL);
510			if (ret == 0)
511				goto installed_new_backing_page;
512			if (ret != -EEXIST)
513				goto nomem;
514		}
515
516		/* we've installed a new backing page, so now we need to add it
517		 * to the LRU list and start it reading */
518	installed_new_backing_page:
519		_debug("- new %p", newpage);
520
521		backpage = newpage;
522		newpage = NULL;
523
524		page_cache_get(backpage);
525		if (!pagevec_add(&lru_pvec, backpage))
526			__pagevec_lru_add_file(&lru_pvec);
527
528	reread_backing_page:
529		ret = bmapping->a_ops->readpage(NULL, backpage);
530		if (ret < 0)
531			goto read_error;
532
533		/* add the netfs page to the pagecache and LRU, and set the
534		 * monitor to transfer the data across */
535	monitor_backing_page:
536		_debug("- monitor add");
537
538		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
539					GFP_KERNEL);
540		if (ret < 0) {
541			if (ret == -EEXIST) {
542				page_cache_release(netpage);
543				continue;
544			}
545			goto nomem;
546		}
547
548		page_cache_get(netpage);
549		if (!pagevec_add(&lru_pvec, netpage))
550			__pagevec_lru_add_file(&lru_pvec);
551
552		/* install a monitor */
553		page_cache_get(netpage);
554		monitor->netfs_page = netpage;
555
556		page_cache_get(backpage);
557		monitor->back_page = backpage;
558		monitor->monitor.private = backpage;
559		add_page_wait_queue(backpage, &monitor->monitor);
560		monitor = NULL;
561
562		/* but the page may have been read before the monitor was
563		 * installed, so the monitor may miss the event - so we have to
564		 * ensure that we do get one in such a case */
565		if (trylock_page(backpage)) {
566			_debug("2unlock %p {%lx}", backpage, backpage->flags);
567			unlock_page(backpage);
568		}
569
570		page_cache_release(backpage);
571		backpage = NULL;
572
573		page_cache_release(netpage);
574		netpage = NULL;
575		continue;
576
577		/* if the backing page is already present, it can be in one of
578		 * three states: read in progress, read failed or read okay */
579	backing_page_already_present:
580		_debug("- present %p", backpage);
581
582		if (PageError(backpage))
583			goto io_error;
584
585		if (PageUptodate(backpage))
586			goto backing_page_already_uptodate;
587
588		_debug("- not ready %p{%lx}", backpage, backpage->flags);
589
590		if (!trylock_page(backpage))
591			goto monitor_backing_page;
592
593		if (PageError(backpage)) {
594			_debug("error %lx", backpage->flags);
595			unlock_page(backpage);
596			goto io_error;
597		}
598
599		if (PageUptodate(backpage))
600			goto backing_page_already_uptodate_unlock;
601
602		/* we've locked a page that's neither up to date nor erroneous,
603		 * so we need to attempt to read it again */
604		goto reread_backing_page;
605
606		/* the backing page is already up to date, attach the netfs
607		 * page to the pagecache and LRU and copy the data across */
608	backing_page_already_uptodate_unlock:
609		_debug("uptodate %lx", backpage->flags);
610		unlock_page(backpage);
611	backing_page_already_uptodate:
612		_debug("- uptodate");
613
614		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
615					GFP_KERNEL);
616		if (ret < 0) {
617			if (ret == -EEXIST) {
618				page_cache_release(netpage);
619				continue;
620			}
621			goto nomem;
622		}
623
624		copy_highpage(netpage, backpage);
625
626		page_cache_release(backpage);
627		backpage = NULL;
628
629		if (!pagevec_add(mark_pvec, netpage))
630			fscache_mark_pages_cached(op, mark_pvec);
631
632		page_cache_get(netpage);
633		if (!pagevec_add(&lru_pvec, netpage))
634			__pagevec_lru_add_file(&lru_pvec);
635
636		fscache_end_io(op, netpage, 0);
637		page_cache_release(netpage);
638		netpage = NULL;
639		continue;
640	}
641
642	netpage = NULL;
643
644	_debug("out");
645
646out:
647	/* tidy up */
648	pagevec_lru_add_file(&lru_pvec);
649
650	if (newpage)
651		page_cache_release(newpage);
652	if (netpage)
653		page_cache_release(netpage);
654	if (backpage)
655		page_cache_release(backpage);
656	if (monitor) {
657		fscache_put_retrieval(op);
658		kfree(monitor);
659	}
660
661	list_for_each_entry_safe(netpage, _n, list, lru) {
662		list_del(&netpage->lru);
663		page_cache_release(netpage);
664	}
665
666	_leave(" = %d", ret);
667	return ret;
668
669nomem:
670	_debug("nomem");
671	ret = -ENOMEM;
672	goto out;
673
674read_error:
675	_debug("read error %d", ret);
676	if (ret == -ENOMEM)
677		goto out;
678io_error:
679	cachefiles_io_error_obj(object, "Page read error on backing file");
680	ret = -ENOBUFS;
681	goto out;
682}
683
684/*
685 * read a list of pages from the cache or allocate blocks in which to store
686 * them
687 */
688int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
689				   struct list_head *pages,
690				   unsigned *nr_pages,
691				   gfp_t gfp)
692{
693	struct cachefiles_object *object;
694	struct cachefiles_cache *cache;
695	struct list_head backpages;
696	struct pagevec pagevec;
697	struct inode *inode;
698	struct page *page, *_n;
699	unsigned shift, nrbackpages;
700	int ret, ret2, space;
701
702	object = container_of(op->op.object,
703			      struct cachefiles_object, fscache);
704	cache = container_of(object->fscache.cache,
705			     struct cachefiles_cache, cache);
706
707	_enter("{OBJ%x,%d},,%d,,",
708	       object->fscache.debug_id, atomic_read(&op->op.usage),
709	       *nr_pages);
710
711	if (!object->backer)
712		return -ENOBUFS;
713
714	space = 1;
715	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
716		space = 0;
717
718	inode = object->backer->d_inode;
719	ASSERT(S_ISREG(inode->i_mode));
720	ASSERT(inode->i_mapping->a_ops->bmap);
721	ASSERT(inode->i_mapping->a_ops->readpages);
722
723	/* calculate the shift required to use bmap */
724	if (inode->i_sb->s_blocksize > PAGE_SIZE)
725		return -ENOBUFS;
726
727	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
728
729	pagevec_init(&pagevec, 0);
730
731	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
732	op->op.flags |= FSCACHE_OP_ASYNC;
733	op->op.processor = cachefiles_read_copier;
734
735	INIT_LIST_HEAD(&backpages);
736	nrbackpages = 0;
737
738	ret = space ? -ENODATA : -ENOBUFS;
739	list_for_each_entry_safe(page, _n, pages, lru) {
740		sector_t block0, block;
741
742		/* we assume the absence or presence of the first block is a
743		 * good enough indication for the page as a whole
744		 * - TODO: don't use bmap() for this as it is _not_ actually
745		 *   good enough for this as it doesn't indicate errors, but
746		 *   it's all we've got for the moment
747		 */
748		block0 = page->index;
749		block0 <<= shift;
750
751		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
752						      block0);
753		_debug("%llx -> %llx",
754		       (unsigned long long) block0,
755		       (unsigned long long) block);
756
757		if (block) {
758			/* we have data - add it to the list to give to the
759			 * backing fs */
760			list_move(&page->lru, &backpages);
761			(*nr_pages)--;
762			nrbackpages++;
763		} else if (space && pagevec_add(&pagevec, page) == 0) {
764			fscache_mark_pages_cached(op, &pagevec);
765			ret = -ENODATA;
766		}
767	}
768
769	if (pagevec_count(&pagevec) > 0)
770		fscache_mark_pages_cached(op, &pagevec);
771
772	if (list_empty(pages))
773		ret = 0;
774
775	/* submit the apparently valid pages to the backing fs to be read from
776	 * disk */
777	if (nrbackpages > 0) {
778		ret2 = cachefiles_read_backing_file(object, op, &backpages,
779						    &pagevec);
780		if (ret2 == -ENOMEM || ret2 == -EINTR)
781			ret = ret2;
782	}
783
784	if (pagevec_count(&pagevec) > 0)
785		fscache_mark_pages_cached(op, &pagevec);
786
787	_leave(" = %d [nr=%u%s]",
788	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
789	return ret;
790}
791
792/*
793 * allocate a block in the cache in which to store a page
794 * - cache withdrawal is prevented by the caller
795 * - returns -EINTR if interrupted
796 * - returns -ENOMEM if ran out of memory
797 * - returns -ENOBUFS if no buffers can be made available
798 * - returns -ENOBUFS if page is beyond EOF
799 * - otherwise:
800 *   - the metadata will be retained
801 *   - 0 will be returned
802 */
803int cachefiles_allocate_page(struct fscache_retrieval *op,
804			     struct page *page,
805			     gfp_t gfp)
806{
807	struct cachefiles_object *object;
808	struct cachefiles_cache *cache;
809	struct pagevec pagevec;
810	int ret;
811
812	object = container_of(op->op.object,
813			      struct cachefiles_object, fscache);
814	cache = container_of(object->fscache.cache,
815			     struct cachefiles_cache, cache);
816
817	_enter("%p,{%lx},", object, page->index);
818
819	ret = cachefiles_has_space(cache, 0, 1);
820	if (ret == 0) {
821		pagevec_init(&pagevec, 0);
822		pagevec_add(&pagevec, page);
823		fscache_mark_pages_cached(op, &pagevec);
824	} else {
825		ret = -ENOBUFS;
826	}
827
828	_leave(" = %d", ret);
829	return ret;
830}
831
832/*
833 * allocate blocks in the cache in which to store a set of pages
834 * - cache withdrawal is prevented by the caller
835 * - returns -EINTR if interrupted
836 * - returns -ENOMEM if ran out of memory
837 * - returns -ENOBUFS if some buffers couldn't be made available
838 * - returns -ENOBUFS if some pages are beyond EOF
839 * - otherwise:
840 *   - -ENODATA will be returned
841 * - metadata will be retained for any page marked
842 */
843int cachefiles_allocate_pages(struct fscache_retrieval *op,
844			      struct list_head *pages,
845			      unsigned *nr_pages,
846			      gfp_t gfp)
847{
848	struct cachefiles_object *object;
849	struct cachefiles_cache *cache;
850	struct pagevec pagevec;
851	struct page *page;
852	int ret;
853
854	object = container_of(op->op.object,
855			      struct cachefiles_object, fscache);
856	cache = container_of(object->fscache.cache,
857			     struct cachefiles_cache, cache);
858
859	_enter("%p,,,%d,", object, *nr_pages);
860
861	ret = cachefiles_has_space(cache, 0, *nr_pages);
862	if (ret == 0) {
863		pagevec_init(&pagevec, 0);
864
865		list_for_each_entry(page, pages, lru) {
866			if (pagevec_add(&pagevec, page) == 0)
867				fscache_mark_pages_cached(op, &pagevec);
868		}
869
870		if (pagevec_count(&pagevec) > 0)
871			fscache_mark_pages_cached(op, &pagevec);
872		ret = -ENODATA;
873	} else {
874		ret = -ENOBUFS;
875	}
876
877	_leave(" = %d", ret);
878	return ret;
879}
880
881/*
882 * request a page be stored in the cache
883 * - cache withdrawal is prevented by the caller
884 * - this request may be ignored if there's no cache block available, in which
885 *   case -ENOBUFS will be returned
886 * - if the op is in progress, 0 will be returned
887 */
888int cachefiles_write_page(struct fscache_storage *op, struct page *page)
889{
890	struct cachefiles_object *object;
891	struct cachefiles_cache *cache;
892	mm_segment_t old_fs;
893	struct file *file;
894	loff_t pos, eof;
895	size_t len;
896	void *data;
897	int ret;
898
899	ASSERT(op != NULL);
900	ASSERT(page != NULL);
901
902	object = container_of(op->op.object,
903			      struct cachefiles_object, fscache);
904
905	_enter("%p,%p{%lx},,,", object, page, page->index);
906
907	if (!object->backer) {
908		_leave(" = -ENOBUFS");
909		return -ENOBUFS;
910	}
911
912	ASSERT(S_ISREG(object->backer->d_inode->i_mode));
913
914	cache = container_of(object->fscache.cache,
915			     struct cachefiles_cache, cache);
916
917	/* write the page to the backing filesystem and let it store it in its
918	 * own time */
919	dget(object->backer);
920	mntget(cache->mnt);
921	file = dentry_open(object->backer, cache->mnt, O_RDWR,
922			   cache->cache_cred);
923	if (IS_ERR(file)) {
924		ret = PTR_ERR(file);
925	} else {
926		ret = -EIO;
927		if (file->f_op->write) {
928			pos = (loff_t) page->index << PAGE_SHIFT;
929
930			/* we mustn't write more data than we have, so we have
931			 * to beware of a partial page at EOF */
932			eof = object->fscache.store_limit_l;
933			len = PAGE_SIZE;
934			if (eof & ~PAGE_MASK) {
935				ASSERTCMP(pos, <, eof);
936				if (eof - pos < PAGE_SIZE) {
937					_debug("cut short %llx to %llx",
938					       pos, eof);
939					len = eof - pos;
940					ASSERTCMP(pos + len, ==, eof);
941				}
942			}
943
944			data = kmap(page);
945			old_fs = get_fs();
946			set_fs(KERNEL_DS);
947			ret = file->f_op->write(
948				file, (const void __user *) data, len, &pos);
949			set_fs(old_fs);
950			kunmap(page);
951			if (ret != len)
952				ret = -EIO;
953		}
954		fput(file);
955	}
956
957	if (ret < 0) {
958		if (ret == -EIO)
959			cachefiles_io_error_obj(
960				object, "Write page to backing file failed");
961		ret = -ENOBUFS;
962	}
963
964	_leave(" = %d", ret);
965	return ret;
966}
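/* Note: this version writes via file->f_op->write() under
 * set_fs(KERNEL_DS) so a kernel buffer passes the user-pointer checks;
 * the v5.9 listing above does the same job with kernel_write() and a
 * dentry_open() that takes a struct path. */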
967
968/*
969 * detach a backing block from a page
970 * - cache withdrawal is prevented by the caller
971 */
972void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
973{
974	struct cachefiles_object *object;
975	struct cachefiles_cache *cache;
976
977	object = container_of(_object, struct cachefiles_object, fscache);
978	cache = container_of(object->fscache.cache,
979			     struct cachefiles_cache, cache);
980
981	_enter("%p,{%lu}", object, page->index);
982
983	spin_unlock(&object->fscache.cookie->lock);
984}