// SPDX-License-Identifier: GPL-2.0-or-later
/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}
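
/* Note on the flow: cachefiles_read_waiter() is called from the page
 * waitqueue with the waitqueue lock held, so it does as little as possible
 * here: it only moves the monitor onto op->to_do and queues the retrieval
 * operation.  The actual copy from the backing page into the netfs page is
 * deferred to cachefiles_read_copier(), which runs asynchronously from the
 * FS-Cache thread pool (see FSCACHE_OP_ASYNC below).
 */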

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
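	/* Note: trylock_page() rather than lock_page() is used below: if the
	 * read is still in flight the page is locked and the attempt fails
	 * cheaply, whereas if the read has already completed we briefly take
	 * the lock and the unlock_page() re-fires the page waitqueue so that
	 * the freshly installed monitor sees the completion event.
	 */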
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}
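
/* Note: the copier only handles a bounded batch of monitors per invocation
 * (limited by the 'max' counter and a need_resched() check); any remaining
 * work is pushed back with fscache_enqueue_retrieval() so that one large
 * retrieval cannot monopolise the FS-Cache thread pool.
 */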

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
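	/* Illustrative example (assumed values): with 4KiB pages
	 * (PAGE_SHIFT == 12) and a backing filesystem using 1KiB blocks
	 * (s_blocksize_bits == 10), shift is 2, so page index N is passed
	 * to bmap() below as filesystem block N << 2.
	 */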

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}
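	/* Note: pagevec_add() returns the amount of space left in the
	 * pagevec, so a zero return in the loop above means the pagevec has
	 * just been filled and that batch is handed to
	 * fscache_mark_pages_cached() immediately; any partially filled
	 * batch is handed over below, after the loop.
	 */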

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}
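	/* Illustrative example (assumed values): with a 4KiB PAGE_SIZE, if
	 * the store limit (eof) is 0x2340 and this page starts at pos 0x2000,
	 * only eof - pos = 0x340 bytes are valid, so len is trimmed from
	 * PAGE_SIZE down to 0x340 before the write below is issued.
	 */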

	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}