// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len   - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;

	netfs_stat(&netfs_n_rh_read);
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len   - subreq->transferred);

	cres->ops->read(cres, subreq->start, &iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	rreq->netfs_ops->issue_read(subreq);
}
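
/* An illustrative sketch, not part of this file: a netfs's ->issue_read()
 * implementation starts the transfer described by subreq->start, subreq->len
 * and subreq->transferred, then reports the outcome (bytes transferred or a
 * negative error) via netfs_subreq_terminated().  Here
 * myfs_fetch_from_server() is a hypothetical helper:
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		ssize_t ret = myfs_fetch_from_server(subreq);
 *
 *		netfs_subreq_terminated(subreq, ret, false);
 *	}
 */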

/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the folios involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
					  bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			if (xas_retry(&xas, folio))
				continue;

			/* We might have multiple writes from the same huge
			 * folio, but we mustn't unlock a folio more than once.
			 */
			if (have_unlocked && folio_index(folio) <= unlocked)
				continue;
			unlocked = folio_index(folio);
			folio_end_fscache(folio);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_io_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_copy_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false,
					     netfs_sreq_trace_put_no_copy);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
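		/* e.g. two contiguous 4KiB subrequests covering 0x0000-0x0fff and
		 * 0x1000-0x1fff collapse into one 8KiB write starting at 0x0000. */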
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false,
					     netfs_sreq_trace_put_merged);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size, true);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_copy_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
	rreq->work.func = netfs_rreq_write_to_cache_work;
	if (!queue_work(system_unbound_wq, &rreq->work))
		BUG();
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock_folios(rreq);

	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq);

	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O subrequest that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates the outcome of the operation in @transferred_or_error,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a retriable failure to transfer anything, or a negative error
 * code.  The helper will look after reissuing I/O operations as appropriate
 * and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
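	/* Dropping it to 1 instead wakes the synchronous waiter in
	 * netfs_begin_read(), which holds that final count. */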
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						       loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq)
{
	enum netfs_io_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}
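
/* An illustrative sketch, not part of this file: a netfs that caps each wire
 * RPC at a fixed size (MYFS_MAX_RPC_SIZE is hypothetical) might implement
 * ->clamp_length() like so, returning true to accept the subrequest:
 *
 *	static bool myfs_clamp_length(struct netfs_io_subrequest *subreq)
 *	{
 *		subreq->len = min_t(size_t, subreq->len, MYFS_MAX_RPC_SIZE);
 *		return true;
 *	}
 */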

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index	= (*_debug_index)++;
	subreq->start		= rreq->start + rreq->submitted;
	subreq->len		= rreq->len   - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then, when we hand the subset to the netfs, it can choose to take a
	 * subset of that (the starts must coincide), in which case we go around
	 * the loop again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	unsigned int debug_index = 0;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
		return -EIO;
	}

	INIT_WORK(&rreq->work, netfs_rreq_work);

	if (sync)
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
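	/* The count starts at 1 as a bias so that in-flight completions can't
	 * drop nr_outstanding to zero while slices are still being submitted. */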
	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always belongs to
		 * us, and the service code isn't punted off to a random thread pool to
		 * process.
		 */
		for (;;) {
			wait_var_event(&rreq->nr_outstanding,
				       atomic_read(&rreq->nr_outstanding) == 1);
			netfs_rreq_assess(rreq, false);
			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
				break;
			cond_resched();
		}

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = 0;
	}
	return ret;
}
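
/* An illustrative sketch, not part of this file: filesystems normally reach
 * netfs_begin_read() indirectly through the buffered-read helpers, e.g. by
 * pointing their address_space operations at netfs_readahead(), which builds
 * the netfs_io_request and submits it through the code above:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readahead	= netfs_readahead,
 *		...
 *	};
 */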