// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

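/*
 * Reissue a subrequest: resync its folio_queue cursor from its iterator, mark
 * it as being in progress again and then hand it back to the filesystem's
 * ->issue_read() method.
 */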
static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	struct iov_iter *io_iter = &subreq->io_iter;

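	/* If the subrequest is reading into a folio_queue, point its folio
	 * cursor back at the iterator's current queue segment, slot and folio
	 * order.
	 */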
	if (iov_iter_is_folioq(io_iter)) {
		subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
		subreq->curr_folioq_slot = io_iter->folioq_slot;
		subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
	}

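	/* Mark the subrequest as being in flight once more and hand it to the
	 * filesystem to be (re)issued.
	 */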
	atomic_inc(&rreq->nr_outstanding);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream0 = &rreq->io_streams[0];
	LIST_HEAD(sublist);
	LIST_HEAD(queue);

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&rreq->subrequests))
		return;

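	/* Give the filesystem a chance to prepare the request as a whole for
	 * retrying (no particular stream is indicated here).
	 */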
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	list_splice_init(&rreq->subrequests, &queue);

	do {
		struct netfs_io_subrequest *from;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part, deferred_next_donated = 0;
		bool boundary = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
		list_move_tail(&from->rreq_link, &sublist);
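		/* Only the part of the head subrequest that hasn't yet been
		 * transferred needs to be fetched again.
		 */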
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->consumed, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

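		/* Roll any following subrequests into the same span provided
		 * they're contiguous, untouched and also marked for retry,
		 * stopping if one of them ends on a boundary.
		 */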
		deferred_next_donated = from->next_donated;
		while ((subreq = list_first_entry_or_null(
				&queue, struct netfs_io_subrequest, rreq_link))) {
			if (subreq->start != start + len ||
			    subreq->transferred > 0 ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			list_move_tail(&subreq->rreq_link, &sublist);
			len += subreq->len;
			deferred_next_donated = subreq->next_donated;
			if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags)) {
				boundary = true;
				break;
			}
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			list_del(&subreq->rreq_link);

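			/* Flip the subrequest over to a download and
			 * provisionally give it the whole of the remaining span;
			 * the renegotiated rsize may cut it down again below.
			 */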
			subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start	= start - subreq->transferred;
			subreq->len	= len   + subreq->transferred;
			stream0->sreq_max_len = subreq->len;

			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;

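			/* Put the subrequest back on the request's list under
			 * lock and soak up any donation that accrued on the
			 * request whilst it was off the list.
			 */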
			spin_lock_bh(&rreq->lock);
			list_add_tail(&subreq->rreq_link, &rreq->subrequests);
			subreq->prev_donated += rreq->prev_donated;
			rreq->prev_donated = 0;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			spin_unlock_bh(&rreq->lock);

			BUG_ON(!len);

			/* Renegotiate max_len (rsize) */
			if (rreq->netfs_ops->prepare_read &&
			    rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
			}

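			/* Carve out as much of the remaining span as the
			 * renegotiated rsize and segment limits allow and point
			 * this subrequest's iterator at just that slice.
			 */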
			part = umin(len, stream0->sreq_max_len);
			if (unlikely(rreq->io_streams[0].sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
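			/* Only the last slice of the span keeps the boundary
			 * marker and the donation deferred from the old tail
			 * subrequest.
			 */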
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = deferred_next_donated;
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = 0;
			}

			netfs_reissue_read(rreq, subreq);
			if (!len)
				break;

			/* If we ran out of subrequests, allocate another. */
			if (list_empty(&sublist)) {
				subreq = netfs_alloc_subrequest(rreq);
				if (!subreq)
					goto abandon;
				subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
				subreq->start = start;

				/* We get two refs, but need just one. */
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
				trace_netfs_sreq(subreq, netfs_sreq_trace_split);
				list_add_tail(&subreq->rreq_link, &sublist);
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess.
		 */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
		}

	} while (!list_empty(&queue));

	return;

	/* If we hit an error (e.g. ENOMEM) or an unretryable subrequest, fail
	 * all the remaining subrequests.
	 */
abandon:
	list_splice_init(&sublist, &queue);
	list_for_each_entry(subreq, &queue, rreq_link) {
		if (!subreq->error)
			subreq->error = -ENOMEM;
		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
	spin_lock_bh(&rreq->lock);
	list_splice_tail_init(&queue, &rreq->subrequests);
	spin_unlock_bh(&rreq->lock);
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

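	/* Hold the outstanding count up so that the request can't be completed
	 * whilst retries are still being issued; if dropping it again leaves
	 * nothing in flight, finish the request off here.
	 */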
	atomic_inc(&rreq->nr_outstanding);

	netfs_retry_read_subrequests(rreq);

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, false);
}

/*
 * Unlock any pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

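	/* Walk the whole folio_queue buffer looking for folios that are still
	 * locked because their subrequest was abandoned and unlock them.
	 */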
	for (p = rreq->buffer; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}