From: Nathan Chancellor <nathan@kernel.org>
To: David Howells <dhowells@redhat.com>,
Christian Brauner <brauner@kernel.org>
Cc: Steve French <smfrench@gmail.com>,
Matthew Wilcox <willy@infradead.org>,
Jeff Layton <jlayton@kernel.org>,
Gao Xiang <hsiangkao@linux.alibaba.com>,
Dominique Martinet <asmadeus@codewreck.org>,
Marc Dionne <marc.dionne@auristor.com>,
Paulo Alcantara <pc@manguebit.com>,
Shyam Prasad N <sprasad@microsoft.com>,
Tom Talpey <tom@talpey.com>,
Eric Van Hensbergen <ericvh@kernel.org>,
Ilya Dryomov <idryomov@gmail.com>,
netfs@lists.linux.dev, linux-afs@lists.infradead.org,
linux-cifs@vger.kernel.org, linux-nfs@vger.kernel.org,
ceph-devel@vger.kernel.org, v9fs@lists.linux.dev,
linux-erofs@lists.ozlabs.org, linux-fsdevel@vger.kernel.org,
linux-mm@kvack.org, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 28/33] netfs: Change the read result collector to only use one work item
Date: Thu, 14 Nov 2024 09:39:31 -0700 [thread overview]
Message-ID: <20241114163931.GA1928968@thelio-3990X> (raw)
In-Reply-To: <20241108173236.1382366-29-dhowells@redhat.com>
Hi David,
On Fri, Nov 08, 2024 at 05:32:29PM +0000, David Howells wrote:
...
> diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
> index 264f3cb6a7dc..8ca0558570c1 100644
> --- a/fs/netfs/read_retry.c
> +++ b/fs/netfs/read_retry.c
> @@ -12,15 +12,8 @@
> static void netfs_reissue_read(struct netfs_io_request *rreq,
> struct netfs_io_subrequest *subreq)
> {
> - struct iov_iter *io_iter = &subreq->io_iter;
> -
> - if (iov_iter_is_folioq(io_iter)) {
> - subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
> - subreq->curr_folioq_slot = io_iter->folioq_slot;
> - subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
> - }
> -
> - atomic_inc(&rreq->nr_outstanding);
> + __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
> + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
> __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
> netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
> subreq->rreq->netfs_ops->issue_read(subreq);
> @@ -33,13 +26,12 @@ static void netfs_reissue_read(struct netfs_io_request *rreq,
> static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
> {
> struct netfs_io_subrequest *subreq;
> - struct netfs_io_stream *stream0 = &rreq->io_streams[0];
> - LIST_HEAD(sublist);
> - LIST_HEAD(queue);
> + struct netfs_io_stream *stream = &rreq->io_streams[0];
> + struct list_head *next;
>
> _enter("R=%x", rreq->debug_id);
>
> - if (list_empty(&rreq->subrequests))
> + if (list_empty(&stream->subrequests))
> return;
>
> if (rreq->netfs_ops->retry_request)
> @@ -52,7 +44,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
> !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
> struct netfs_io_subrequest *subreq;
>
> - list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
> + list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
> if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
> break;
> if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
> @@ -73,48 +65,44 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
> * populating with smaller subrequests. In the event that the subreq
> * we just launched finishes before we insert the next subreq, it'll
> * fill in rreq->prev_donated instead.
> -
> + *
> * Note: Alternatively, we could split the tail subrequest right before
> * we reissue it and fix up the donations under lock.
> */
> - list_splice_init(&rreq->subrequests, &queue);
> + next = stream->subrequests.next;
>
> do {
> - struct netfs_io_subrequest *from;
> + struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
> struct iov_iter source;
> unsigned long long start, len;
> - size_t part, deferred_next_donated = 0;
> + size_t part;
> bool boundary = false;
>
> /* Go through the subreqs and find the next span of contiguous
> * buffer that we then rejig (cifs, for example, needs the
> * rsize renegotiating) and reissue.
> */
> - from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
> - list_move_tail(&from->rreq_link, &sublist);
> + from = list_entry(next, struct netfs_io_subrequest, rreq_link);
> + to = from;
> start = from->start + from->transferred;
> len = from->len - from->transferred;
>
> - _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
> + _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
> rreq->debug_id, from->debug_index,
> - from->start, from->consumed, from->transferred, from->len);
> + from->start, from->transferred, from->len);
>
> if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
> !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
> goto abandon;
>
> - deferred_next_donated = from->next_donated;
> - while ((subreq = list_first_entry_or_null(
> - &queue, struct netfs_io_subrequest, rreq_link))) {
> - if (subreq->start != start + len ||
> - subreq->transferred > 0 ||
> + list_for_each_continue(next, &stream->subrequests) {
> + subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
> + if (subreq->start + subreq->transferred != start + len ||
> + test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
> !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
> break;
> - list_move_tail(&subreq->rreq_link, &sublist);
> - len += subreq->len;
> - deferred_next_donated = subreq->next_donated;
> - if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
> - break;
> + to = subreq;
> + len += to->len;
> }
>
> _debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
> @@ -127,36 +115,28 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
> source.count = len;
>
> /* Work through the sublist. */
> - while ((subreq = list_first_entry_or_null(
> - &sublist, struct netfs_io_subrequest, rreq_link))) {
> - list_del(&subreq->rreq_link);
> -
> + subreq = from;
> + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
> + if (!len)
> + break;
> subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
> subreq->start = start - subreq->transferred;
> subreq->len = len + subreq->transferred;
> - stream0->sreq_max_len = subreq->len;
> -
> __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
> __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
> -
> - spin_lock(&rreq->lock);
> - list_add_tail(&subreq->rreq_link, &rreq->subrequests);
> - subreq->prev_donated += rreq->prev_donated;
> - rreq->prev_donated = 0;
> trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
> - spin_unlock(&rreq->lock);
> -
> - BUG_ON(!len);
>
> /* Renegotiate max_len (rsize) */
> + stream->sreq_max_len = subreq->len;
> if (rreq->netfs_ops->prepare_read(subreq) < 0) {
> trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
> __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
> + goto abandon;
> }
>
> - part = umin(len, stream0->sreq_max_len);
> - if (unlikely(rreq->io_streams[0].sreq_max_segs))
> - part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
> + part = umin(len, stream->sreq_max_len);
> + if (unlikely(stream->sreq_max_segs))
> + part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
> subreq->len = subreq->transferred + part;
> subreq->io_iter = source;
> iov_iter_truncate(&subreq->io_iter, part);
> @@ -166,58 +146,106 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
> if (!len) {
> if (boundary)
> __set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
> - subreq->next_donated = deferred_next_donated;
> } else {
> __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
> - subreq->next_donated = 0;
> }
>
> + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
> netfs_reissue_read(rreq, subreq);
> - if (!len)
> + if (subreq == to)
> break;
> -
> - /* If we ran out of subrequests, allocate another. */
> - if (list_empty(&sublist)) {
> - subreq = netfs_alloc_subrequest(rreq);
> - if (!subreq)
> - goto abandon;
> - subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
> - subreq->start = start;
> -
> - /* We get two refs, but need just one. */
> - netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
> - trace_netfs_sreq(subreq, netfs_sreq_trace_split);
> - list_add_tail(&subreq->rreq_link, &sublist);
> - }
> }
>
> /* If we managed to use fewer subreqs, we can discard the
> - * excess.
> + * excess; if we used the same number, then we're done.
> */
> - while ((subreq = list_first_entry_or_null(
> - &sublist, struct netfs_io_subrequest, rreq_link))) {
> - trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
> - list_del(&subreq->rreq_link);
> - netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
> + if (!len) {
> + if (subreq == to)
> + continue;
> + list_for_each_entry_safe_from(subreq, tmp,
> + &stream->subrequests, rreq_link) {
> + trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
> + list_del(&subreq->rreq_link);
> + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
> + if (subreq == to)
> + break;
> + }
> + continue;
> }
>
> - } while (!list_empty(&queue));
> + /* We ran out of subrequests, so we need to allocate some more
> + * and insert them after.
> + */
> + do {
> + subreq = netfs_alloc_subrequest(rreq);
> + if (!subreq) {
> + subreq = to;
> + goto abandon_after;
> + }
> + subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
> + subreq->start = start;
> + subreq->len = len;
> + subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
> + subreq->stream_nr = stream->stream_nr;
> + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
> +
> + trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
> + refcount_read(&subreq->ref),
> + netfs_sreq_trace_new);
> + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
> +
> + list_add(&subreq->rreq_link, &to->rreq_link);
> + to = list_next_entry(to, rreq_link);
> + trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
> +
> + stream->sreq_max_len = umin(len, rreq->rsize);
> + stream->sreq_max_segs = 0;
> + if (unlikely(stream->sreq_max_segs))
> + part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
> +
> + netfs_stat(&netfs_n_rh_download);
> + if (rreq->netfs_ops->prepare_read(subreq) < 0) {
> + trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
> + __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
> + goto abandon;
> + }
> +
> + part = umin(len, stream->sreq_max_len);
> + subreq->len = subreq->transferred + part;
> + subreq->io_iter = source;
> + iov_iter_truncate(&subreq->io_iter, part);
> + iov_iter_advance(&source, part);
> +
> + len -= part;
> + start += part;
> + if (!len && boundary) {
> + __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
> + boundary = false;
> + }
> +
> + netfs_reissue_read(rreq, subreq);
> + } while (len);
> +
> + } while (!list_is_head(next, &stream->subrequests));
>
> return;
>
> - /* If we hit ENOMEM, fail all remaining subrequests */
> + /* If we hit an error, fail all remaining incomplete subrequests */
> +abandon_after:
> + if (list_is_last(&subreq->rreq_link, &stream->subrequests))
> + return;
This change, as commit 1bd9011ee163 ("netfs: Change the read result
collector to only use one work item") in next-20241114, causes a clang
warning:
fs/netfs/read_retry.c:235:20: error: variable 'subreq' is uninitialized when used here [-Werror,-Wuninitialized]
235 | if (list_is_last(&subreq->rreq_link, &stream->subrequests))
| ^~~~~~
fs/netfs/read_retry.c:28:36: note: initialize the variable 'subreq' to silence this warning
28 | struct netfs_io_subrequest *subreq;
| ^
| = NULL
This may be a shadowing issue, as adding KCFLAGS=-Wshadow shows:
fs/netfs/read_retry.c:75:31: error: declaration shadows a local variable [-Werror,-Wshadow]
75 | struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
| ^
fs/netfs/read_retry.c:28:30: note: previous declaration is here
28 | struct netfs_io_subrequest *subreq;
| ^
Cheers,
Nathan
> + subreq = list_next_entry(subreq, rreq_link);
> abandon:
> - list_splice_init(&sublist, &queue);
> - list_for_each_entry(subreq, &queue, rreq_link) {
> - if (!subreq->error)
> - subreq->error = -ENOMEM;
> - __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
> + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
> + if (!subreq->error &&
> + !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
> + !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
> + continue;
> + subreq->error = -ENOMEM;
> + __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
> __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
> __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
> }
> - spin_lock(&rreq->lock);
> - list_splice_tail_init(&queue, &rreq->subrequests);
> - spin_unlock(&rreq->lock);
> }
>
> /*
> @@ -225,14 +253,19 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
> */
> void netfs_retry_reads(struct netfs_io_request *rreq)
> {
> - trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
> + struct netfs_io_subrequest *subreq;
> + struct netfs_io_stream *stream = &rreq->io_streams[0];
>
> - atomic_inc(&rreq->nr_outstanding);
> + /* Wait for all outstanding I/O to quiesce before performing retries as
> + * we may need to renegotiate the I/O sizes.
> + */
> + list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
> + wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
> + TASK_UNINTERRUPTIBLE);
> + }
>
> + trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
> netfs_retry_read_subrequests(rreq);
> -
> - if (atomic_dec_and_test(&rreq->nr_outstanding))
> - netfs_rreq_terminated(rreq);
> }
>
> /*
next prev parent reply other threads:[~2024-11-14 16:39 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-11-08 17:32 [PATCH v4 00/33] netfs: Read performance improvements and "single-blob" support David Howells
2024-11-08 17:32 ` [PATCH v4 01/33] kheaders: Ignore silly-rename files David Howells
2024-11-08 17:32 ` [PATCH v4 02/33] netfs: Remove call to folio_index() David Howells
2024-11-08 17:32 ` [PATCH v4 03/33] netfs: Fix a few minor bugs in netfs_page_mkwrite() David Howells
2024-11-08 17:32 ` [PATCH v4 04/33] netfs: Remove unnecessary references to pages David Howells
2024-11-08 17:32 ` [PATCH v4 05/33] netfs: Use a folio_queue allocation and free functions David Howells
2024-11-08 17:32 ` [PATCH v4 06/33] netfs: Add a tracepoint to log the lifespan of folio_queue structs David Howells
2024-11-08 17:32 ` [PATCH v4 07/33] netfs: Abstract out a rolling folio buffer implementation David Howells
2024-11-15 20:01 ` Kees Bakker
2024-11-18 16:39 ` David Howells
2024-11-08 17:32 ` [PATCH v4 08/33] netfs: Make netfs_advance_write() return size_t David Howells
2024-11-08 17:32 ` [PATCH v4 09/33] netfs: Split retry code out of fs/netfs/write_collect.c David Howells
2024-11-08 17:32 ` [PATCH v4 10/33] netfs: Drop the error arg from netfs_read_subreq_terminated() David Howells
2024-11-08 17:32 ` [PATCH v4 11/33] netfs: Drop the was_async " David Howells
2024-11-08 17:32 ` [PATCH v4 12/33] netfs: Don't use bh spinlock David Howells
2024-11-08 17:32 ` [PATCH v4 13/33] afs: Don't use mutex for I/O operation lock David Howells
2024-11-08 17:32 ` [PATCH v4 14/33] afs: Fix EEXIST error returned from afs_rmdir() to be ENOTEMPTY David Howells
2024-11-08 17:32 ` [PATCH v4 15/33] afs: Fix directory format encoding struct David Howells
2024-11-08 17:32 ` [PATCH v4 16/33] netfs: Remove some extraneous directory invalidations David Howells
2024-11-08 17:32 ` [PATCH v4 17/33] cachefiles: Add some subrequest tracepoints David Howells
2024-11-08 17:32 ` [PATCH v4 18/33] cachefiles: Add auxiliary data trace David Howells
2024-11-08 17:32 ` [PATCH v4 19/33] afs: Add more tracepoints to do with tracking validity David Howells
2024-11-08 17:32 ` [PATCH v4 20/33] netfs: Add functions to build/clean a buffer in a folio_queue David Howells
2024-11-08 17:32 ` [PATCH v4 21/33] netfs: Add support for caching single monolithic objects such as AFS dirs David Howells
2024-11-08 17:32 ` [PATCH v4 22/33] afs: Make afs_init_request() get a key if not given a file David Howells
2024-11-08 17:32 ` [PATCH v4 23/33] afs: Use netfslib for directories David Howells
2024-11-15 20:32 ` Kees Bakker
2024-11-18 16:35 ` David Howells
2024-11-08 17:32 ` [PATCH v4 24/33] afs: Use netfslib for symlinks, allowing them to be cached David Howells
2024-11-08 17:32 ` [PATCH v4 25/33] afs: Eliminate afs_read David Howells
2024-11-08 17:32 ` [PATCH v4 26/33] afs: Fix cleanup of immediately failed async calls David Howells
2024-11-08 17:32 ` [PATCH v4 27/33] afs: Make {Y,}FS.FetchData an asynchronous operation David Howells
2024-11-08 17:32 ` [PATCH v4 28/33] netfs: Change the read result collector to only use one work item David Howells
2024-11-14 16:39 ` Nathan Chancellor [this message]
2024-11-18 17:20 ` David Howells
2024-11-08 17:32 ` [PATCH v4 29/33] afs: Make afs_mkdir() locally initialise a new directory's content David Howells
2024-11-08 17:32 ` [PATCH v4 30/33] afs: Use the contained hashtable to search a directory David Howells
2024-11-08 17:32 ` [PATCH v4 31/33] afs: Locally initialise the contents of a new symlink on creation David Howells
2024-11-08 17:32 ` [PATCH v4 32/33] afs: Add a tracepoint for afs_read_receive() David Howells
2024-11-08 17:32 ` [PATCH v4 33/33] netfs: Report on NULL folioq in netfs_writeback_unlock_folios() David Howells
2024-11-11 9:12 ` [PATCH v4 00/33] netfs: Read performance improvements and "single-blob" support Christian Brauner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241114163931.GA1928968@thelio-3990X \
--to=nathan@kernel.org \
--cc=asmadeus@codewreck.org \
--cc=brauner@kernel.org \
--cc=ceph-devel@vger.kernel.org \
--cc=dhowells@redhat.com \
--cc=ericvh@kernel.org \
--cc=hsiangkao@linux.alibaba.com \
--cc=idryomov@gmail.com \
--cc=jlayton@kernel.org \
--cc=linux-afs@lists.infradead.org \
--cc=linux-cifs@vger.kernel.org \
--cc=linux-erofs@lists.ozlabs.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nfs@vger.kernel.org \
--cc=marc.dionne@auristor.com \
--cc=netdev@vger.kernel.org \
--cc=netfs@lists.linux.dev \
--cc=pc@manguebit.com \
--cc=smfrench@gmail.com \
--cc=sprasad@microsoft.com \
--cc=tom@talpey.com \
--cc=v9fs@lists.linux.dev \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox