linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: David CARLIER <devnexen@gmail.com>
To: David Hildenbrand <david@kernel.org>,
	Kairui Song <kasong@tencent.com>, Chris Li <chrisl@kernel.org>,
	 Andrew Morton <akpm@linux-foundation.org>,
	Kemeng Shi <shikemeng@huaweicloud.com>,
	 Nhat Pham <nphamcs@gmail.com>, Baoquan He <bhe@redhat.com>,
	 Youngjun Park <youngjun.park@lge.com>,
	NeilBrown <neil@brown.name>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	 "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: Re: [PATCH v4] mm/page_io: rename swap_iocb->pages to swap_iocb->bvecs
Date: Thu, 2 Apr 2026 07:17:29 +0100	[thread overview]
Message-ID: <CA+XhMqwXe89x_QYjsbUtf7rdRt47tSGGGy-QnFBMsYJU-ohOxw@mail.gmail.com> (raw)
In-Reply-To: <20260402061202.35097-1-devnexen@gmail.com>

please ignore this one; I re-sent it afterwards. Cheers.

On Thu, 2 Apr 2026 at 07:12, David Carlier <devnexen@gmail.com> wrote:
>
> swap_iocb->pages tracks the number of bvec entries (folios), not base
> pages. Rename it to bvecs to accurately reflect its purpose.
>
> Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: David Carlier <devnexen@gmail.com>
> ---
> v4: renamed swap_iocb->pages to ->bvecs (Willy)
> v3: use sio->len for PSWPIN accounting
> v2: convert sio_write_complete() to folio APIs
>  mm/page_io.c | 30 +++++++++++++++---------------
>  1 file changed, 15 insertions(+), 15 deletions(-)
>
> diff --git a/mm/page_io.c b/mm/page_io.c
> index 1389cd57ca88..e524cb821d04 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -327,7 +327,7 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
>  struct swap_iocb {
>         struct kiocb            iocb;
>         struct bio_vec          bvec[SWAP_CLUSTER_MAX];
> -       int                     pages;
> +       int                     bvecs;
>         int                     len;
>  };
>  static mempool_t *sio_pool;
> @@ -362,14 +362,14 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
>                  */
>                 pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
>                                    ret, swap_dev_pos(page_swap_entry(page)));
> -               for (p = 0; p < sio->pages; p++) {
> +               for (p = 0; p < sio->bvecs; p++) {
>                         page = sio->bvec[p].bv_page;
>                         set_page_dirty(page);
>                         ClearPageReclaim(page);
>                 }
>         }
>
> -       for (p = 0; p < sio->pages; p++)
> +       for (p = 0; p < sio->bvecs; p++)
>                 end_page_writeback(sio->bvec[p].bv_page);
>
>         mempool_free(sio, sio_pool);
> @@ -397,13 +397,13 @@ static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
>                 init_sync_kiocb(&sio->iocb, swap_file);
>                 sio->iocb.ki_complete = sio_write_complete;
>                 sio->iocb.ki_pos = pos;
> -               sio->pages = 0;
> +               sio->bvecs = 0;
>                 sio->len = 0;
>         }
> -       bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
> +       bvec_set_folio(&sio->bvec[sio->bvecs], folio, folio_size(folio), 0);
>         sio->len += folio_size(folio);
> -       sio->pages += 1;
> -       if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
> +       sio->bvecs += 1;
> +       if (sio->bvecs == ARRAY_SIZE(sio->bvec) || !swap_plug) {
>                 swap_write_unplug(sio);
>                 sio = NULL;
>         }
> @@ -477,7 +477,7 @@ void swap_write_unplug(struct swap_iocb *sio)
>         struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
>         int ret;
>
> -       iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
> +       iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->bvecs, sio->len);
>         ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
>         if (ret != -EIOCBQUEUED)
>                 sio_write_complete(&sio->iocb, ret);
> @@ -489,7 +489,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
>         int p;
>
>         if (ret == sio->len) {
> -               for (p = 0; p < sio->pages; p++) {
> +               for (p = 0; p < sio->bvecs; p++) {
>                         struct folio *folio = page_folio(sio->bvec[p].bv_page);
>
>                         count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
> @@ -499,7 +499,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
>                 }
>                 count_vm_events(PSWPIN, sio->len >> PAGE_SHIFT);
>         } else {
> -               for (p = 0; p < sio->pages; p++) {
> +               for (p = 0; p < sio->bvecs; p++) {
>                         struct folio *folio = page_folio(sio->bvec[p].bv_page);
>
>                         folio_unlock(folio);
> @@ -559,13 +559,13 @@ static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
>                 init_sync_kiocb(&sio->iocb, sis->swap_file);
>                 sio->iocb.ki_pos = pos;
>                 sio->iocb.ki_complete = sio_read_complete;
> -               sio->pages = 0;
> +               sio->bvecs = 0;
>                 sio->len = 0;
>         }
> -       bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
> +       bvec_set_folio(&sio->bvec[sio->bvecs], folio, folio_size(folio), 0);
>         sio->len += folio_size(folio);
> -       sio->pages += 1;
> -       if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
> +       sio->bvecs += 1;
> +       if (sio->bvecs == ARRAY_SIZE(sio->bvec) || !plug) {
>                 swap_read_unplug(sio);
>                 sio = NULL;
>         }
> @@ -666,7 +666,7 @@ void __swap_read_unplug(struct swap_iocb *sio)
>         struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
>         int ret;
>
> -       iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
> +       iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->bvecs, sio->len);
>         ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
>         if (ret != -EIOCBQUEUED)
>                 sio_read_complete(&sio->iocb, ret);
> --
> 2.53.0
>


      reply	other threads:[~2026-04-02  6:17 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-02  6:12 David Carlier
2026-04-02  6:17 ` David CARLIER [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CA+XhMqwXe89x_QYjsbUtf7rdRt47tSGGGy-QnFBMsYJU-ohOxw@mail.gmail.com \
    --to=devnexen@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=bhe@redhat.com \
    --cc=chrisl@kernel.org \
    --cc=david@kernel.org \
    --cc=kasong@tencent.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=neil@brown.name \
    --cc=nphamcs@gmail.com \
    --cc=shikemeng@huaweicloud.com \
    --cc=willy@infradead.org \
    --cc=youngjun.park@lge.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox