From: "Marek Marczykowski-Górecki" <marmarek@invisiblethingslab.com>
To: Mikulas Patocka <mpatocka@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>, Jens Axboe <axboe@fb.com>,
Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
Jan Kara <jack@suse.cz>, Vlastimil Babka <vbabka@suse.cz>,
Andrew Morton <akpm@linux-foundation.org>,
Matthew Wilcox <willy@infradead.org>,
Michal Hocko <mhocko@suse.com>,
stable@vger.kernel.org, regressions@lists.linux.dev,
Alasdair Kergon <agk@redhat.com>,
Mike Snitzer <snitzer@kernel.org>,
dm-devel@lists.linux.dev, linux-mm@kvack.org
Subject: Re: Intermittent storage (dm-crypt?) freeze - regression 6.4->6.5
Date: Fri, 3 Nov 2023 17:15:49 +0100 [thread overview]
Message-ID: <ZUUctamEFtAlSnSV@mail-itl> (raw)
In-Reply-To: <3cb4133c-b6db-9187-a678-11ed8c9456e@redhat.com>
On Thu, Nov 02, 2023 at 06:06:33PM +0100, Mikulas Patocka wrote:
> Then, try this patch (without "iommu=panic"), reproduce the deadlock and
> tell us which one of the "printk" statements is triggered during the
> deadlock.
The "821" one - see below.
> Mikulas
>
> ---
> drivers/nvme/host/core.c | 8 ++++++--
> drivers/nvme/host/pci.c | 27 ++++++++++++++++++++++-----
> 2 files changed, 28 insertions(+), 7 deletions(-)
>
> Index: linux-stable/drivers/nvme/host/pci.c
> ===================================================================
> --- linux-stable.orig/drivers/nvme/host/pci.c 2023-10-31 15:35:38.000000000 +0100
> +++ linux-stable/drivers/nvme/host/pci.c 2023-11-02 17:38:20.000000000 +0100
> @@ -622,6 +622,10 @@ static blk_status_t nvme_pci_setup_prps(
> prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
> if (!prp_list) {
> iod->nr_allocations = -1;
> + if (nprps <= (256 / 8))
> + printk("allocation failure at %d\n", __LINE__);
> + else
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> }
> iod->list[0].prp_list = prp_list;
> @@ -631,8 +635,10 @@ static blk_status_t nvme_pci_setup_prps(
> if (i == NVME_CTRL_PAGE_SIZE >> 3) {
> __le64 *old_prp_list = prp_list;
> prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
> - if (!prp_list)
> + if (!prp_list) {
> + printk("allocation failure at %d\n", __LINE__);
> goto free_prps;
> + }
> iod->list[iod->nr_allocations++].prp_list = prp_list;
> prp_list[0] = old_prp_list[i - 1];
> old_prp_list[i - 1] = cpu_to_le64(prp_dma);
> @@ -712,6 +718,7 @@ static blk_status_t nvme_pci_setup_sgls(
> sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
> if (!sg_list) {
> iod->nr_allocations = -1;
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> }
>
> @@ -736,8 +743,10 @@ static blk_status_t nvme_setup_prp_simpl
> unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
>
> iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
> - if (dma_mapping_error(dev->dev, iod->first_dma))
> + if (dma_mapping_error(dev->dev, iod->first_dma)) {
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> + }
> iod->dma_len = bv->bv_len;
>
> cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
> @@ -755,8 +764,10 @@ static blk_status_t nvme_setup_sgl_simpl
> struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
>
> iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
> - if (dma_mapping_error(dev->dev, iod->first_dma))
> + if (dma_mapping_error(dev->dev, iod->first_dma)) {
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> + }
> iod->dma_len = bv->bv_len;
>
> cmnd->flags = NVME_CMD_SGL_METABUF;
> @@ -791,8 +802,10 @@ static blk_status_t nvme_map_data(struct
>
> iod->dma_len = 0;
> iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
> - if (!iod->sgt.sgl)
> + if (!iod->sgt.sgl) {
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> + }
> sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
> iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
> if (!iod->sgt.orig_nents)
> @@ -801,8 +814,12 @@ static blk_status_t nvme_map_data(struct
> rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
> DMA_ATTR_NO_WARN);
> if (rc) {
> - if (rc == -EREMOTEIO)
> + if (rc == -EREMOTEIO) {
> + printk("allocation failure at %d\n", __LINE__);
> ret = BLK_STS_TARGET;
> + } else {
> + printk("allocation failure at %d\n", __LINE__);
I get a lot of this one.
> + }
> goto out_free_sg;
> }
>
> Index: linux-stable/drivers/nvme/host/core.c
> ===================================================================
> --- linux-stable.orig/drivers/nvme/host/core.c 2023-10-31 15:35:38.000000000 +0100
> +++ linux-stable/drivers/nvme/host/core.c 2023-11-02 17:12:39.000000000 +0100
> @@ -708,8 +708,10 @@ blk_status_t nvme_fail_nonready_command(
> ctrl->state != NVME_CTRL_DELETING &&
> ctrl->state != NVME_CTRL_DEAD &&
> !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
> - !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
> + !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) {
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> + }
> return nvme_host_path_error(rq);
> }
> EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
> @@ -784,8 +786,10 @@ static blk_status_t nvme_setup_discard(s
> * discard page. If that's also busy, it's safe to return
> * busy, as we know we can make progress once that's freed.
> */
> - if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
> + if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) {
> + printk("allocation failure at %d\n", __LINE__);
> return BLK_STS_RESOURCE;
> + }
>
> range = page_address(ns->ctrl->discard_page);
> }
--
Best Regards,
Marek Marczykowski-Górecki
Invisible Things Lab