From: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
hannes@cmpxchg.org, yosry.ahmed@linux.dev, nphamcs@gmail.com,
chengming.zhou@linux.dev, usamaarif642@gmail.com,
ryan.roberts@arm.com, 21cnbao@gmail.com,
ying.huang@linux.alibaba.com, akpm@linux-foundation.org,
senozhatsky@chromium.org, sj@kernel.org, kasong@tencent.com,
linux-crypto@vger.kernel.org, herbert@gondor.apana.org.au,
davem@davemloft.net, clabbe@baylibre.com, ardb@kernel.org,
ebiggers@google.com, surenb@google.com,
kristen.c.accardi@intel.com, vinicius.gomes@intel.com,
giovanni.cabiddu@intel.com
Cc: wajdi.k.feghali@intel.com, kanchana.p.sridhar@intel.com
Subject: [PATCH v14 06/26] crypto: iaa - iaa_wq uses percpu_refs for get/put reference counting.
Date: Sat, 24 Jan 2026 19:35:17 -0800
Message-ID: <20260125033537.334628-7-kanchana.p.sridhar@intel.com>
In-Reply-To: <20260125033537.334628-1-kanchana.p.sridhar@intel.com>

This patch converts the reference counting on "struct iaa_wq" to a
percpu_ref operating in atomic mode, replacing the "int ref" counter
that relied on taking the "idxd->dev_lock" spinlock to synchronize
get/put operations.

This yields a lighter-weight, cleaner and more effective refcount
implementation for the iaa_wq: it prevents race conditions and reduces
the latency of compress/decompress batches submitted to the IAA
accelerator.
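
For reviewers unfamiliar with the API, the percpu_ref lifecycle adopted
here boils down to the following minimal sketch. The "example_wq" names
are illustrative only, not the driver's actual structures, and the
usual kernel includes are assumed:

  #include <linux/percpu-refcount.h>
  #include <linux/gfp.h>

  struct example_wq {
          struct percpu_ref ref;
          bool free;
  };

  /* Runs once the refcount drops to zero after percpu_ref_kill(). */
  static void example_release(struct percpu_ref *ref)
  {
          struct example_wq *ewq = container_of(ref, struct example_wq, ref);

          WRITE_ONCE(ewq->free, true);
  }

  /* Setup: init in atomic mode, so gets/puts are plain atomic ops. */
  static int example_setup(struct example_wq *ewq)
  {
          return percpu_ref_init(&ewq->ref, example_release,
                                 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
  }

  /* Hot path: lockless get/put replaces the spinlocked int counter. */
  static int example_submit(struct example_wq *ewq)
  {
          if (!percpu_ref_tryget(&ewq->ref))
                  return -ENODEV;  /* refcount already drained to zero */
          /* ... submit a descriptor to the wq ... */
          percpu_ref_put(&ewq->ref);
          return 0;
  }

  /* Teardown: drop the initial ref, wait for release, then clean up. */
  static void example_teardown(struct example_wq *ewq)
  {
          percpu_ref_kill(&ewq->ref);      /* drops the initial reference */
          while (!READ_ONCE(ewq->free))    /* wait for example_release() */
                  cpu_relax();
          percpu_ref_exit(&ewq->ref);
  }

Because the ref is initialized in atomic mode, percpu_ref_kill() does
not need an RCU grace period to switch modes before the release
callback can fire, yet get/put still avoid taking "idxd->dev_lock".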

For a single-threaded, madvise-based workload compressing the
Silesia.tar dataset, these are the before/after latencies for an
8-page compress batch:

  ==================================
             p50 (ns)     p99 (ns)
  ==================================
  before        5,576        5,992
  after         5,472        5,848
  Change         -104         -144
  ==================================

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
drivers/crypto/intel/iaa/iaa_crypto.h | 4 +-
drivers/crypto/intel/iaa/iaa_crypto_main.c | 119 +++++++--------------
2 files changed, 41 insertions(+), 82 deletions(-)
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index cc76a047b54a..9611f2518f42 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -47,8 +47,8 @@ struct iaa_wq {
struct list_head list;
struct idxd_wq *wq;
- int ref;
- bool remove;
+ struct percpu_ref ref;
+ bool free;
bool mapped;
struct iaa_device *iaa_device;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 3466414f926a..01d7150dbbd8 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -702,7 +702,7 @@ static void del_iaa_device(struct iaa_device *iaa_device)
static void free_iaa_device(struct iaa_device *iaa_device)
{
- if (!iaa_device)
+ if (!iaa_device || iaa_device->n_wq)
return;
remove_device_compression_modes(iaa_device);
@@ -732,6 +732,13 @@ static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
return false;
}
+static void __iaa_wq_release(struct percpu_ref *ref)
+{
+ struct iaa_wq *iaa_wq = container_of(ref, typeof(*iaa_wq), ref);
+
+ iaa_wq->free = true;
+}
+
static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
struct iaa_wq **new_wq)
{
@@ -739,11 +746,20 @@ static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
struct iaa_wq *iaa_wq;
+ int ret;
iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL);
if (!iaa_wq)
return -ENOMEM;
+ ret = percpu_ref_init(&iaa_wq->ref, __iaa_wq_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+
+ if (ret) {
+ kfree(iaa_wq);
+ return -ENOMEM;
+ }
+
iaa_wq->wq = wq;
iaa_wq->iaa_device = iaa_device;
idxd_wq_set_private(wq, iaa_wq);
@@ -819,6 +835,9 @@ static void __free_iaa_wq(struct iaa_wq *iaa_wq)
if (!iaa_wq)
return;
+ WARN_ON(!percpu_ref_is_zero(&iaa_wq->ref));
+ percpu_ref_exit(&iaa_wq->ref);
+
iaa_device = iaa_wq->iaa_device;
if (iaa_device->n_wq == 0)
free_iaa_device(iaa_wq->iaa_device);
@@ -913,53 +932,6 @@ static int save_iaa_wq(struct idxd_wq *wq)
return ret;
}
-static int iaa_wq_get(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct iaa_wq *iaa_wq;
- int ret = 0;
-
- spin_lock(&idxd->dev_lock);
- iaa_wq = idxd_wq_get_private(wq);
- if (iaa_wq && !iaa_wq->remove) {
- iaa_wq->ref++;
- idxd_wq_get(wq);
- } else {
- ret = -ENODEV;
- }
- spin_unlock(&idxd->dev_lock);
-
- return ret;
-}
-
-static int iaa_wq_put(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct iaa_wq *iaa_wq;
- bool free = false;
- int ret = 0;
-
- spin_lock(&idxd->dev_lock);
- iaa_wq = idxd_wq_get_private(wq);
- if (iaa_wq) {
- iaa_wq->ref--;
- if (iaa_wq->ref == 0 && iaa_wq->remove) {
- idxd_wq_set_private(wq, NULL);
- free = true;
- }
- idxd_wq_put(wq);
- } else {
- ret = -ENODEV;
- }
- spin_unlock(&idxd->dev_lock);
- if (free) {
- __free_iaa_wq(iaa_wq);
- kfree(iaa_wq);
- }
-
- return ret;
-}
-
/***************************************************************
* Mapping IAA devices and wqs to cores with per-cpu wq_tables.
***************************************************************/
@@ -1773,7 +1745,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
if (free_desc)
idxd_free_desc(idxd_desc->wq, idxd_desc);
- iaa_wq_put(idxd_desc->wq);
+ percpu_ref_put(&iaa_wq->ref);
}
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
@@ -2004,19 +1976,13 @@ static int iaa_comp_acompress(struct acomp_req *req)
cpu = get_cpu();
wq = comp_wq_table_next_wq(cpu);
put_cpu();
- if (!wq) {
- pr_debug("no wq configured for cpu=%d\n", cpu);
- return -ENODEV;
- }
- ret = iaa_wq_get(wq);
- if (ret) {
+ iaa_wq = wq ? idxd_wq_get_private(wq) : NULL;
+ if (unlikely(!iaa_wq || !percpu_ref_tryget(&iaa_wq->ref))) {
pr_debug("no wq available for cpu=%d\n", cpu);
return -ENODEV;
}
- iaa_wq = idxd_wq_get_private(wq);
-
dev = &wq->idxd->pdev->dev;
nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -2069,7 +2035,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
err_map_dst:
dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
out:
- iaa_wq_put(wq);
+ percpu_ref_put(&iaa_wq->ref);
return ret;
}
@@ -2091,19 +2057,13 @@ static int iaa_comp_adecompress(struct acomp_req *req)
cpu = get_cpu();
wq = decomp_wq_table_next_wq(cpu);
put_cpu();
- if (!wq) {
- pr_debug("no wq configured for cpu=%d\n", cpu);
- return -ENODEV;
- }
- ret = iaa_wq_get(wq);
- if (ret) {
+ iaa_wq = wq ? idxd_wq_get_private(wq) : NULL;
+ if (unlikely(!iaa_wq || !percpu_ref_tryget(&iaa_wq->ref))) {
pr_debug("no wq available for cpu=%d\n", cpu);
- return -ENODEV;
+ return deflate_generic_decompress(req);
}
- iaa_wq = idxd_wq_get_private(wq);
-
dev = &wq->idxd->pdev->dev;
nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -2138,7 +2098,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
err_map_dst:
dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
out:
- iaa_wq_put(wq);
+ percpu_ref_put(&iaa_wq->ref);
return ret;
}
@@ -2311,7 +2271,6 @@ static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
struct idxd_device *idxd = wq->idxd;
struct iaa_wq *iaa_wq;
- bool free = false;
atomic_set(&iaa_crypto_enabled, 0);
idxd_wq_quiesce(wq);
@@ -2332,18 +2291,18 @@ static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
goto out;
}
- if (iaa_wq->ref) {
- iaa_wq->remove = true;
- } else {
- wq = iaa_wq->wq;
- idxd_wq_set_private(wq, NULL);
- free = true;
- }
+ /* Drop the initial reference. */
+ percpu_ref_kill(&iaa_wq->ref);
+
+ while (!iaa_wq->free)
+ cpu_relax();
+
+ __free_iaa_wq(iaa_wq);
+
+ idxd_wq_set_private(wq, NULL);
spin_unlock(&idxd->dev_lock);
- if (free) {
- __free_iaa_wq(iaa_wq);
- kfree(iaa_wq);
- }
+
+ kfree(iaa_wq);
idxd_drv_disable_wq(wq);
--
2.27.0