From: Alistair Popple <apopple@nvidia.com>
To: linux-mm@kvack.org, cgroups@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, jgg@nvidia.com,
jhubbard@nvidia.com, tjmercier@google.com, hannes@cmpxchg.org,
surenb@google.com, mkoutny@suse.com, daniel@ffwll.ch,
"Daniel P . Berrange" <berrange@redhat.com>,
Alex Williamson <alex.williamson@redhat.com>,
Alistair Popple <apopple@nvidia.com>,
Jens Axboe <axboe@kernel.dk>,
Pavel Begunkov <asml.silence@gmail.com>,
io-uring@vger.kernel.org
Subject: [PATCH 09/19] io_uring: convert to use vm_account
Date: Mon, 6 Feb 2023 18:47:46 +1100 [thread overview]
Message-ID: <44e6ead48bc53789191b22b0e140aeb82459e75f.1675669136.git-series.apopple@nvidia.com> (raw)
In-Reply-To: <cover.c238416f0e82377b449846dbb2459ae9d7030c8e.1675669136.git-series.apopple@nvidia.com>
Convert io_uring to use vm_account instead of directly charging pages
against the user/mm. Rather than charge pages to both user->locked_vm
and mm->pinned_vm, this will only charge pages to user->locked_vm.
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: io-uring@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
include/linux/io_uring_types.h | 4 ++--
io_uring/io_uring.c | 20 +++---------------
io_uring/notif.c | 4 ++--
io_uring/notif.h | 10 +++------
io_uring/rsrc.c | 38 +++--------------------------------
io_uring/rsrc.h | 9 +--------
6 files changed, 17 insertions(+), 68 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 128a67a..45ac75d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -5,6 +5,7 @@
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
+#include <linux/vm_account.h>
#include <uapi/linux/io_uring.h>
struct io_wq_work_node {
@@ -343,8 +344,7 @@ struct io_ring_ctx {
struct io_wq_hash *hash_map;
/* Only used for accounting purposes */
- struct user_struct *user;
- struct mm_struct *mm_account;
+ struct vm_account vm_account;
/* ctx exit and cancelation */
struct llist_head fallback_llist;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0a4efad..912da4f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2744,15 +2744,11 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
#endif
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
- if (ctx->mm_account) {
- mmdrop(ctx->mm_account);
- ctx->mm_account = NULL;
- }
+ vm_account_release(&ctx->vm_account);
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
percpu_ref_exit(&ctx->refs);
- free_uid(ctx->user);
io_req_caches_free(ctx);
if (ctx->hash_map)
io_wq_put_hash(ctx->hash_map);
@@ -3585,8 +3581,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
ctx->syscall_iopoll = 1;
ctx->compat = in_compat_syscall();
- if (!capable(CAP_IPC_LOCK))
- ctx->user = get_uid(current_user());
+ vm_account_init(&ctx->vm_account, current, current_user(),
+ VM_ACCOUNT_USER |
+ (capable(CAP_IPC_LOCK) ? VM_ACCOUNT_BYPASS : 0));
/*
* For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
@@ -3619,15 +3616,6 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
goto err;
}
- /*
- * This is just grabbed for accounting purposes. When a process exits,
- * the mm is exited and dropped before the files, hence we need to hang
- * on to this mm purely for the purposes of being able to unaccount
- * memory (locked/pinned vm). It's not used for anything else.
- */
- mmgrab(current->mm);
- ctx->mm_account = current->mm;
-
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
diff --git a/io_uring/notif.c b/io_uring/notif.c
index c4bb793..0f589fa 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -17,8 +17,8 @@ static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
- if (nd->account_pages && ctx->user) {
- __io_unaccount_mem(ctx->user, nd->account_pages);
+ if (nd->account_pages) {
+ vm_unaccount_pinned(&ctx->vm_account, nd->account_pages);
nd->account_pages = 0;
}
io_req_task_complete(notif, locked);
diff --git a/io_uring/notif.h b/io_uring/notif.h
index c88c800..e2cb44a 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -43,11 +43,9 @@ static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
unsigned nr_pages = (len >> PAGE_SHIFT) + 2;
int ret;
- if (ctx->user) {
- ret = __io_account_mem(ctx->user, nr_pages);
- if (ret)
- return ret;
- nd->account_pages += nr_pages;
- }
+ ret = __io_account_mem(&ctx->vm_account, nr_pages);
+ if (ret)
+ return ret;
+ nd->account_pages += nr_pages;
return 0;
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 18de10c..aa44528 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -42,49 +42,19 @@ void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
}
}
-int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
+int __io_account_mem(struct vm_account *vm_account, unsigned long nr_pages)
{
- unsigned long page_limit, cur_pages, new_pages;
-
- if (!nr_pages)
- return 0;
-
- /* Don't allow more pages than we can safely lock */
- page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- cur_pages = atomic_long_read(&user->locked_vm);
- do {
- new_pages = cur_pages + nr_pages;
- if (new_pages > page_limit)
- return -ENOMEM;
- } while (!atomic_long_try_cmpxchg(&user->locked_vm,
- &cur_pages, new_pages));
- return 0;
+ return vm_account_pinned(vm_account, nr_pages);
}
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
- if (ctx->user)
- __io_unaccount_mem(ctx->user, nr_pages);
-
- if (ctx->mm_account)
- atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
+ vm_unaccount_pinned(&ctx->vm_account, nr_pages);
}
static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
- int ret;
-
- if (ctx->user) {
- ret = __io_account_mem(ctx->user, nr_pages);
- if (ret)
- return ret;
- }
-
- if (ctx->mm_account)
- atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
-
- return 0;
+ return vm_account_pinned(&ctx->vm_account, nr_pages);
}
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 2b87436..d8833d0 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -167,12 +167,5 @@ static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
-
-static inline void __io_unaccount_mem(struct user_struct *user,
- unsigned long nr_pages)
-{
- atomic_long_sub(nr_pages, &user->locked_vm);
-}
-
+int __io_account_mem(struct vm_account *vm_account, unsigned long nr_pages);
#endif
--
git-series 0.9.1
next prev parent reply other threads:[~2023-02-06 7:49 UTC|newest]
Thread overview: 71+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-06 7:47 [PATCH 00/19] mm: Introduce a cgroup to limit the amount of locked and pinned memory Alistair Popple
2023-02-06 7:47 ` [PATCH 01/19] mm: Introduce vm_account Alistair Popple
2023-02-06 7:47 ` [PATCH 02/19] drivers/vhost: Convert to use vm_account Alistair Popple
2023-02-06 7:47 ` [PATCH 03/19] drivers/vdpa: Convert vdpa to use the new vm_structure Alistair Popple
2023-02-06 7:47 ` [PATCH 04/19] infiniband/umem: Convert to use vm_account Alistair Popple
2023-02-06 7:47 ` [PATCH 05/19] RMDA/siw: " Alistair Popple
2023-02-12 17:32 ` Bernard Metzler
2023-02-06 7:47 ` [PATCH 06/19] RDMA/usnic: convert " Alistair Popple
2023-02-06 7:47 ` [PATCH 07/19] vfio/type1: Charge pinned pages to pinned_vm instead of locked_vm Alistair Popple
2023-02-06 7:47 ` [PATCH 08/19] vfio/spapr_tce: Convert accounting to pinned_vm Alistair Popple
2023-02-06 7:47 ` Alistair Popple [this message]
2023-02-06 15:29 ` [PATCH 09/19] io_uring: convert to use vm_account Jens Axboe
2023-02-07 1:03 ` Alistair Popple
2023-02-07 14:28 ` Jens Axboe
2023-02-07 14:55 ` Jason Gunthorpe
2023-02-07 17:05 ` Jens Axboe
2023-02-13 11:30 ` Alistair Popple
2023-02-06 7:47 ` [PATCH 10/19] net: skb: Switch to using vm_account Alistair Popple
2023-02-06 7:47 ` [PATCH 11/19] xdp: convert to use vm_account Alistair Popple
2023-02-06 7:47 ` [PATCH 12/19] kvm/book3s_64_vio: Convert account_locked_vm() to vm_account_pinned() Alistair Popple
2023-02-06 7:47 ` [PATCH 13/19] fpga: dfl: afu: convert to use vm_account Alistair Popple
2023-02-06 7:47 ` [PATCH 14/19] mm: Introduce a cgroup for pinned memory Alistair Popple
2023-02-06 21:01 ` Yosry Ahmed
2023-02-06 21:14 ` Tejun Heo
2023-02-06 22:32 ` Yosry Ahmed
2023-02-06 22:36 ` Tejun Heo
2023-02-06 22:39 ` Yosry Ahmed
2023-02-06 23:25 ` Tejun Heo
2023-02-06 23:34 ` Yosry Ahmed
2023-02-06 23:40 ` Jason Gunthorpe
2023-02-07 0:32 ` Tejun Heo
2023-02-07 12:19 ` Jason Gunthorpe
2023-02-15 19:00 ` Michal Hocko
2023-02-15 19:07 ` Jason Gunthorpe
2023-02-16 8:04 ` Michal Hocko
2023-02-16 12:45 ` Jason Gunthorpe
2023-02-21 16:51 ` Tejun Heo
2023-02-21 17:25 ` Jason Gunthorpe
2023-02-21 17:29 ` Tejun Heo
2023-02-21 17:51 ` Jason Gunthorpe
2023-02-21 18:07 ` Tejun Heo
2023-02-21 19:26 ` Jason Gunthorpe
2023-02-21 19:45 ` Tejun Heo
2023-02-21 19:49 ` Tejun Heo
2023-02-21 19:57 ` Jason Gunthorpe
2023-02-22 11:38 ` Alistair Popple
2023-02-22 12:57 ` Jason Gunthorpe
2023-02-22 22:59 ` Alistair Popple
2023-02-23 0:05 ` Christoph Hellwig
2023-02-23 0:35 ` Alistair Popple
2023-02-23 1:53 ` Jason Gunthorpe
2023-02-23 9:12 ` Daniel P. Berrangé
2023-02-23 17:31 ` Jason Gunthorpe
2023-02-23 17:18 ` T.J. Mercier
2023-02-23 17:28 ` Jason Gunthorpe
2023-02-23 18:03 ` Yosry Ahmed
2023-02-23 18:10 ` Jason Gunthorpe
2023-02-23 18:14 ` Yosry Ahmed
2023-02-23 18:15 ` Tejun Heo
2023-02-23 18:17 ` Jason Gunthorpe
2023-02-23 18:22 ` Tejun Heo
2023-02-07 1:00 ` Waiman Long
2023-02-07 1:03 ` Tejun Heo
2023-02-07 1:50 ` Alistair Popple
2023-02-06 7:47 ` [PATCH 15/19] mm/util: Extend vm_account to charge pages against the pin cgroup Alistair Popple
2023-02-06 7:47 ` [PATCH 16/19] mm/util: Refactor account_locked_vm Alistair Popple
2023-02-06 7:47 ` [PATCH 17/19] mm: Convert mmap and mlock to use account_locked_vm Alistair Popple
2023-02-06 7:47 ` [PATCH 18/19] mm/mmap: Charge locked memory to pins cgroup Alistair Popple
2023-02-06 21:12 ` Yosry Ahmed
2023-02-06 7:47 ` [PATCH 19/19] selftests/vm: Add pins-cgroup selftest for mlock/mmap Alistair Popple
2023-02-16 11:01 ` [PATCH 00/19] mm: Introduce a cgroup to limit the amount of locked and pinned memory David Hildenbrand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=44e6ead48bc53789191b22b0e140aeb82459e75f.1675669136.git-series.apopple@nvidia.com \
--to=apopple@nvidia.com \
--cc=alex.williamson@redhat.com \
--cc=asml.silence@gmail.com \
--cc=axboe@kernel.dk \
--cc=berrange@redhat.com \
--cc=cgroups@vger.kernel.org \
--cc=daniel@ffwll.ch \
--cc=hannes@cmpxchg.org \
--cc=io-uring@vger.kernel.org \
--cc=jgg@nvidia.com \
--cc=jhubbard@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mkoutny@suse.com \
--cc=surenb@google.com \
--cc=tjmercier@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox