From: Alistair Popple <apopple@nvidia.com>
To: linux-mm@kvack.org, cgroups@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, jgg@nvidia.com,
jhubbard@nvidia.com, tjmercier@google.com, hannes@cmpxchg.org,
surenb@google.com, mkoutny@suse.com, daniel@ffwll.ch,
Alistair Popple <apopple@nvidia.com>,
Jens Axboe <axboe@kernel.dk>,
Pavel Begunkov <asml.silence@gmail.com>,
io-uring@vger.kernel.org
Subject: [RFC PATCH 09/19] io_uring: convert to use vm_account
Date: Tue, 24 Jan 2023 16:42:38 +1100
Message-ID: <9f63cf4ab74d6e56e434c1c3d7c98352bb282895.1674538665.git-series.apopple@nvidia.com>
In-Reply-To: <cover.f52b9eb2792bccb8a9ecd6bc95055705cfe2ae03.1674538665.git-series.apopple@nvidia.com>

Convert io_uring to use vm_account instead of directly charging pages
against the user/mm. Rather than charging pages to both user->locked_vm
and mm->pinned_vm, this will only charge pages to user->locked_vm.
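
For reference, a minimal sketch of the accounting lifecycle the
converted code follows, using the vm_account helpers introduced earlier
in this series (names and arguments as they appear in the hunks below;
the exact semantics are defined by that earlier patch):

    struct vm_account vm_account;
    int ret;

    /* Setup: record which task/user to charge; CAP_IPC_LOCK holders
     * bypass the limit check via VM_ACCOUNT_BYPASS. */
    vm_account_init(&vm_account, current, current_user(),
                    VM_ACCOUNT_USER |
                    (capable(CAP_IPC_LOCK) ? VM_ACCOUNT_BYPASS : 0));

    /* Pinning: charge nr_pages to user->locked_vm, failing if the
     * caller's limit would be exceeded. */
    ret = vm_account_pinned(&vm_account, nr_pages);
    if (ret)
        return ret;

    /* Unpinning: undo the charge. */
    vm_unaccount_pinned(&vm_account, nr_pages);

    /* Teardown: drop any references taken by vm_account_init(). */
    vm_account_release(&vm_account);

The separate get_uid()/mmgrab() references the ctx previously held
purely for accounting go away with this, which is why io_ring_ctx_free()
now only needs the vm_account_release() call.
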
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: io-uring@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
include/linux/io_uring_types.h | 3 +--
io_uring/io_uring.c | 20 +++---------------
io_uring/notif.c | 4 ++--
io_uring/notif.h | 10 +++------
io_uring/rsrc.c | 38 +++--------------------------------
io_uring/rsrc.h | 9 +--------
6 files changed, 16 insertions(+), 68 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 128a67a..d81aceb 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -343,8 +343,7 @@ struct io_ring_ctx {
struct io_wq_hash *hash_map;
/* Only used for accounting purposes */
- struct user_struct *user;
- struct mm_struct *mm_account;
+ struct vm_account vm_account;
/* ctx exit and cancelation */
struct llist_head fallback_llist;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0a4efad..912da4f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2744,15 +2744,11 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
#endif
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
- if (ctx->mm_account) {
- mmdrop(ctx->mm_account);
- ctx->mm_account = NULL;
- }
+ vm_account_release(&ctx->vm_account);
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
percpu_ref_exit(&ctx->refs);
- free_uid(ctx->user);
io_req_caches_free(ctx);
if (ctx->hash_map)
io_wq_put_hash(ctx->hash_map);
@@ -3585,8 +3581,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
ctx->syscall_iopoll = 1;
ctx->compat = in_compat_syscall();
- if (!capable(CAP_IPC_LOCK))
- ctx->user = get_uid(current_user());
+ vm_account_init(&ctx->vm_account, current, current_user(),
+ VM_ACCOUNT_USER |
+ (capable(CAP_IPC_LOCK) ? VM_ACCOUNT_BYPASS : 0));
/*
* For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
@@ -3619,15 +3616,6 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
goto err;
}
- /*
- * This is just grabbed for accounting purposes. When a process exits,
- * the mm is exited and dropped before the files, hence we need to hang
- * on to this mm purely for the purposes of being able to unaccount
- * memory (locked/pinned vm). It's not used for anything else.
- */
- mmgrab(current->mm);
- ctx->mm_account = current->mm;
-
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
diff --git a/io_uring/notif.c b/io_uring/notif.c
index c4bb793..0f589fa 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -17,8 +17,8 @@ static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
- if (nd->account_pages && ctx->user) {
- __io_unaccount_mem(ctx->user, nd->account_pages);
+ if (nd->account_pages) {
+ vm_unaccount_pinned(&ctx->vm_account, nd->account_pages);
nd->account_pages = 0;
}
io_req_task_complete(notif, locked);
diff --git a/io_uring/notif.h b/io_uring/notif.h
index c88c800..e2cb44a 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -43,11 +43,9 @@ static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
unsigned nr_pages = (len >> PAGE_SHIFT) + 2;
int ret;
- if (ctx->user) {
- ret = __io_account_mem(ctx->user, nr_pages);
- if (ret)
- return ret;
- nd->account_pages += nr_pages;
- }
+ ret = __io_account_mem(&ctx->vm_account, nr_pages);
+ if (ret)
+ return ret;
+ nd->account_pages += nr_pages;
return 0;
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 18de10c..aa44528 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -42,49 +42,19 @@ void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
}
}
-int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
+int __io_account_mem(struct vm_account *vm_account, unsigned long nr_pages)
{
- unsigned long page_limit, cur_pages, new_pages;
-
- if (!nr_pages)
- return 0;
-
- /* Don't allow more pages than we can safely lock */
- page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- cur_pages = atomic_long_read(&user->locked_vm);
- do {
- new_pages = cur_pages + nr_pages;
- if (new_pages > page_limit)
- return -ENOMEM;
- } while (!atomic_long_try_cmpxchg(&user->locked_vm,
- &cur_pages, new_pages));
- return 0;
+ return vm_account_pinned(vm_account, nr_pages);
}
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
- if (ctx->user)
- __io_unaccount_mem(ctx->user, nr_pages);
-
- if (ctx->mm_account)
- atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
+ vm_unaccount_pinned(&ctx->vm_account, nr_pages);
}
static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
- int ret;
-
- if (ctx->user) {
- ret = __io_account_mem(ctx->user, nr_pages);
- if (ret)
- return ret;
- }
-
- if (ctx->mm_account)
- atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
-
- return 0;
+ return vm_account_pinned(&ctx->vm_account, nr_pages);
}
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 2b87436..d8833d0 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -167,12 +167,5 @@ static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
-
-static inline void __io_unaccount_mem(struct user_struct *user,
- unsigned long nr_pages)
-{
- atomic_long_sub(nr_pages, &user->locked_vm);
-}
-
+int __io_account_mem(struct vm_account *vm_account, unsigned long nr_pages);
#endif
--
git-series 0.9.1
Thread overview: 56+ messages
2023-01-24 5:42 [RFC PATCH 00/19] mm: Introduce a cgroup to limit the amount of locked and pinned memory Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 01/19] mm: Introduce vm_account Alistair Popple
2023-01-24 6:29 ` Christoph Hellwig
2023-01-24 14:32 ` Jason Gunthorpe
2023-01-30 11:36 ` Alistair Popple
2023-01-31 14:00 ` David Hildenbrand
2023-01-24 5:42 ` [RFC PATCH 02/19] drivers/vhost: Convert to use vm_account Alistair Popple
2023-01-24 5:55 ` Michael S. Tsirkin
2023-01-30 10:43 ` Alistair Popple
2023-01-24 14:34 ` Jason Gunthorpe
2023-01-24 5:42 ` [RFC PATCH 03/19] drivers/vdpa: Convert vdpa to use the new vm_structure Alistair Popple
2023-01-24 14:35 ` Jason Gunthorpe
2023-01-24 5:42 ` [RFC PATCH 04/19] infiniband/umem: Convert to use vm_account Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 05/19] RMDA/siw: " Alistair Popple
2023-01-24 14:37 ` Jason Gunthorpe
2023-01-24 15:22 ` Bernard Metzler
2023-01-24 15:56 ` Bernard Metzler
2023-01-30 11:34 ` Alistair Popple
2023-01-30 13:27 ` Bernard Metzler
2023-01-24 5:42 ` [RFC PATCH 06/19] RDMA/usnic: convert " Alistair Popple
2023-01-24 14:41 ` Jason Gunthorpe
2023-01-30 11:10 ` Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 07/19] vfio/type1: Charge pinned pages to pinned_vm instead of locked_vm Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 08/19] vfio/spapr_tce: Convert accounting to pinned_vm Alistair Popple
2023-01-24 5:42 ` Alistair Popple [this message]
2023-01-24 14:44 ` [RFC PATCH 09/19] io_uring: convert to use vm_account Jason Gunthorpe
2023-01-30 11:12 ` Alistair Popple
2023-01-30 13:21 ` Jason Gunthorpe
2023-01-24 5:42 ` [RFC PATCH 10/19] net: skb: Switch to using vm_account Alistair Popple
2023-01-24 14:51 ` Jason Gunthorpe
2023-01-30 11:17 ` Alistair Popple
2023-02-06 4:36 ` Alistair Popple
2023-02-06 13:14 ` Jason Gunthorpe
2023-01-24 5:42 ` [RFC PATCH 11/19] xdp: convert to use vm_account Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 12/19] kvm/book3s_64_vio: Convert account_locked_vm() to vm_account_pinned() Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 13/19] fpga: dfl: afu: convert to use vm_account Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 14/19] mm: Introduce a cgroup for pinned memory Alistair Popple
2023-01-27 21:44 ` Tejun Heo
2023-01-30 13:20 ` Jason Gunthorpe
2023-01-24 5:42 ` [RFC PATCH 15/19] mm/util: Extend vm_account to charge pages against the pin cgroup Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 16/19] mm/util: Refactor account_locked_vm Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 17/19] mm: Convert mmap and mlock to use account_locked_vm Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 18/19] mm/mmap: Charge locked memory to pins cgroup Alistair Popple
2023-01-24 5:42 ` [RFC PATCH 19/19] selftests/vm: Add pins-cgroup selftest for mlock/mmap Alistair Popple
2023-01-24 18:26 ` [RFC PATCH 00/19] mm: Introduce a cgroup to limit the amount of locked and pinned memory Yosry Ahmed
2023-01-31 0:54 ` Alistair Popple
2023-01-31 5:14 ` Yosry Ahmed
2023-01-31 11:22 ` Alistair Popple
2023-01-31 19:49 ` Yosry Ahmed
2023-01-24 20:12 ` Jason Gunthorpe
2023-01-31 13:57 ` David Hildenbrand
2023-01-31 14:03 ` Jason Gunthorpe
2023-01-31 14:06 ` David Hildenbrand
2023-01-31 14:10 ` Jason Gunthorpe
2023-01-31 14:15 ` David Hildenbrand
2023-01-31 14:21 ` Jason Gunthorpe