From: Peter Zijlstra <peterz@infradead.org>
To: linux-kernel@vger.kernel.org, tglx@linutronix.de
Cc: x86@kernel.org, Linus Torvalds <torvalds@linux-foundation.org>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Dave Hansen <dave.hansen@intel.com>,
Borislav Petkov <bpetkov@suse.de>,
Greg KH <gregkh@linuxfoundation.org>,
keescook@google.com, hughd@google.com,
Brian Gerst <brgerst@gmail.com>,
Josh Poimboeuf <jpoimboe@redhat.com>,
Denys Vlasenko <dvlasenk@redhat.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Juergen Gross <jgross@suse.com>,
David Laight <David.Laight@aculab.com>,
Eduardo Valentin <eduval@amazon.com>,
aliguori@amazon.com, Will Deacon <will.deacon@arm.com>,
linux-mm@kvack.org, kirill.shutemov@linux.intel.com,
dan.j.williams@intel.com
Subject: [PATCH v2 16/17] x86/ldt: Add VMA management code
Date: Thu, 14 Dec 2017 12:27:42 +0100 [thread overview]
Message-ID: <20171214113851.897611055@infradead.org> (raw)
In-Reply-To: <20171214112726.742649793@infradead.org>
[-- Attachment #1: x86-ldt--Add-VMA-management-code.patch --]
[-- Type: text/plain, Size: 3885 bytes --]
Add the VMA management code to LDT which allows installing the LDT as a
special mapping, like the VDSO and uprobes. The mapping is in the user
address space, but read-only and without the user bit set. Split out for
ease of review.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
arch/x86/kernel/ldt.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 102 insertions(+), 1 deletion(-)
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -31,6 +31,7 @@
struct ldt_mapping {
struct ldt_struct ldts[2];
unsigned int ldt_index;
+ unsigned int ldt_mapped; /* non-zero while the LDT VMA is installed (set in ldt_mmap(), cleared in ldt_close()) */
};
/* After calling this, the LDT is immutable. */
@@ -177,6 +178,105 @@ static void cleanup_ldt_struct(struct ld
ldt->nr_entries = 0;
}
+/*
+ * Fault handler for the special LDT mapping.
+ *
+ * The VMA spans room for both LDT slots of ldt_mapping::ldts[]: page
+ * offsets below LDT_ENTRIES_PAGES belong to ldts[0], the next
+ * LDT_ENTRIES_PAGES to ldts[1].  Hands back the backing page with an
+ * extra reference, or VM_FAULT_SIGBUS for offsets beyond both slots or
+ * for pages that have not been allocated.
+ */
+static int ldt_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ldt_mapping *lmap = vma->vm_mm->context.ldt_mapping;
+ struct ldt_struct *ldt = lmap->ldts;
+ pgoff_t pgo = vmf->pgoff;
+ struct page *page;
+
+ /* Offsets past the first slot select the second LDT. */
+ if (pgo >= LDT_ENTRIES_PAGES) {
+ pgo -= LDT_ENTRIES_PAGES;
+ ldt++;
+ }
+ /* Still out of range: beyond both slots. */
+ if (pgo >= LDT_ENTRIES_PAGES)
+ return VM_FAULT_SIGBUS;
+
+ page = ldt->pages[pgo];
+ if (!page)
+ return VM_FAULT_SIGBUS;
+ /* Reference for the fault path; the core MM drops it after mapping. */
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+/*
+ * The LDT mapping must not move: ldt_alloc_vma() caches the mapped
+ * addresses in ldt_mapping::ldts[].entries, so mremap() is rejected.
+ */
+static int ldt_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ return -EINVAL;
+}
+
+/*
+ * Invoked when the LDT VMA goes away (e.g. munmap()).  Uninstalls the
+ * current LDT from the mm, cleans it up and marks the mapping gone, all
+ * under mm->context.lock so it serializes against ldt_install().
+ */
+static void ldt_close(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct ldt_struct *ldt;
+
+ /*
+ * Orders against ldt_install().
+ */
+ mutex_lock(&mm->context.lock);
+ ldt = mm->context.ldt;
+ ldt_install_mm(mm, NULL); /* no LDT for this mm from now on */
+ cleanup_ldt_struct(ldt);
+ mm->context.ldt_mapping->ldt_mapped = 0;
+ mutex_unlock(&mm->context.lock);
+}
+
+/* Special-mapping ops backing the "[ldt]" VMA shown in /proc/PID/maps. */
+static const struct vm_special_mapping ldt_special_mapping = {
+ .name = "[ldt]",
+ .fault = ldt_fault,
+ .mremap = ldt_mremap,
+ .close = ldt_close,
+};
+
+/*
+ * Install the special LDT mapping into @mm and record the user
+ * addresses of both LDT slots in @lmap.
+ *
+ * The area is sized for two LDTs and placed via get_unmapped_area()
+ * below TASK_SIZE - PAGE_SIZE.  The mapping is read-only, not
+ * user-accessible (VM_NOUSER), wiped on fork and shared.
+ *
+ * Returns the new VMA or an ERR_PTR().  Caller must hold mmap_sem for
+ * writing — NOTE(review): implied by the _install_special_mapping()
+ * call path; confirm against the caller (ldt_mmap()).
+ */
+static struct vm_area_struct *ldt_alloc_vma(struct mm_struct *mm,
+ struct ldt_mapping *lmap)
+{
+ unsigned long vm_flags, size;
+ struct vm_area_struct *vma;
+ unsigned long addr;
+
+ size = 2 * LDT_ENTRIES_MAP_SIZE;
+ addr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, size, 0, 0);
+ if (IS_ERR_VALUE(addr))
+ return ERR_PTR(addr);
+
+ vm_flags = VM_READ | VM_WIPEONFORK | VM_NOUSER | VM_SHARED;
+ vma = _install_special_mapping(mm, addr, size, vm_flags,
+ &ldt_special_mapping);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Cache the user addresses of the two LDT slots back to back. */
+ lmap->ldts[0].entries = (struct desc_struct *) addr;
+ addr += LDT_ENTRIES_MAP_SIZE;
+ lmap->ldts[1].entries = (struct desc_struct *) addr;
+ return vma;
+}
+
+/*
+ * Create the LDT VMA for @mm under mmap_sem and flag @lmap as mapped.
+ * Returns 0 on success, -EINTR if the mmap_sem wait was interrupted, or
+ * the error from ldt_alloc_vma().
+ */
+static int ldt_mmap(struct mm_struct *mm, struct ldt_mapping *lmap)
+{
+ struct vm_area_struct *vma;
+ int ret = 0;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+ vma = ldt_alloc_vma(mm, lmap);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else {
+ /*
+ * The moment mmap_sem is released munmap() can observe
+ * the mapping and make it go away through ldt_close(). But
+ * for now there is a mapping.
+ */
+ lmap->ldt_mapped = 1;
+ }
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
@@ -289,7 +389,8 @@ static int read_ldt(void __user *ptr, un
down_read(&mm->context.ldt_usr_sem);
- ldt = mm->context.ldt;
+ /* Might race against vm_unmap, which installs a NULL LDT */
+ ldt = READ_ONCE(mm->context.ldt);
if (!ldt)
goto out_unlock;
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
next prev parent reply other threads:[~2017-12-14 11:43 UTC|newest]
Thread overview: 76+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-12-14 11:27 [PATCH v2 00/17] x86/ldt: Use a VMA based read only mapping Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 01/17] mm/gup: Fixup p*_access_permitted() Peter Zijlstra
2017-12-14 12:41 ` Peter Zijlstra
2017-12-14 14:37 ` Peter Zijlstra
2017-12-14 20:44 ` Dave Hansen
2017-12-14 20:54 ` Peter Zijlstra
2017-12-14 21:18 ` Peter Zijlstra
2017-12-15 5:04 ` Dave Hansen
2017-12-15 6:09 ` Linus Torvalds
2017-12-15 7:51 ` Peter Zijlstra
2017-12-16 0:20 ` Linus Torvalds
2017-12-16 0:29 ` Dan Williams
2017-12-16 1:10 ` Linus Torvalds
2017-12-16 1:25 ` Dave Hansen
2017-12-16 2:28 ` Linus Torvalds
2017-12-16 2:48 ` Al Viro
2017-12-16 2:52 ` Linus Torvalds
2017-12-16 3:00 ` Linus Torvalds
2017-12-16 3:21 ` Dave Hansen
2017-12-16 1:29 ` Dan Williams
2017-12-16 0:31 ` Al Viro
2017-12-16 1:05 ` Linus Torvalds
2017-12-15 8:00 ` Peter Zijlstra
2017-12-15 10:25 ` Peter Zijlstra
2017-12-15 11:38 ` Peter Zijlstra
2017-12-15 16:38 ` Dan Williams
2017-12-18 11:54 ` Peter Zijlstra
2017-12-18 18:42 ` Dan Williams
2017-12-15 14:04 ` Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 02/17] mm: Exempt special mappings from mlock(), mprotect() and madvise() Peter Zijlstra
2017-12-14 16:19 ` Andy Lutomirski
2017-12-14 17:36 ` Peter Zijlstra
2018-01-02 16:44 ` Dmitry Safonov
2017-12-14 11:27 ` [PATCH v2 03/17] arch: Allow arch_dup_mmap() to fail Peter Zijlstra
2017-12-14 16:22 ` Andy Lutomirski
2017-12-14 11:27 ` [PATCH v2 04/17] x86/ldt: Rework locking Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 05/17] x86/ldt: Prevent ldt inheritance on exec Peter Zijlstra
2017-12-14 16:32 ` Andy Lutomirski
2017-12-14 11:27 ` [PATCH v2 06/17] x86/ldt: Do not install LDT for kernel threads Peter Zijlstra
2017-12-14 19:43 ` Peter Zijlstra
2017-12-14 21:27 ` Andy Lutomirski
2017-12-14 11:27 ` [PATCH v2 07/17] mm/softdirty: Move VM_SOFTDIRTY into high bits Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 08/17] mm/x86: Allow special mappings with user access cleared Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 09/17] mm: Provide vm_special_mapping::close Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 10/17] selftest/x86: Implement additional LDT selftests Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 11/17] selftests/x86/ldt_gdt: Prepare for access bit forced Peter Zijlstra
2017-12-14 16:20 ` Andy Lutomirski
2017-12-14 19:43 ` Linus Torvalds
2017-12-14 21:22 ` Andy Lutomirski
2017-12-14 21:44 ` Linus Torvalds
2017-12-14 21:48 ` Linus Torvalds
2017-12-14 22:02 ` Peter Zijlstra
2017-12-14 22:14 ` Linus Torvalds
2017-12-14 22:24 ` Peter Zijlstra
2017-12-14 22:52 ` Linus Torvalds
2017-12-14 22:11 ` Andy Lutomirski
2017-12-14 22:15 ` Linus Torvalds
2017-12-14 22:30 ` Andy Lutomirski
2017-12-14 22:23 ` Thomas Gleixner
2017-12-14 22:50 ` Linus Torvalds
2017-12-14 11:27 ` [PATCH v2 12/17] mm: Make populate_vma_page_range() available Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 13/17] x86/mm: Force LDT desc accessed bit Peter Zijlstra
2017-12-14 16:21 ` Andy Lutomirski
2017-12-14 11:27 ` [PATCH v2 14/17] x86/ldt: Reshuffle code Peter Zijlstra
2017-12-14 16:23 ` Andy Lutomirski
2017-12-14 16:31 ` Thomas Gleixner
2017-12-14 16:32 ` Thomas Gleixner
2017-12-14 16:34 ` Andy Lutomirski
2017-12-14 17:47 ` Peter Zijlstra
2017-12-14 11:27 ` [PATCH v2 15/17] x86/ldt: Prepare for VMA mapping Peter Zijlstra
2017-12-14 11:27 ` Peter Zijlstra [this message]
2017-12-14 11:27 ` [PATCH v2 17/17] x86/ldt: Make it read only VMA mapped Peter Zijlstra
2017-12-14 12:03 ` [PATCH v2 00/17] x86/ldt: Use a VMA based read only mapping Thomas Gleixner
2017-12-14 12:08 ` Peter Zijlstra
2017-12-14 16:35 ` Andy Lutomirski
2017-12-14 17:50 ` Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20171214113851.897611055@infradead.org \
--to=peterz@infradead.org \
--cc=David.Laight@aculab.com \
--cc=aliguori@amazon.com \
--cc=boris.ostrovsky@oracle.com \
--cc=bpetkov@suse.de \
--cc=brgerst@gmail.com \
--cc=dan.j.williams@intel.com \
--cc=dave.hansen@intel.com \
--cc=dvlasenk@redhat.com \
--cc=eduval@amazon.com \
--cc=gregkh@linuxfoundation.org \
--cc=hughd@google.com \
--cc=jgross@suse.com \
--cc=jpoimboe@redhat.com \
--cc=keescook@google.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
--cc=will.deacon@arm.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox