From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Linus Torvalds <torvalds@linux-foundation.org>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Dave Hansen <dave.hansen@intel.com>,
Borislav Petkov <bpetkov@suse.de>,
Greg KH <gregkh@linuxfoundation.org>,
keescook@google.com, hughd@google.com,
Brian Gerst <brgerst@gmail.com>,
Josh Poimboeuf <jpoimboe@redhat.com>,
Denys Vlasenko <dvlasenk@redhat.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Juergen Gross <jgross@suse.com>,
David Laight <David.Laight@aculab.com>,
Eduardo Valentin <eduval@amazon.com>,
aliguori@amazon.com, Will Deacon <will.deacon@arm.com>,
linux-mm@kvack.org
Subject: [patch 15/16] x86/ldt: Add VMA management code
Date: Tue, 12 Dec 2017 18:32:36 +0100
Message-ID: <20171212173334.505986831@linutronix.de>
In-Reply-To: <20171212173221.496222173@linutronix.de>
From: Thomas Gleixner <tglx@linutronix.de>
Add the VMA management code to the LDT, which allows the LDT to be installed
as a special mapping, like the VDSO and uprobes. The mapping lives in the user
address space, but is read-only and has the user bit cleared. Split out for
ease of review.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/ldt.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 102 insertions(+), 1 deletion(-)
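Note: the hunks below follow the same special-mapping pattern the VDSO uses: a
struct vm_special_mapping whose fault handler hands back a refcounted page,
installed into the mm with _install_special_mapping(). As a point of reference,
here is a minimal sketch of that pattern; the example_* names and the single
statically allocated backing page are illustrative only and are not part of
this patch.

	#include <linux/mm.h>
	#include <linux/mm_types.h>
	#include <linux/err.h>

	/* Hypothetical single backing page; real users manage their own pages. */
	static struct page *example_page;

	static int example_fault(const struct vm_special_mapping *sm,
				 struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		/* Only the first page of the mapping is backed. */
		if (vmf->pgoff != 0 || !example_page)
			return VM_FAULT_SIGBUS;

		get_page(example_page);		/* reference is consumed by the core fault code */
		vmf->page = example_page;
		return 0;
	}

	static const struct vm_special_mapping example_mapping = {
		.name	= "[example]",
		.fault	= example_fault,
	};

	/* Caller holds mm->mmap_sem for write, as ldt_mmap() does below. */
	static int example_install(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;

		vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ,
					       &example_mapping);
		return PTR_ERR_OR_ZERO(vma);
	}

The LDT code below does the same, except that the VMA covers two
LDT_ENTRIES_MAP_SIZE sized slots and the backing pages come from the two
ldt_structs of the ldt_mapping.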
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -31,6 +31,7 @@
struct ldt_mapping {
struct ldt_struct ldts[2];
unsigned int ldt_index;
+ unsigned int ldt_mapped;
};
/* After calling this, the LDT is immutable. */
@@ -208,6 +209,105 @@ bool __ldt_write_fault(unsigned long add
return true;
}
+static int ldt_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ldt_mapping *lmap = vma->vm_mm->context.ldt_mapping;
+ struct ldt_struct *ldt = lmap->ldts;
+ pgoff_t pgo = vmf->pgoff;
+ struct page *page;
+
+ if (pgo >= LDT_ENTRIES_PAGES) {
+ pgo -= LDT_ENTRIES_PAGES;
+ ldt++;
+ }
+ if (pgo >= LDT_ENTRIES_PAGES)
+ return VM_FAULT_SIGBUS;
+
+ page = ldt->pages[pgo];
+ if (!page)
+ return VM_FAULT_SIGBUS;
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static int ldt_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ return -EINVAL;
+}
+
+static void ldt_close(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct ldt_struct *ldt;
+
+ /*
+ * Orders against ldt_install().
+ */
+ mutex_lock(&mm->context.lock);
+ ldt = mm->context.ldt;
+ ldt_install_mm(mm, NULL);
+ cleanup_ldt_struct(ldt);
+ mm->context.ldt_mapping->ldt_mapped = 0;
+ mutex_unlock(&mm->context.lock);
+}
+
+static const struct vm_special_mapping ldt_special_mapping = {
+ .name = "[ldt]",
+ .fault = ldt_fault,
+ .mremap = ldt_mremap,
+ .close = ldt_close,
+};
+
+static struct vm_area_struct *ldt_alloc_vma(struct mm_struct *mm,
+ struct ldt_mapping *lmap)
+{
+ unsigned long vm_flags, size;
+ struct vm_area_struct *vma;
+ unsigned long addr;
+
+ size = 2 * LDT_ENTRIES_MAP_SIZE;
+ addr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, size, 0, 0);
+ if (IS_ERR_VALUE(addr))
+ return ERR_PTR(addr);
+
+ vm_flags = VM_READ | VM_LOCKED | VM_WIPEONFORK | VM_NOUSER | VM_SHARED;
+ vma = _install_special_mapping(mm, addr, size, vm_flags,
+ &ldt_special_mapping);
+ if (IS_ERR(vma))
+ return vma;
+
+ lmap->ldts[0].entries = (struct desc_struct *) addr;
+ addr += LDT_ENTRIES_MAP_SIZE;
+ lmap->ldts[1].entries = (struct desc_struct *) addr;
+ return vma;
+}
+
+static int ldt_mmap(struct mm_struct *mm, struct ldt_mapping *lmap)
+{
+ struct vm_area_struct *vma;
+ int ret = 0;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+ vma = ldt_alloc_vma(mm, lmap);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else {
+ /*
+ * The moment mmap_sem is released, munmap() can observe the
+ * mapping and make it go away through ldt_close(). But for
+ * now the mapping is there.
+ */
+ lmap->ldt_mapped = 1;
+ }
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
@@ -350,7 +450,8 @@ static int read_ldt(void __user *ptr, un
down_read(&mm->context.ldt_usr_sem);
- ldt = mm->context.ldt;
+ /* Might race against munmap()/ldt_close(), which installs a NULL LDT */
+ ldt = READ_ONCE(mm->context.ldt);
if (!ldt)
goto out_unlock;
--