From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Linus Torvalds <torvalds@linux-foundation.org>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Dave Hansen <dave.hansen@intel.com>,
Borislav Petkov <bpetkov@suse.de>,
Greg KH <gregkh@linuxfoundation.org>,
keescook@google.com, hughd@google.com,
Brian Gerst <brgerst@gmail.com>,
Josh Poimboeuf <jpoimboe@redhat.com>,
Denys Vlasenko <dvlasenk@redhat.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Juergen Gross <jgross@suse.com>,
David Laight <David.Laight@aculab.com>,
Eduardo Valentin <eduval@amazon.com>,
aliguori@amazon.com, Will Deacon <will.deacon@arm.com>,
linux-mm@kvack.org
Subject: [patch 12/16] x86/ldt: Reshuffle code
Date: Tue, 12 Dec 2017 18:32:33 +0100
Message-ID: <20171212173334.267560774@linutronix.de>
In-Reply-To: <20171212173221.496222173@linutronix.de>
From: Thomas Gleixner <tglx@linutronix.de>
Restructure the code so that the following VMA changes do not create an
unreadable mess. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/include/asm/mmu_context.h | 4 +
arch/x86/kernel/ldt.c | 118 +++++++++++++++++--------------------
2 files changed, 59 insertions(+), 63 deletions(-)
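Not part of the patch: for reviewers, a minimal user-space sketch which
exercises the reshuffled install path (write_ldt() -> ldt_install_mm() ->
__ldt_install()) by installing one LDT entry and reading the table back.
It only assumes an x86 machine with CONFIG_MODIFY_LDT_SYSCALL=y and the
existing modify_ldt(2) uapi.

/*
 * Illustrative only, not part of this patch or the series' selftests.
 * Installs one LDT entry (func == 1 hits write_ldt() and therefore the
 * new ldt_install_mm()/__ldt_install() path) and reads the table back.
 */
#include <asm/ldt.h>            /* struct user_desc, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct user_desc desc;
        unsigned char buf[LDT_ENTRY_SIZE * 4];
        long ret;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number = 1;
        desc.base_addr = 0;
        desc.limit = 0xfffff;
        desc.seg_32bit = 1;
        desc.limit_in_pages = 1;
        desc.contents = MODIFY_LDT_CONTENTS_DATA;

        /* Write one entry: allocates the ldt_struct and installs it */
        ret = syscall(SYS_modify_ldt, 1, &desc, sizeof(desc));
        if (ret) {
                perror("modify_ldt(write)");
                return 1;
        }

        /* Read it back: a short LDT is zero-filled up to the request */
        ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
        printf("modify_ldt(read) returned %ld bytes\n", ret);
        return 0;
}

The write should return 0 and the read should return the full 32 requested
bytes, since read_ldt() clamps the request to LDT_ENTRIES_MAP_SIZE and
zero-fills the tail.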
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -39,6 +39,10 @@ static inline void load_mm_cr4(struct mm
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
+#include <asm/ldt.h>
+
+#define LDT_ENTRIES_MAP_SIZE (LDT_ENTRIES * LDT_ENTRY_SIZE)
+
/*
* ldt_structs can be allocated, used, and freed, but they are never
* modified while live.
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -28,6 +28,12 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+ paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
+}
+
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
@@ -48,18 +54,32 @@ static void refresh_ldt_segments(void)
}
/* context.lock is held by the task which issued the smp function call */
-static void flush_ldt(void *__mm)
+static void __ldt_install(void *__mm)
{
struct mm_struct *mm = __mm;
- mm_context_t *pc;
+ struct ldt_struct *ldt = mm->context.ldt;
- if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
- return;
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
+ !(current->flags & PF_KTHREAD)) {
+ unsigned int nentries = ldt ? ldt->nr_entries : 0;
+
+ set_ldt(ldt->entries, nentries);
+ refresh_ldt_segments();
+ set_tsk_thread_flag(current, TIF_LDT);
+ }
+}
- pc = &mm->context;
- set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+static void ldt_install_mm(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+ mutex_lock(&mm->context.lock);
- refresh_ldt_segments();
+ /* Synchronizes with READ_ONCE in load_mm_ldt. */
+ smp_store_release(&mm->context.ldt, ldt);
+
+ /* Activate the LDT for all CPUs using current's mm. */
+ on_each_cpu_mask(mm_cpumask(mm), __ldt_install, mm, true);
+
+ mutex_unlock(&mm->context.lock);
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
@@ -98,25 +118,6 @@ static struct ldt_struct *alloc_ldt_stru
return new_ldt;
}
-/* After calling this, the LDT is immutable. */
-static void finalize_ldt_struct(struct ldt_struct *ldt)
-{
- paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
-}
-
-static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
-{
- mutex_lock(&mm->context.lock);
-
- /* Synchronizes with READ_ONCE in load_mm_ldt. */
- smp_store_release(&mm->context.ldt, ldt);
-
- /* Activate the LDT for all CPUs using currents mm. */
- on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
-
- mutex_unlock(&mm->context.lock);
-}
-
static void free_ldt_struct(struct ldt_struct *ldt)
{
if (likely(!ldt))
@@ -164,6 +165,18 @@ int ldt_dup_context(struct mm_struct *ol
}
/*
+ * This can run unlocked because the mm is no longer in use. No need to
+ * clear LDT on the CPU either because that's called from __mmdrop() and
+ * the task which owned the mm is already dead. The context switch code has
+ * either cleared LDT or installed a new one.
+ */
+void destroy_context_ldt(struct mm_struct *mm)
+{
+ free_ldt_struct(mm->context.ldt);
+ mm->context.ldt = NULL;
+}
+
+/*
* Touching the LDT entries with LAR makes sure that the CPU "caches" the
* ACCESSED bit in the LDT entry which is already set when the entry is
* stored.
@@ -193,54 +206,33 @@ void ldt_exit_user(struct pt_regs *regs)
ldt_touch_seg(regs->ss);
}
-/*
- * No need to lock the MM as we are the last user
- *
- * 64bit: Don't touch the LDT register - we're already in the next thread.
- */
-void destroy_context_ldt(struct mm_struct *mm)
-{
- free_ldt_struct(mm->context.ldt);
- mm->context.ldt = NULL;
-}
-
-static int read_ldt(void __user *ptr, unsigned long bytecount)
+static int read_ldt(void __user *ptr, unsigned long nbytes)
{
struct mm_struct *mm = current->mm;
- unsigned long entries_size;
- int retval;
+ struct ldt_struct *ldt;
+ unsigned long tocopy;
+ int ret = 0;
down_read(&mm->context.ldt_usr_sem);
- if (!mm->context.ldt) {
- retval = 0;
+ ldt = mm->context.ldt;
+ if (!ldt)
goto out_unlock;
- }
- if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
+ if (nbytes > LDT_ENTRIES_MAP_SIZE)
+ nbytes = LDT_ENTRIES_MAP_SIZE;
- entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
- if (entries_size > bytecount)
- entries_size = bytecount;
-
- if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
- retval = -EFAULT;
+ ret = -EFAULT;
+ tocopy = min((unsigned long)ldt->nr_entries * LDT_ENTRY_SIZE, nbytes);
+ if (tocopy < nbytes && clear_user(ptr + tocopy, nbytes - tocopy))
goto out_unlock;
- }
-
- if (entries_size != bytecount) {
- /* Zero-fill the rest and pretend we read bytecount bytes. */
- if (clear_user(ptr + entries_size, bytecount - entries_size)) {
- retval = -EFAULT;
- goto out_unlock;
- }
- }
- retval = bytecount;
+ if (copy_to_user(ptr, ldt->entries, tocopy))
+ goto out_unlock;
+ ret = nbytes;
out_unlock:
up_read(&mm->context.ldt_usr_sem);
- return retval;
+ return ret;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -317,7 +309,7 @@ static int write_ldt(void __user *ptr, u
new_ldt->entries[ldt_info.entry_number] = ldt;
finalize_ldt_struct(new_ldt);
- install_ldt(mm, new_ldt);
+ ldt_install_mm(mm, new_ldt);
free_ldt_struct(old_ldt);
error = 0;
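Also not part of the patch: the user-visible read semantics which the
read_ldt() rewrite keeps can be spelled out as a sketch. Entries up to
nr_entries are copied out, the remainder of the (clamped) request is
zero-filled by clear_user(), and the clamped byte count is returned.
Same assumptions as the sketch above (x86, CONFIG_MODIFY_LDT_SYSCALL=y).

/*
 * Illustrative only. Installs a single entry, then reads with an
 * oversized buffer prefilled with 0xaa: everything past the first
 * LDT_ENTRY_SIZE bytes must come back as zero and the return value
 * must be the full (clamped) request size.
 */
#include <asm/ldt.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct user_desc desc = {
                .entry_number = 0,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .contents = MODIFY_LDT_CONTENTS_DATA,
        };
        unsigned char buf[LDT_ENTRY_SIZE * 8];
        unsigned int i;
        long ret;

        if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)))
                return 1;

        memset(buf, 0xaa, sizeof(buf));
        ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

        /* nr_entries == 1, so bytes 8..63 were zero-filled by clear_user() */
        for (i = LDT_ENTRY_SIZE; i < sizeof(buf); i++) {
                if (buf[i])
                        printf("unexpected non-zero byte at offset %u\n", i);
        }
        printf("read returned %ld of %zu requested bytes\n", ret, sizeof(buf));
        return 0;
}

Whether the tail is cleared before or after the entries are copied does not
matter for the ABI; the rewrite simply does the clear_user() first so both
fault cases share the one preset -EFAULT exit path.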