diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8e596de963a8f..8e1ac8be123b2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -821,6 +821,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
 		WARN_ON_ONCE(!irqs_disabled());
 
+	// FIXME
+	// This is totally unexplained and needs justification and
+	// commenting
 	tlbsync();
 
 	/*
@@ -1678,9 +1681,10 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	}
 
 	/*
-	 * If (asynchronous) INVLPGB flushes were issued, wait for them here.
-	 * The cpumask above contains only CPUs that were running tasks
-	 * not using broadcast TLB flushing.
+	 * Wait for outstanding INVLPGB flushes. batch->cpumask will
+	 * be empty when the batch was handled completely by INVLPGB.
+	 * Note that mm_in_asid_transition() mm's may use INVLPGB and
+	 * the flush_tlb_multi() IPIs at the same time.
 	 */
 	tlbsync();
 
@@ -1693,9 +1697,14 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       struct mm_struct *mm, unsigned long uaddr)
 {
-	u16 asid = mm_global_asid(mm);
+	u16 global_asid = mm_global_asid(mm);
 
-	if (asid) {
-		invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false);
+	if (global_asid) {
+		/*
+		 * Global ASIDs can be flushed with INVLPGB. Flush
+		 * now instead of batching them for later. A later
+		 * tlbsync() is required to ensure these completed.
+		 */
+		invlpgb_flush_user_nr_nosync(kern_pcid(global_asid), uaddr, 1, false);
 		/* Do any CPUs supporting INVLPGB need PTI? */
 		if (cpu_feature_enabled(X86_FEATURE_PTI))
@@ -1714,7 +1723,11 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-			asid = 0;
+			global_asid = 0;
 	}
 
-	if (!asid) {
+	if (!global_asid) {
+		/*
+		 * Mark the mm and the CPU so that
+		 * the TLB gets flushed later.
+		 */
 		inc_mm_tlb_gen(mm);
 		cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 	}
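
To make the scheme the new comments describe easier to follow, here is a small stand-alone C model of the pattern: pages covered by a global ASID are flushed immediately with an asynchronous broadcast and only synchronized once at the end, while everything else is accumulated for a conventional batch flush. The names issue_broadcast_flush(), wait_for_broadcasts(), pending_pages and broadcasts_in_flight are made up for the illustration; they merely play the roles of invlpgb_flush_user_nr_nosync(), tlbsync() and batch->cpumask, and none of this is the actual kernel code.

/*
 * Toy model of the batched TLB flush pattern above.  Hypothetical
 * stand-ins: issue_broadcast_flush() ~ invlpgb_flush_user_nr_nosync(),
 * wait_for_broadcasts() ~ tlbsync(), pending_pages ~ batch->cpumask.
 */
#include <stdbool.h>
#include <stdio.h>

static int broadcasts_in_flight;	/* async flushes issued, not yet synced */
static int pending_pages;		/* pages left for the fallback batch flush */

static void issue_broadcast_flush(unsigned long addr)
{
	/* fire and forget: the flush runs while more work is queued */
	broadcasts_in_flight++;
	printf("broadcast flush of %#lx issued\n", addr);
}

static void wait_for_broadcasts(void)
{
	/* single synchronization point for all outstanding broadcasts */
	printf("waiting for %d broadcast flush(es)\n", broadcasts_in_flight);
	broadcasts_in_flight = 0;
}

static void batch_add_pending(bool has_global_asid, unsigned long addr)
{
	if (has_global_asid)
		issue_broadcast_flush(addr);	/* flush now, sync later */
	else
		pending_pages++;		/* defer to the batch flush */
}

static void batch_flush(void)
{
	if (pending_pages)
		printf("flushing %d pending page(s) via IPIs\n", pending_pages);
	pending_pages = 0;
	/* always wait: broadcasts may be outstanding either way */
	wait_for_broadcasts();
}

int main(void)
{
	batch_add_pending(true,  0x1000UL);
	batch_add_pending(false, 0x2000UL);
	batch_flush();
	return 0;
}

The point of the model is the ordering constraint the comments insist on: every fire-and-forget broadcast flush must eventually be paired with exactly one wait, and the batch flush is the natural place for it because a single tlbsync() covers all flushes issued since the batch was opened.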