* [PATCH v2 0/1] mm: clear spelling mistakes
@ 2021-05-19 6:58 Zhen Lei
From: Zhen Lei @ 2021-05-19 6:58 UTC
To: Andrew Morton, Naoya Horiguchi, Souptick Joarder, linux-mm; +Cc: Zhen Lei
v2:
1) Merged the changes into one patch.
2) Added new fixes:
   We moves ==> We move
   each having differents usage ==> each has a different usage
   // this new fix is not covered by the existing Reviewed-by
Files checked:
   mm/
   include/linux/mm*.h, excluding mmu_notifier.h
   include/linux/mem*.h   // new in v2
   include/linux/shm.h    // new in v2

v1:
Files checked:
   mm/
   include/linux/mm*.h, excluding mmu_notifier.h
Zhen Lei (1):
mm: clear spelling mistakes
include/linux/memremap.h | 2 +-
include/linux/mm.h | 2 +-
include/linux/mm_types.h | 4 ++--
include/linux/mmzone.h | 2 +-
mm/internal.h | 2 +-
mm/memory-failure.c | 2 +-
mm/memory_hotplug.c | 4 ++--
mm/page_alloc.c | 2 +-
mm/swap.c | 2 +-
mm/swapfile.c | 2 +-
10 files changed, 12 insertions(+), 12 deletions(-)
--
2.25.1
* [PATCH v2 1/1] mm: clear spelling mistakes
@ 2021-05-19 6:58 Zhen Lei
From: Zhen Lei @ 2021-05-19 6:58 UTC
To: Andrew Morton, Naoya Horiguchi, Souptick Joarder, linux-mm; +Cc: Zhen Lei
Fix some spelling mistakes in comments:
each having differents usage ==> each has a different usage
statments ==> statements
adresses ==> addresses
aggresive ==> aggressive
datas ==> data
posion ==> poison
higer ==> higher
precisly ==> precisely
wont ==> won't
We moves tha ==> We move the
endianess ==> endianness
Reviewed-by: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
include/linux/memremap.h | 2 +-
include/linux/mm.h | 2 +-
include/linux/mm_types.h | 4 ++--
include/linux/mmzone.h | 2 +-
mm/internal.h | 2 +-
mm/memory-failure.c | 2 +-
mm/memory_hotplug.c | 4 ++--
mm/page_alloc.c | 2 +-
mm/swap.c | 2 +-
mm/swapfile.c | 2 +-
10 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 45a79da89c5f..c0e9d35889e8 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,7 +26,7 @@ struct vmem_altmap {
};
/*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types each has a different
* usage.
*
* MEMORY_DEVICE_PRIVATE:
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c274f75efcf9..12d13c8708a5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
/* This function must be updated when the size of struct page grows above 80
* or reduces below 56. The idea that compiler optimizes out switch()
* statement, and only leaves move/store instructions. Also the compiler can
- * combine write statments if they are both assignments and can be reordered,
+ * combine write statements if they are both assignments and can be reordered,
* this can result in several of the writes here being dropped.
*/
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
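(For context on the comment above: because sizeof(struct page) is a compile-time
constant, the switch collapses to the plain stores of the matching case. Below is
a minimal userspace-style sketch of that pattern, using a made-up 64-byte
struct example_page rather than the real struct page or __mm_zero_struct_page():)

    #include <string.h>

    /* Hypothetical 64-byte structure standing in for struct page. */
    struct example_page { unsigned long words[8]; };

    static inline void example_zero_page(struct example_page *pp)
    {
            unsigned long *w = (unsigned long *)pp;

            /* sizeof() is a constant, so the compiler drops the switch and
             * keeps only the stores of the matching case; since they are
             * plain assignments, it may also combine or reorder them. */
            switch (sizeof(struct example_page)) {
            case 64:
                    w[7] = 0;
                    w[6] = 0;
                    w[5] = 0;
                    w[4] = 0;
                    w[3] = 0;
                    w[2] = 0;
                    w[1] = 0;
                    w[0] = 0;
                    break;
            default:
                    memset(pp, 0, sizeof(*pp));
            }
    }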
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5aacc1c10a45..7034f5673d26 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -397,7 +397,7 @@ struct mm_struct {
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
+ /* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
@@ -439,7 +439,7 @@ struct mm_struct {
* @has_pinned: Whether this mm has pinned any pages. This can
* be either replaced in the future by @pinned_vm when it
* becomes stable, or grow into a counter on its own. We're
- * aggresive on this bit now - even if the pinned pages were
+ * aggressive on this bit now - even if the pinned pages were
* unpinned later on, we'll still keep this bit set for the
* lifecycle of this mm just for simplicity.
*/
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0d53eba1c383..7d7d86220f01 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -113,7 +113,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
/*
- * Add a wild amount of padding here to ensure datas fall into separate
+ * Add a wild amount of padding here to ensure data fall into separate
* cachelines. There are very few zone structures in the machine, so space
* consumption is not a concern here.
*/
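(The padding this comment refers to keeps write-heavy and read-mostly fields on
separate cache lines. A minimal sketch of the idea with standard C11 alignment,
using made-up field names and assuming 64-byte cache lines, rather than the
kernel's own ZONE_PADDING() machinery:)

    #include <stdalign.h>

    struct demo_zone_stats {
            alignas(64) unsigned long hot_counter;   /* updated constantly */
            alignas(64) unsigned long cold_config;   /* rarely written, so
                                                      * writers of hot_counter
                                                      * do not invalidate its
                                                      * cache line */
    };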
diff --git a/mm/internal.h b/mm/internal.h
index 54bd0dc2c23c..e64e72782978 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -100,7 +100,7 @@ static inline void set_page_refcounted(struct page *page)
* When kernel touch the user page, the user page may be have been marked
* poison but still mapped in user space, if without this page, the kernel
* can guarantee the data integrity and operation success, the kernel is
- * better to check the posion status and avoid touching it, be good not to
+ * better to check the poison status and avoid touching it, be good not to
* panic, coredump for process fatal signal is a sample case matching this
* scenario. Or if kernel can't guarantee the data integrity, it's better
* not to call this function, let kernel touch the poison page and get to
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 85ad98c00fd9..8e06c6998fb9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1134,7 +1134,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* could potentially call huge_pmd_unshare. Because of
* this, take semaphore in write mode here and set
* TTU_RMAP_LOCKED to indicate we have taken the lock
- * at this higer level.
+ * at this higher level.
*/
mapping = hugetlb_page_mapping_lock_write(hpage);
if (mapping) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70620d0dd923..02f8073a364f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -913,7 +913,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
/*
* {on,off}lining is constrained to full memory sections (or more
- * precisly to memory blocks from the user space POV).
+ * precisely to memory blocks from the user space POV).
* memmap_on_memory is an exception because it reserves initial part
* of the physical memory space for vmemmaps. That space is pageblock
* aligned.
@@ -1703,7 +1703,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
/*
* {on,off}lining is constrained to full memory sections (or more
- * precisly to memory blocks from the user space POV).
+ * precisely to memory blocks from the user space POV).
* memmap_on_memory is an exception because it reserves initial part
* of the physical memory space for vmemmaps. That space is pageblock
* aligned.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aaa1655cf682..a16f8f3f9e80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3103,7 +3103,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
int cpu;
/*
- * Allocate in the BSS so we wont require allocation in
+ * Allocate in the BSS so we won't require allocation in
* direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
*/
static cpumask_t cpus_with_pcps;
diff --git a/mm/swap.c b/mm/swap.c
index dfb48cf9c2c9..0e5e4fccadc3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -554,7 +554,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
} else {
/*
* The page's writeback ends up during pagevec
- * We moves tha page into tail of inactive.
+ * We move the page into tail of inactive.
*/
add_page_to_lru_list_tail(page, lruvec);
__count_vm_events(PGROTATED, nr_pages);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 149e77454e3c..88a6f01cfb88 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2972,7 +2972,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
return 0;
}
- /* swap partition endianess hack... */
+ /* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
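(For context on the hunk above: a swap area created on a machine of the opposite
endianness stores its header fields byte-swapped, so a version field that only
equals 1 after swapping is the cue to convert the header in place. A minimal
userspace sketch of that idea, assuming a cut-down stand-in for the header and
glibc's bswap_32() instead of the kernel's swab32()/swab32s():)

    #include <stdint.h>
    #include <byteswap.h>           /* bswap_32() */

    /* Cut-down stand-in for the swap header fields shown above. */
    struct demo_swap_info {
            uint32_t version;
            uint32_t last_page;
            uint32_t nr_badpages;
    };

    static void demo_fix_endianness(struct demo_swap_info *info)
    {
            /* Valid headers have version == 1; if it only matches after a
             * byte swap, the header was written on an opposite-endian
             * machine, so swap every multi-byte field in place. */
            if (bswap_32(info->version) == 1) {
                    info->version     = bswap_32(info->version);
                    info->last_page   = bswap_32(info->last_page);
                    info->nr_badpages = bswap_32(info->nr_badpages);
            }
    }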
--
2.25.1