* [RESEND PATCH 2/3] mm/swapfile: use list_{prev,next}_entry() instead of open-coding
2020-04-11 10:11 [RESEND PATCH 1/3] mm: Replace zero-length array with flexible-array member qiwuchen55
@ 2020-04-11 10:11 ` qiwuchen55
2020-04-11 10:11 ` [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages() qiwuchen55
2020-04-11 13:54 ` [RESEND PATCH 1/3] mm: Replace zero-length array with flexible-array member Wei Yang
2 siblings, 0 replies; 7+ messages in thread
From: qiwuchen55 @ 2020-04-11 10:11 UTC (permalink / raw)
To: akpm, willy, david, richard.weiyang, mhocko, pankaj.gupta.linux,
yang.shi, cai, bhe
Cc: linux-mm, chenqiwu
From: chenqiwu <chenqiwu@xiaomi.com>
Use list_{prev,next}_entry() instead of list_entry() for better
code readability.
Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
---
mm/swapfile.c | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5871a2a..8d8dc67 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3654,7 +3654,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
spin_lock(&si->cont_lock);
offset &= ~PAGE_MASK;
- page = list_entry(head->lru.next, struct page, lru);
+ page = list_next_entry(head, lru);
map = kmap_atomic(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
@@ -3666,13 +3666,13 @@ static bool swap_count_continued(struct swap_info_struct *si,
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
kunmap_atomic(map);
- page = list_entry(page->lru.next, struct page, lru);
+ page = list_next_entry(page, lru);
BUG_ON(page == head);
map = kmap_atomic(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
kunmap_atomic(map);
- page = list_entry(page->lru.next, struct page, lru);
+ page = list_next_entry(page, lru);
if (page == head) {
ret = false; /* add count continuation */
goto out;
@@ -3682,12 +3682,10 @@ static bool swap_count_continued(struct swap_info_struct *si,
}
*map += 1;
kunmap_atomic(map);
- page = list_entry(page->lru.prev, struct page, lru);
- while (page != head) {
+ while ((page = list_prev_entry(page, lru)) != head) {
map = kmap_atomic(page) + offset;
*map = COUNT_CONTINUED;
kunmap_atomic(map);
- page = list_entry(page->lru.prev, struct page, lru);
}
ret = true; /* incremented */
@@ -3698,7 +3696,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
kunmap_atomic(map);
- page = list_entry(page->lru.next, struct page, lru);
+ page = list_next_entry(page, lru);
BUG_ON(page == head);
map = kmap_atomic(page) + offset;
}
@@ -3707,13 +3705,11 @@ static bool swap_count_continued(struct swap_info_struct *si,
if (*map == 0)
count = 0;
kunmap_atomic(map);
- page = list_entry(page->lru.prev, struct page, lru);
- while (page != head) {
+ while ((page = list_prev_entry(page, lru)) != head) {
map = kmap_atomic(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
kunmap_atomic(map);
- page = list_entry(page->lru.prev, struct page, lru);
}
ret = count == COUNT_CONTINUED;
}
--
1.9.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages()
2020-04-11 10:11 [RESEND PATCH 1/3] mm: Replace zero-length array with flexible-array member qiwuchen55
2020-04-11 10:11 ` [RESEND PATCH 2/3] mm/swapfile: use list_{prev,next}_entry() instead of open-coding qiwuchen55
@ 2020-04-11 10:11 ` qiwuchen55
2020-04-11 16:10 ` Matthew Wilcox
2020-04-11 13:54 ` [RESEND PATCH 1/3] mm: Replace zero-length array with flexible-array member Wei Yang
2 siblings, 1 reply; 7+ messages in thread
From: qiwuchen55 @ 2020-04-11 10:11 UTC (permalink / raw)
To: akpm, willy, david, richard.weiyang, mhocko, pankaj.gupta.linux,
yang.shi, cai, bhe
Cc: linux-mm, chenqiwu
From: chenqiwu <chenqiwu@xiaomi.com>
1) Simplify the code of initializing some variables.
2) Constify the LRU isolation mode.
3) Define a page_zoneidx variable to reduce the redundant code
of page_zonenum().
Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
---
mm/vmscan.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b06868f..2f65a91 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1638,16 +1638,15 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
- unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
+ unsigned long nr_skipped[MAX_NR_ZONES] = { 0 };
unsigned long skipped = 0;
- unsigned long scan, total_scan, nr_pages;
+ unsigned long scan = 0, total_scan = 0, nr_pages;
LIST_HEAD(pages_skipped);
- isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
+ const isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
- total_scan = 0;
- scan = 0;
while (scan < nr_to_scan && !list_empty(src)) {
struct page *page;
+ enum zone_type page_zoneidx;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
@@ -1657,9 +1656,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_pages = compound_nr(page);
total_scan += nr_pages;
- if (page_zonenum(page) > sc->reclaim_idx) {
+ page_zoneidx = page_zonenum(page);
+ if (page_zoneidx > sc->reclaim_idx) {
list_move(&page->lru, &pages_skipped);
- nr_skipped[page_zonenum(page)] += nr_pages;
+ nr_skipped[page_zoneidx] += nr_pages;
continue;
}
@@ -1677,7 +1677,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
switch (__isolate_lru_page(page, mode)) {
case 0:
nr_taken += nr_pages;
- nr_zone_taken[page_zonenum(page)] += nr_pages;
+ nr_zone_taken[page_zoneidx] += nr_pages;
list_move(&page->lru, dst);
break;
--
1.9.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages()
2020-04-11 10:11 ` [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages() qiwuchen55
@ 2020-04-11 16:10 ` Matthew Wilcox
2020-04-12 7:35 ` chenqiwu
0 siblings, 1 reply; 7+ messages in thread
From: Matthew Wilcox @ 2020-04-11 16:10 UTC (permalink / raw)
To: qiwuchen55
Cc: akpm, david, richard.weiyang, mhocko, pankaj.gupta.linux,
yang.shi, cai, bhe, linux-mm, chenqiwu
On Sat, Apr 11, 2020 at 06:11:56PM +0800, qiwuchen55@gmail.com wrote:
> 1) Simplify the code of initializing some variables.
> - unsigned long scan, total_scan, nr_pages;
> + unsigned long scan = 0, total_scan = 0, nr_pages;
>
> - total_scan = 0;
> - scan = 0;
I do not find this to be a simplification.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages()
2020-04-11 16:10 ` Matthew Wilcox
@ 2020-04-12 7:35 ` chenqiwu
2020-04-12 10:18 ` Matthew Wilcox
0 siblings, 1 reply; 7+ messages in thread
From: chenqiwu @ 2020-04-12 7:35 UTC (permalink / raw)
To: Matthew Wilcox
Cc: akpm, david, richard.weiyang, mhocko, pankaj.gupta.linux,
yang.shi, cai, bhe, linux-mm, chenqiwu
On Sat, Apr 11, 2020 at 09:10:21AM -0700, Matthew Wilcox wrote:
> On Sat, Apr 11, 2020 at 06:11:56PM +0800, qiwuchen55@gmail.com wrote:
> > 1) Simplify the code of initializing some variables.
> > - unsigned long scan, total_scan, nr_pages;
> > + unsigned long scan = 0, total_scan = 0, nr_pages;
> >
> > - total_scan = 0;
> > - scan = 0;
>
> I do not find this to be a simplification.
>
Hi willy,
This slightly simplifies the code by combining the definition and
initialization into a single step instead of separating them into two.
This saves two lines of code.
Qiwu
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages()
2020-04-12 7:35 ` chenqiwu
@ 2020-04-12 10:18 ` Matthew Wilcox
0 siblings, 0 replies; 7+ messages in thread
From: Matthew Wilcox @ 2020-04-12 10:18 UTC (permalink / raw)
To: chenqiwu
Cc: akpm, david, richard.weiyang, mhocko, pankaj.gupta.linux,
yang.shi, cai, bhe, linux-mm, chenqiwu
On Sun, Apr 12, 2020 at 03:35:16PM +0800, chenqiwu wrote:
> On Sat, Apr 11, 2020 at 09:10:21AM -0700, Matthew Wilcox wrote:
> > On Sat, Apr 11, 2020 at 06:11:56PM +0800, qiwuchen55@gmail.com wrote:
> > > 1) Simplify the code of initializing some variables.
> > > - unsigned long scan, total_scan, nr_pages;
> > > + unsigned long scan = 0, total_scan = 0, nr_pages;
> > >
> > > - total_scan = 0;
> > > - scan = 0;
> >
> > I do not find this to be a simplification.
> >
> Hi willy,
> This slightly simplify the code by definition and initialization
> meanwhile instead of separating them into the two steps.
> This can save two lines of code.
And it buries the initialisation on a more complicated line. Number
of lines is not the only metric.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RESEND PATCH 1/3] mm: Replace zero-length array with flexible-array member
2020-04-11 10:11 [RESEND PATCH 1/3] mm: Replace zero-length array with flexible-array member qiwuchen55
2020-04-11 10:11 ` [RESEND PATCH 2/3] mm/swapfile: use list_{prev,next}_entry() instead of open-coding qiwuchen55
2020-04-11 10:11 ` [RESEND PATCH 3/3] mm/vmscan: make several optimizations for isolate_lru_pages() qiwuchen55
@ 2020-04-11 13:54 ` Wei Yang
2 siblings, 0 replies; 7+ messages in thread
From: Wei Yang @ 2020-04-11 13:54 UTC (permalink / raw)
To: qiwuchen55
Cc: akpm, willy, david, richard.weiyang, mhocko, pankaj.gupta.linux,
yang.shi, cai, bhe, linux-mm, chenqiwu
On Sat, Apr 11, 2020 at 06:11:54PM +0800, qiwuchen55@gmail.com wrote:
>From: chenqiwu <chenqiwu@xiaomi.com>
>
>The current codebase makes use of the zero-length array language
>extension to the C90 standard, but the preferred mechanism to declare
>variable-length types such as these ones is a flexible array member[1][2],
>introduced in C99:
>
>struct foo {
> int stuff;
> struct boo array[];
>};
>
>By making use of the mechanism above, we will get a compiler warning
>in case the flexible array does not occur last in the structure, which
>will help us prevent some kind of undefined behavior bugs from being
>inadvertently introduced[3] to the codebase from now on.
>
>Also, notice that, dynamic memory allocations won't be affected by
>this change:
>
>"Flexible array members have incomplete type, and so the sizeof operator
>may not be applied. As a quirk of the original implementation of
>zero-length arrays, sizeof evaluates to zero."[1]
>
>This issue was found with the help of Coccinelle.
>
>[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
>[2] https://github.com/KSPP/linux/issues/21
>[3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour")
>
>Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
The change looks good to me.
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
>---
> include/linux/mm.h | 2 +-
> include/linux/mmzone.h | 2 +-
> include/linux/swap.h | 2 +-
> 3 files changed, 3 insertions(+), 3 deletions(-)
>
>diff --git a/include/linux/mm.h b/include/linux/mm.h
>index 5a32342..9831bb5 100644
>--- a/include/linux/mm.h
>+++ b/include/linux/mm.h
>@@ -1718,7 +1718,7 @@ struct frame_vector {
> unsigned int nr_frames; /* Number of frames stored in ptrs array */
> bool got_ref; /* Did we pin pages by getting page ref? */
> bool is_pfns; /* Does array contain pages or pfns? */
>- void *ptrs[0]; /* Array of pinned pfns / pages. Use
>+ void *ptrs[]; /* Array of pinned pfns / pages. Use
> * pfns_vector_pages() or pfns_vector_pfns()
> * for access */
> };
>diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
>index 1b9de7d..f6b9fa9 100644
>--- a/include/linux/mmzone.h
>+++ b/include/linux/mmzone.h
>@@ -1147,7 +1147,7 @@ struct mem_section_usage {
> DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
> #endif
> /* See declaration of similar field in struct zone */
>- unsigned long pageblock_flags[0];
>+ unsigned long pageblock_flags[];
> };
>
> void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
>diff --git a/include/linux/swap.h b/include/linux/swap.h
>index b835d8d..e1bbf7a 100644
>--- a/include/linux/swap.h
>+++ b/include/linux/swap.h
>@@ -275,7 +275,7 @@ struct swap_info_struct {
> */
> struct work_struct discard_work; /* discard worker */
> struct swap_cluster_list discard_clusters; /* discard clusters list */
>- struct plist_node avail_lists[0]; /*
>+ struct plist_node avail_lists[]; /*
> * entries in swap_avail_heads, one
> * entry per node.
> * Must be last as the number of the
>--
>1.9.1
--
Wei Yang
Help you, Help me
^ permalink raw reply [flat|nested] 7+ messages in thread