From: kbuild test robot <fengguang.wu@intel.com>
To: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: kbuild-all@01.org, Ingo Molnar <mingo@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Linux Memory Management List <linux-mm@kvack.org>
Subject: [linux-next:master 3210/5518] mm/vmscan.c:1293:1: warning: the frame size of 10120 bytes is larger than 8192 bytes
Date: Wed, 7 Mar 2018 07:52:57 +0800
Message-ID: <201803070752.xTPT7kK0%fengguang.wu@intel.com>
tree: https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head: 9c142d8a6556f069be6278ccab701039da81ad6f
commit: d126e9de48465402414c4be2d8cb765ad5d4d9d2 [3210/5518] mm: uninitialized struct page poisoning sanity checking
config: x86_64-randconfig-v0-03041033 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        git checkout d126e9de48465402414c4be2d8cb765ad5d4d9d2
        # save the attached .config to the linux build tree
        make ARCH=x86_64
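
To chase a frame-size report like this one locally, one option (a sketch, not part of the robot's reproduce steps; it assumes the attached .config is already in place and a gcc that supports -fstack-usage) is to rebuild just the affected object with per-function stack accounting:

        # rebuild only the affected object; -fstack-usage makes gcc write
        # per-function stack sizes into mm/vmscan.su (function, bytes, kind)
        make ARCH=x86_64 KCFLAGS=-fstack-usage mm/vmscan.o
        # list the largest stack users in the file
        sort -k2,2n mm/vmscan.su | tail -5

Alternatively, scripts/checkstack.pl in the kernel tree ranks stack users from a disassembly, e.g. objdump -d mm/vmscan.o | scripts/checkstack.pl x86_64.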
All warnings (new ones prefixed by >>):

   mm/vmscan.c: In function 'shrink_page_list':
>> mm/vmscan.c:1293:1: warning: the frame size of 10120 bytes is larger than 8192 bytes [-Wframe-larger-than=]
    }
    ^
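
The threshold comes from CONFIG_FRAME_WARN (8192 bytes in this randconfig), which the kernel build passes to gcc as -Wframe-larger-than=. The diagnostic measures a single function's stack frame, nothing more; as a userspace illustration (hypothetical code, not from mm/vmscan.c), any large on-stack object trips the same check:

        /* frame_demo.c -- hypothetical illustration, not kernel code */
        #include <string.h>

        void consume(char *buf, size_t len);    /* defined elsewhere */

        void big_frame(void)
        {
                char scratch[10000];    /* lives in big_frame()'s stack frame */

                memset(scratch, 0, sizeof(scratch));
                consume(scratch, sizeof(scratch));
        }

Compiling with gcc -c -Wframe-larger-than=8192 frame_demo.c emits the same class of warning, reporting a frame a little over 10000 bytes. In shrink_page_list() below, the frame is likely inflated by inlined callees and their locals rather than by one big array, but the accounting is the same.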
vim +1293 mm/vmscan.c
3c710c1ad1 Michal Hocko 2017-02-22 878
e286781d5f Nick Piggin 2008-07-25 879 /*
1742f19fa9 Andrew Morton 2006-03-22 880 * shrink_page_list() returns the number of reclaimed pages
^1da177e4c Linus Torvalds 2005-04-16 881 */
1742f19fa9 Andrew Morton 2006-03-22 882 static unsigned long shrink_page_list(struct list_head *page_list,
599d0c954f Mel Gorman 2016-07-28 883 struct pglist_data *pgdat,
f84f6e2b08 Mel Gorman 2011-10-31 884 struct scan_control *sc,
02c6de8d75 Minchan Kim 2012-10-08 885 enum ttu_flags ttu_flags,
3c710c1ad1 Michal Hocko 2017-02-22 886 struct reclaim_stat *stat,
02c6de8d75 Minchan Kim 2012-10-08 887 bool force_reclaim)
^1da177e4c Linus Torvalds 2005-04-16 888 {
^1da177e4c Linus Torvalds 2005-04-16 889 LIST_HEAD(ret_pages);
abe4c3b50c Mel Gorman 2010-08-09 890 LIST_HEAD(free_pages);
^1da177e4c Linus Torvalds 2005-04-16 891 int pgactivate = 0;
3c710c1ad1 Michal Hocko 2017-02-22 892 unsigned nr_unqueued_dirty = 0;
3c710c1ad1 Michal Hocko 2017-02-22 893 unsigned nr_dirty = 0;
3c710c1ad1 Michal Hocko 2017-02-22 894 unsigned nr_congested = 0;
3c710c1ad1 Michal Hocko 2017-02-22 895 unsigned nr_reclaimed = 0;
3c710c1ad1 Michal Hocko 2017-02-22 896 unsigned nr_writeback = 0;
3c710c1ad1 Michal Hocko 2017-02-22 897 unsigned nr_immediate = 0;
5bccd16657 Michal Hocko 2017-02-22 898 unsigned nr_ref_keep = 0;
5bccd16657 Michal Hocko 2017-02-22 899 unsigned nr_unmap_fail = 0;
^1da177e4c Linus Torvalds 2005-04-16 900
^1da177e4c Linus Torvalds 2005-04-16 901 cond_resched();
^1da177e4c Linus Torvalds 2005-04-16 902
^1da177e4c Linus Torvalds 2005-04-16 903 while (!list_empty(page_list)) {
^1da177e4c Linus Torvalds 2005-04-16 904 struct address_space *mapping;
^1da177e4c Linus Torvalds 2005-04-16 905 struct page *page;
^1da177e4c Linus Torvalds 2005-04-16 906 int may_enter_fs;
02c6de8d75 Minchan Kim 2012-10-08 907 enum page_references references = PAGEREF_RECLAIM_CLEAN;
e2be15f6c3 Mel Gorman 2013-07-03 908 bool dirty, writeback;
^1da177e4c Linus Torvalds 2005-04-16 909
^1da177e4c Linus Torvalds 2005-04-16 910 cond_resched();
^1da177e4c Linus Torvalds 2005-04-16 911
^1da177e4c Linus Torvalds 2005-04-16 912 page = lru_to_page(page_list);
^1da177e4c Linus Torvalds 2005-04-16 913 list_del(&page->lru);
^1da177e4c Linus Torvalds 2005-04-16 914
529ae9aaa0 Nick Piggin 2008-08-02 915 if (!trylock_page(page))
^1da177e4c Linus Torvalds 2005-04-16 916 goto keep;
^1da177e4c Linus Torvalds 2005-04-16 917
309381feae Sasha Levin 2014-01-23 918 VM_BUG_ON_PAGE(PageActive(page), page);
^1da177e4c Linus Torvalds 2005-04-16 919
^1da177e4c Linus Torvalds 2005-04-16 920 sc->nr_scanned++;
80e4342601 Christoph Lameter 2006-02-11 921
39b5f29ac1 Hugh Dickins 2012-10-08 922 if (unlikely(!page_evictable(page)))
ad6b67041a Minchan Kim 2017-05-03 923 goto activate_locked;
894bc31041 Lee Schermerhorn 2008-10-18 924
a6dc60f897 Johannes Weiner 2009-03-31 925 if (!sc->may_unmap && page_mapped(page))
80e4342601 Christoph Lameter 2006-02-11 926 goto keep_locked;
80e4342601 Christoph Lameter 2006-02-11 927
^1da177e4c Linus Torvalds 2005-04-16 928 /* Double the slab pressure for mapped and swapcache pages */
802a3a92ad Shaohua Li 2017-05-03 929 if ((page_mapped(page) || PageSwapCache(page)) &&
802a3a92ad Shaohua Li 2017-05-03 930 !(PageAnon(page) && !PageSwapBacked(page)))
^1da177e4c Linus Torvalds 2005-04-16 931 sc->nr_scanned++;
^1da177e4c Linus Torvalds 2005-04-16 932
c661b078fd Andy Whitcroft 2007-08-22 933 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
c661b078fd Andy Whitcroft 2007-08-22 934 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
c661b078fd Andy Whitcroft 2007-08-22 935
e62e384e9d Michal Hocko 2012-07-31 936 /*
e2be15f6c3 Mel Gorman 2013-07-03 937 * The number of dirty pages determines if a zone is marked
e2be15f6c3 Mel Gorman 2013-07-03 938 * reclaim_congested which affects wait_iff_congested. kswapd
e2be15f6c3 Mel Gorman 2013-07-03 939 * will stall and start writing pages if the tail of the LRU
e2be15f6c3 Mel Gorman 2013-07-03 940 * is all dirty unqueued pages.
e2be15f6c3 Mel Gorman 2013-07-03 941 */
e2be15f6c3 Mel Gorman 2013-07-03 942 page_check_dirty_writeback(page, &dirty, &writeback);
e2be15f6c3 Mel Gorman 2013-07-03 943 if (dirty || writeback)
e2be15f6c3 Mel Gorman 2013-07-03 944 nr_dirty++;
e2be15f6c3 Mel Gorman 2013-07-03 945
e2be15f6c3 Mel Gorman 2013-07-03 946 if (dirty && !writeback)
e2be15f6c3 Mel Gorman 2013-07-03 947 nr_unqueued_dirty++;
e2be15f6c3 Mel Gorman 2013-07-03 948
d04e8acd03 Mel Gorman 2013-07-03 949 /*
d04e8acd03 Mel Gorman 2013-07-03 950 * Treat this page as congested if the underlying BDI is or if
d04e8acd03 Mel Gorman 2013-07-03 951 * pages are cycling through the LRU so quickly that the
d04e8acd03 Mel Gorman 2013-07-03 952 * pages marked for immediate reclaim are making it to the
d04e8acd03 Mel Gorman 2013-07-03 953 * end of the LRU a second time.
d04e8acd03 Mel Gorman 2013-07-03 954 */
e2be15f6c3 Mel Gorman 2013-07-03 955 mapping = page_mapping(page);
1da58ee2a0 Jamie Liu 2014-12-10 956 if (((dirty || writeback) && mapping &&
703c270887 Tejun Heo 2015-05-22 957 inode_write_congested(mapping->host)) ||
d04e8acd03 Mel Gorman 2013-07-03 958 (writeback && PageReclaim(page)))
e2be15f6c3 Mel Gorman 2013-07-03 959 nr_congested++;
e2be15f6c3 Mel Gorman 2013-07-03 960
e2be15f6c3 Mel Gorman 2013-07-03 961 /*
283aba9f9e Mel Gorman 2013-07-03 962 * If a page at the tail of the LRU is under writeback, there
283aba9f9e Mel Gorman 2013-07-03 963 * are three cases to consider.
283aba9f9e Mel Gorman 2013-07-03 964 *
283aba9f9e Mel Gorman 2013-07-03 965 * 1) If reclaim is encountering an excessive number of pages
283aba9f9e Mel Gorman 2013-07-03 966 * under writeback and this page is both under writeback and
283aba9f9e Mel Gorman 2013-07-03 967 * PageReclaim then it indicates that pages are being queued
283aba9f9e Mel Gorman 2013-07-03 968 * for IO but are being recycled through the LRU before the
283aba9f9e Mel Gorman 2013-07-03 969 * IO can complete. Waiting on the page itself risks an
283aba9f9e Mel Gorman 2013-07-03 970 * indefinite stall if it is impossible to writeback the
283aba9f9e Mel Gorman 2013-07-03 971 * page due to IO error or disconnected storage so instead
b1a6f21e3b Mel Gorman 2013-07-03 972 * note that the LRU is being scanned too quickly and the
b1a6f21e3b Mel Gorman 2013-07-03 973 * caller can stall after page list has been processed.
283aba9f9e Mel Gorman 2013-07-03 974 *
97c9341f72 Tejun Heo 2015-05-22 975 * 2) Global or new memcg reclaim encounters a page that is
ecf5fc6e96 Michal Hocko 2015-08-04 976 * not marked for immediate reclaim, or the caller does not
ecf5fc6e96 Michal Hocko 2015-08-04 977 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
ecf5fc6e96 Michal Hocko 2015-08-04 978 * not to fs). In this case mark the page for immediate
97c9341f72 Tejun Heo 2015-05-22 979 * reclaim and continue scanning.
283aba9f9e Mel Gorman 2013-07-03 980 *
ecf5fc6e96 Michal Hocko 2015-08-04 981 * Require may_enter_fs because we would wait on fs, which
ecf5fc6e96 Michal Hocko 2015-08-04 982 * may not have submitted IO yet. And the loop driver might
283aba9f9e Mel Gorman 2013-07-03 983 * enter reclaim, and deadlock if it waits on a page for
283aba9f9e Mel Gorman 2013-07-03 984 * which it is needed to do the write (loop masks off
283aba9f9e Mel Gorman 2013-07-03 985 * __GFP_IO|__GFP_FS for this reason); but more thought
283aba9f9e Mel Gorman 2013-07-03 986 * would probably show more reasons.
283aba9f9e Mel Gorman 2013-07-03 987 *
7fadc82022 Hugh Dickins 2015-09-08 988 * 3) Legacy memcg encounters a page that is already marked
283aba9f9e Mel Gorman 2013-07-03 989 * PageReclaim. memcg does not have any dirty pages
283aba9f9e Mel Gorman 2013-07-03 990 * throttling so we could easily OOM just because too many
283aba9f9e Mel Gorman 2013-07-03 991 * pages are in writeback and there is nothing else to
283aba9f9e Mel Gorman 2013-07-03 992 * reclaim. Wait for the writeback to complete.
c55e8d035b Johannes Weiner 2017-02-24 993 *
c55e8d035b Johannes Weiner 2017-02-24 994 * In cases 1) and 2) we activate the pages to get them out of
c55e8d035b Johannes Weiner 2017-02-24 995 * the way while we continue scanning for clean pages on the
c55e8d035b Johannes Weiner 2017-02-24 996 * inactive list and refilling from the active list. The
c55e8d035b Johannes Weiner 2017-02-24 997 * observation here is that waiting for disk writes is more
c55e8d035b Johannes Weiner 2017-02-24 998 * expensive than potentially causing reloads down the line.
c55e8d035b Johannes Weiner 2017-02-24 999 * Since they're marked for immediate reclaim, they won't put
c55e8d035b Johannes Weiner 2017-02-24 1000 * memory pressure on the cache working set any longer than it
c55e8d035b Johannes Weiner 2017-02-24 1001 * takes to write them to disk.
e62e384e9d Michal Hocko 2012-07-31 1002 */
283aba9f9e Mel Gorman 2013-07-03 1003 if (PageWriteback(page)) {
283aba9f9e Mel Gorman 2013-07-03 1004 /* Case 1 above */
283aba9f9e Mel Gorman 2013-07-03 1005 if (current_is_kswapd() &&
283aba9f9e Mel Gorman 2013-07-03 1006 PageReclaim(page) &&
599d0c954f Mel Gorman 2016-07-28 1007 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
b1a6f21e3b Mel Gorman 2013-07-03 1008 nr_immediate++;
c55e8d035b Johannes Weiner 2017-02-24 1009 goto activate_locked;
283aba9f9e Mel Gorman 2013-07-03 1010
283aba9f9e Mel Gorman 2013-07-03 1011 /* Case 2 above */
97c9341f72 Tejun Heo 2015-05-22 1012 } else if (sane_reclaim(sc) ||
ecf5fc6e96 Michal Hocko 2015-08-04 1013 !PageReclaim(page) || !may_enter_fs) {
c3b94f44fc Hugh Dickins 2012-07-31 1014 /*
c3b94f44fc Hugh Dickins 2012-07-31 1015 * This is slightly racy - end_page_writeback()
c3b94f44fc Hugh Dickins 2012-07-31 1016 * might have just cleared PageReclaim, then
c3b94f44fc Hugh Dickins 2012-07-31 1017 * setting PageReclaim here end up interpreted
c3b94f44fc Hugh Dickins 2012-07-31 1018 * as PageReadahead - but that does not matter
c3b94f44fc Hugh Dickins 2012-07-31 1019 * enough to care. What we do want is for this
c3b94f44fc Hugh Dickins 2012-07-31 1020 * page to have PageReclaim set next time memcg
c3b94f44fc Hugh Dickins 2012-07-31 1021 * reclaim reaches the tests above, so it will
c3b94f44fc Hugh Dickins 2012-07-31 1022 * then wait_on_page_writeback() to avoid OOM;
c3b94f44fc Hugh Dickins 2012-07-31 1023 * and it's also appropriate in global reclaim.
c3b94f44fc Hugh Dickins 2012-07-31 1024 */
c3b94f44fc Hugh Dickins 2012-07-31 1025 SetPageReclaim(page);
92df3a723f Mel Gorman 2011-10-31 1026 nr_writeback++;
c55e8d035b Johannes Weiner 2017-02-24 1027 goto activate_locked;
283aba9f9e Mel Gorman 2013-07-03 1028
283aba9f9e Mel Gorman 2013-07-03 1029 /* Case 3 above */
283aba9f9e Mel Gorman 2013-07-03 1030 } else {
7fadc82022 Hugh Dickins 2015-09-08 1031 unlock_page(page);
c3b94f44fc Hugh Dickins 2012-07-31 1032 wait_on_page_writeback(page);
7fadc82022 Hugh Dickins 2015-09-08 1033 /* then go back and try same page again */
7fadc82022 Hugh Dickins 2015-09-08 1034 list_add_tail(&page->lru, page_list);
7fadc82022 Hugh Dickins 2015-09-08 1035 continue;
e62e384e9d Michal Hocko 2012-07-31 1036 }
283aba9f9e Mel Gorman 2013-07-03 1037 }
^1da177e4c Linus Torvalds 2005-04-16 1038
02c6de8d75 Minchan Kim 2012-10-08 1039 if (!force_reclaim)
6a18adb35c Konstantin Khlebnikov 2012-05-29 1040 references = page_check_references(page, sc);
02c6de8d75 Minchan Kim 2012-10-08 1041
dfc8d636cd Johannes Weiner 2010-03-05 1042 switch (references) {
dfc8d636cd Johannes Weiner 2010-03-05 1043 case PAGEREF_ACTIVATE:
^1da177e4c Linus Torvalds 2005-04-16 1044 goto activate_locked;
6457474624 Johannes Weiner 2010-03-05 1045 case PAGEREF_KEEP:
5bccd16657 Michal Hocko 2017-02-22 1046 nr_ref_keep++;
6457474624 Johannes Weiner 2010-03-05 1047 goto keep_locked;
dfc8d636cd Johannes Weiner 2010-03-05 1048 case PAGEREF_RECLAIM:
dfc8d636cd Johannes Weiner 2010-03-05 1049 case PAGEREF_RECLAIM_CLEAN:
dfc8d636cd Johannes Weiner 2010-03-05 1050 ; /* try to reclaim the page below */
dfc8d636cd Johannes Weiner 2010-03-05 1051 }
^1da177e4c Linus Torvalds 2005-04-16 1052
^1da177e4c Linus Torvalds 2005-04-16 1053 /*
^1da177e4c Linus Torvalds 2005-04-16 1054 * Anonymous process memory has backing store?
^1da177e4c Linus Torvalds 2005-04-16 1055 * Try to allocate it some swap space here.
802a3a92ad Shaohua Li 2017-05-03 1056 * Lazyfree page could be freed directly
^1da177e4c Linus Torvalds 2005-04-16 1057 */
bd4c82c22c Huang Ying 2017-09-06 1058 if (PageAnon(page) && PageSwapBacked(page)) {
bd4c82c22c Huang Ying 2017-09-06 1059 if (!PageSwapCache(page)) {
63eb6b93ce Hugh Dickins 2008-11-19 1060 if (!(sc->gfp_mask & __GFP_IO))
63eb6b93ce Hugh Dickins 2008-11-19 1061 goto keep_locked;
747552b1e7 Huang Ying 2017-07-06 1062 if (PageTransHuge(page)) {
b8f593cd08 Huang Ying 2017-07-06 1063 /* cannot split THP, skip it */
747552b1e7 Huang Ying 2017-07-06 1064 if (!can_split_huge_page(page, NULL))
b8f593cd08 Huang Ying 2017-07-06 1065 goto activate_locked;
747552b1e7 Huang Ying 2017-07-06 1066 /*
747552b1e7 Huang Ying 2017-07-06 1067 * Split pages without a PMD map right
747552b1e7 Huang Ying 2017-07-06 1068 * away. Chances are some or all of the
747552b1e7 Huang Ying 2017-07-06 1069 * tail pages can be freed without IO.
747552b1e7 Huang Ying 2017-07-06 1070 */
747552b1e7 Huang Ying 2017-07-06 1071 if (!compound_mapcount(page) &&
bd4c82c22c Huang Ying 2017-09-06 1072 split_huge_page_to_list(page,
bd4c82c22c Huang Ying 2017-09-06 1073 page_list))
747552b1e7 Huang Ying 2017-07-06 1074 goto activate_locked;
747552b1e7 Huang Ying 2017-07-06 1075 }
0f0746589e Minchan Kim 2017-07-06 1076 if (!add_to_swap(page)) {
0f0746589e Minchan Kim 2017-07-06 1077 if (!PageTransHuge(page))
^1da177e4c Linus Torvalds 2005-04-16 1078 goto activate_locked;
bd4c82c22c Huang Ying 2017-09-06 1079 /* Fallback to swap normal pages */
bd4c82c22c Huang Ying 2017-09-06 1080 if (split_huge_page_to_list(page,
bd4c82c22c Huang Ying 2017-09-06 1081 page_list))
0f0746589e Minchan Kim 2017-07-06 1082 goto activate_locked;
fe490cc0fe Huang Ying 2017-09-06 1083 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
fe490cc0fe Huang Ying 2017-09-06 1084 count_vm_event(THP_SWPOUT_FALLBACK);
fe490cc0fe Huang Ying 2017-09-06 1085 #endif
0f0746589e Minchan Kim 2017-07-06 1086 if (!add_to_swap(page))
0f0746589e Minchan Kim 2017-07-06 1087 goto activate_locked;
0f0746589e Minchan Kim 2017-07-06 1088 }
0f0746589e Minchan Kim 2017-07-06 1089
63eb6b93ce Hugh Dickins 2008-11-19 1090 may_enter_fs = 1;
^1da177e4c Linus Torvalds 2005-04-16 1091
e2be15f6c3 Mel Gorman 2013-07-03 1092 /* Adding to swap updated mapping */
^1da177e4c Linus Torvalds 2005-04-16 1093 mapping = page_mapping(page);
bd4c82c22c Huang Ying 2017-09-06 1094 }
7751b2da6b Kirill A. Shutemov 2016-07-26 1095 } else if (unlikely(PageTransHuge(page))) {
7751b2da6b Kirill A. Shutemov 2016-07-26 1096 /* Split file THP */
7751b2da6b Kirill A. Shutemov 2016-07-26 1097 if (split_huge_page_to_list(page, page_list))
7751b2da6b Kirill A. Shutemov 2016-07-26 1098 goto keep_locked;
e2be15f6c3 Mel Gorman 2013-07-03 1099 }
^1da177e4c Linus Torvalds 2005-04-16 1100
^1da177e4c Linus Torvalds 2005-04-16 1101 /*
^1da177e4c Linus Torvalds 2005-04-16 1102 * The page is mapped into the page tables of one or more
^1da177e4c Linus Torvalds 2005-04-16 1103 * processes. Try to unmap it here.
^1da177e4c Linus Torvalds 2005-04-16 1104 */
802a3a92ad Shaohua Li 2017-05-03 1105 if (page_mapped(page)) {
bd4c82c22c Huang Ying 2017-09-06 1106 enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
bd4c82c22c Huang Ying 2017-09-06 1107
bd4c82c22c Huang Ying 2017-09-06 1108 if (unlikely(PageTransHuge(page)))
bd4c82c22c Huang Ying 2017-09-06 1109 flags |= TTU_SPLIT_HUGE_PMD;
bd4c82c22c Huang Ying 2017-09-06 1110 if (!try_to_unmap(page, flags)) {
5bccd16657 Michal Hocko 2017-02-22 1111 nr_unmap_fail++;
^1da177e4c Linus Torvalds 2005-04-16 1112 goto activate_locked;
^1da177e4c Linus Torvalds 2005-04-16 1113 }
^1da177e4c Linus Torvalds 2005-04-16 1114 }
^1da177e4c Linus Torvalds 2005-04-16 1115
^1da177e4c Linus Torvalds 2005-04-16 1116 if (PageDirty(page)) {
ee72886d8e Mel Gorman 2011-10-31 1117 /*
4eda482350 Johannes Weiner 2017-02-24 1118 * Only kswapd can writeback filesystem pages
4eda482350 Johannes Weiner 2017-02-24 1119 * to avoid risk of stack overflow. But avoid
4eda482350 Johannes Weiner 2017-02-24 1120 * injecting inefficient single-page IO into
4eda482350 Johannes Weiner 2017-02-24 1121 * flusher writeback as much as possible: only
4eda482350 Johannes Weiner 2017-02-24 1122 * write pages when we've encountered many
4eda482350 Johannes Weiner 2017-02-24 1123 * dirty pages, and when we've already scanned
4eda482350 Johannes Weiner 2017-02-24 1124 * the rest of the LRU for clean pages and see
4eda482350 Johannes Weiner 2017-02-24 1125 * the same dirty pages again (PageReclaim).
ee72886d8e Mel Gorman 2011-10-31 1126 */
f84f6e2b08 Mel Gorman 2011-10-31 1127 if (page_is_file_cache(page) &&
4eda482350 Johannes Weiner 2017-02-24 1128 (!current_is_kswapd() || !PageReclaim(page) ||
599d0c954f Mel Gorman 2016-07-28 1129 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
49ea7eb65e Mel Gorman 2011-10-31 1130 /*
49ea7eb65e Mel Gorman 2011-10-31 1131 * Immediately reclaim when written back.
49ea7eb65e Mel Gorman 2011-10-31 1132 * Similar in principal to deactivate_page()
49ea7eb65e Mel Gorman 2011-10-31 1133 * except we already have the page isolated
49ea7eb65e Mel Gorman 2011-10-31 1134 * and know it's dirty
49ea7eb65e Mel Gorman 2011-10-31 1135 */
c4a25635b6 Mel Gorman 2016-07-28 1136 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
49ea7eb65e Mel Gorman 2011-10-31 1137 SetPageReclaim(page);
49ea7eb65e Mel Gorman 2011-10-31 1138
c55e8d035b Johannes Weiner 2017-02-24 1139 goto activate_locked;
ee72886d8e Mel Gorman 2011-10-31 1140 }
ee72886d8e Mel Gorman 2011-10-31 1141
dfc8d636cd Johannes Weiner 2010-03-05 1142 if (references == PAGEREF_RECLAIM_CLEAN)
^1da177e4c Linus Torvalds 2005-04-16 1143 goto keep_locked;
4dd4b92021 Andrew Morton 2008-03-24 1144 if (!may_enter_fs)
^1da177e4c Linus Torvalds 2005-04-16 1145 goto keep_locked;
52a8363eae Christoph Lameter 2006-02-01 1146 if (!sc->may_writepage)
^1da177e4c Linus Torvalds 2005-04-16 1147 goto keep_locked;
^1da177e4c Linus Torvalds 2005-04-16 1148
d950c9477d Mel Gorman 2015-09-04 1149 /*
d950c9477d Mel Gorman 2015-09-04 1150 * Page is dirty. Flush the TLB if a writable entry
d950c9477d Mel Gorman 2015-09-04 1151 * potentially exists to avoid CPU writes after IO
d950c9477d Mel Gorman 2015-09-04 1152 * starts and then write it out here.
d950c9477d Mel Gorman 2015-09-04 1153 */
d950c9477d Mel Gorman 2015-09-04 1154 try_to_unmap_flush_dirty();
7d3579e8e6 KOSAKI Motohiro 2010-10-26 1155 switch (pageout(page, mapping, sc)) {
^1da177e4c Linus Torvalds 2005-04-16 1156 case PAGE_KEEP:
^1da177e4c Linus Torvalds 2005-04-16 1157 goto keep_locked;
^1da177e4c Linus Torvalds 2005-04-16 1158 case PAGE_ACTIVATE:
^1da177e4c Linus Torvalds 2005-04-16 1159 goto activate_locked;
^1da177e4c Linus Torvalds 2005-04-16 1160 case PAGE_SUCCESS:
7d3579e8e6 KOSAKI Motohiro 2010-10-26 1161 if (PageWriteback(page))
41ac1999c3 Mel Gorman 2012-05-29 1162 goto keep;
7d3579e8e6 KOSAKI Motohiro 2010-10-26 1163 if (PageDirty(page))
^1da177e4c Linus Torvalds 2005-04-16 1164 goto keep;
7d3579e8e6 KOSAKI Motohiro 2010-10-26 1165
^1da177e4c Linus Torvalds 2005-04-16 1166 /*
^1da177e4c Linus Torvalds 2005-04-16 1167 * A synchronous write - probably a ramdisk. Go
^1da177e4c Linus Torvalds 2005-04-16 1168 * ahead and try to reclaim the page.
^1da177e4c Linus Torvalds 2005-04-16 1169 */
529ae9aaa0 Nick Piggin 2008-08-02 1170 if (!trylock_page(page))
^1da177e4c Linus Torvalds 2005-04-16 1171 goto keep;
^1da177e4c Linus Torvalds 2005-04-16 1172 if (PageDirty(page) || PageWriteback(page))
^1da177e4c Linus Torvalds 2005-04-16 1173 goto keep_locked;
^1da177e4c Linus Torvalds 2005-04-16 1174 mapping = page_mapping(page);
^1da177e4c Linus Torvalds 2005-04-16 1175 case PAGE_CLEAN:
^1da177e4c Linus Torvalds 2005-04-16 1176 ; /* try to free the page below */
^1da177e4c Linus Torvalds 2005-04-16 1177 }
^1da177e4c Linus Torvalds 2005-04-16 1178 }
^1da177e4c Linus Torvalds 2005-04-16 1179
^1da177e4c Linus Torvalds 2005-04-16 1180 /*
^1da177e4c Linus Torvalds 2005-04-16 1181 * If the page has buffers, try to free the buffer mappings
^1da177e4c Linus Torvalds 2005-04-16 1182 * associated with this page. If we succeed we try to free
^1da177e4c Linus Torvalds 2005-04-16 1183 * the page as well.
^1da177e4c Linus Torvalds 2005-04-16 1184 *
^1da177e4c Linus Torvalds 2005-04-16 1185 * We do this even if the page is PageDirty().
^1da177e4c Linus Torvalds 2005-04-16 1186 * try_to_release_page() does not perform I/O, but it is
^1da177e4c Linus Torvalds 2005-04-16 1187 * possible for a page to have PageDirty set, but it is actually
^1da177e4c Linus Torvalds 2005-04-16 1188 * clean (all its buffers are clean). This happens if the
^1da177e4c Linus Torvalds 2005-04-16 1189 * buffers were written out directly, with submit_bh(). ext3
^1da177e4c Linus Torvalds 2005-04-16 1190 * will do this, as well as the blockdev mapping.
^1da177e4c Linus Torvalds 2005-04-16 1191 * try_to_release_page() will discover that cleanness and will
^1da177e4c Linus Torvalds 2005-04-16 1192 * drop the buffers and mark the page clean - it can be freed.
^1da177e4c Linus Torvalds 2005-04-16 1193 *
^1da177e4c Linus Torvalds 2005-04-16 1194 * Rarely, pages can have buffers and no ->mapping. These are
^1da177e4c Linus Torvalds 2005-04-16 1195 * the pages which were not successfully invalidated in
^1da177e4c Linus Torvalds 2005-04-16 1196 * truncate_complete_page(). We try to drop those buffers here
^1da177e4c Linus Torvalds 2005-04-16 1197 * and if that worked, and the page is no longer mapped into
^1da177e4c Linus Torvalds 2005-04-16 1198 * process address space (page_count == 1) it can be freed.
^1da177e4c Linus Torvalds 2005-04-16 1199 * Otherwise, leave the page on the LRU so it is swappable.
^1da177e4c Linus Torvalds 2005-04-16 1200 */
266cf658ef David Howells 2009-04-03 1201 if (page_has_private(page)) {
^1da177e4c Linus Torvalds 2005-04-16 1202 if (!try_to_release_page(page, sc->gfp_mask))
^1da177e4c Linus Torvalds 2005-04-16 1203 goto activate_locked;
e286781d5f Nick Piggin 2008-07-25 1204 if (!mapping && page_count(page) == 1) {
e286781d5f Nick Piggin 2008-07-25 1205 unlock_page(page);
e286781d5f Nick Piggin 2008-07-25 1206 if (put_page_testzero(page))
^1da177e4c Linus Torvalds 2005-04-16 1207 goto free_it;
e286781d5f Nick Piggin 2008-07-25 1208 else {
e286781d5f Nick Piggin 2008-07-25 1209 /*
e286781d5f Nick Piggin 2008-07-25 1210 * rare race with speculative reference.
e286781d5f Nick Piggin 2008-07-25 1211 * the speculative reference will free
e286781d5f Nick Piggin 2008-07-25 1212 * this page shortly, so we may
e286781d5f Nick Piggin 2008-07-25 1213 * increment nr_reclaimed here (and
e286781d5f Nick Piggin 2008-07-25 1214 * leave it off the LRU).
e286781d5f Nick Piggin 2008-07-25 1215 */
e286781d5f Nick Piggin 2008-07-25 1216 nr_reclaimed++;
e286781d5f Nick Piggin 2008-07-25 1217 continue;
e286781d5f Nick Piggin 2008-07-25 1218 }
e286781d5f Nick Piggin 2008-07-25 1219 }
^1da177e4c Linus Torvalds 2005-04-16 1220 }
^1da177e4c Linus Torvalds 2005-04-16 1221
802a3a92ad Shaohua Li 2017-05-03 1222 if (PageAnon(page) && !PageSwapBacked(page)) {
802a3a92ad Shaohua Li 2017-05-03 1223 /* follow __remove_mapping for reference */
802a3a92ad Shaohua Li 2017-05-03 1224 if (!page_ref_freeze(page, 1))
49d2e9cc45 Christoph Lameter 2006-01-08 1225 goto keep_locked;
802a3a92ad Shaohua Li 2017-05-03 1226 if (PageDirty(page)) {
802a3a92ad Shaohua Li 2017-05-03 1227 page_ref_unfreeze(page, 1);
802a3a92ad Shaohua Li 2017-05-03 1228 goto keep_locked;
802a3a92ad Shaohua Li 2017-05-03 1229 }
^1da177e4c Linus Torvalds 2005-04-16 1230
802a3a92ad Shaohua Li 2017-05-03 1231 count_vm_event(PGLAZYFREED);
2262185c5b Roman Gushchin 2017-07-06 1232 count_memcg_page_event(page, PGLAZYFREED);
802a3a92ad Shaohua Li 2017-05-03 1233 } else if (!mapping || !__remove_mapping(mapping, page, true))
802a3a92ad Shaohua Li 2017-05-03 1234 goto keep_locked;
a978d6f521 Nick Piggin 2008-10-18 1235 /*
a978d6f521 Nick Piggin 2008-10-18 1236 * At this point, we have no other references and there is
a978d6f521 Nick Piggin 2008-10-18 1237 * no way to pick any more up (removed from LRU, removed
a978d6f521 Nick Piggin 2008-10-18 1238 * from pagecache). Can use non-atomic bitops now (and
a978d6f521 Nick Piggin 2008-10-18 1239 * we obviously don't have to worry about waking up a process
a978d6f521 Nick Piggin 2008-10-18 1240 * waiting on the page lock, because there are no references.
a978d6f521 Nick Piggin 2008-10-18 1241 */
48c935ad88 Kirill A. Shutemov 2016-01-15 1242 __ClearPageLocked(page);
e286781d5f Nick Piggin 2008-07-25 1243 free_it:
05ff51376f Andrew Morton 2006-03-22 1244 nr_reclaimed++;
abe4c3b50c Mel Gorman 2010-08-09 1245
abe4c3b50c Mel Gorman 2010-08-09 1246 /*
abe4c3b50c Mel Gorman 2010-08-09 1247 * Is there need to periodically free_page_list? It would
abe4c3b50c Mel Gorman 2010-08-09 1248 * appear not as the counts should be low
abe4c3b50c Mel Gorman 2010-08-09 1249 */
bd4c82c22c Huang Ying 2017-09-06 1250 if (unlikely(PageTransHuge(page))) {
bd4c82c22c Huang Ying 2017-09-06 1251 mem_cgroup_uncharge(page);
bd4c82c22c Huang Ying 2017-09-06 1252 (*get_compound_page_dtor(page))(page);
bd4c82c22c Huang Ying 2017-09-06 1253 } else
abe4c3b50c Mel Gorman 2010-08-09 1254 list_add(&page->lru, &free_pages);
^1da177e4c Linus Torvalds 2005-04-16 1255 continue;
^1da177e4c Linus Torvalds 2005-04-16 1256
^1da177e4c Linus Torvalds 2005-04-16 1257 activate_locked:
68a22394c2 Rik van Riel 2008-10-18 1258 /* Not a candidate for swapping, so reclaim swap space. */
ad6b67041a Minchan Kim 2017-05-03 1259 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
ad6b67041a Minchan Kim 2017-05-03 1260 PageMlocked(page)))
a2c43eed83 Hugh Dickins 2009-01-06 1261 try_to_free_swap(page);
309381feae Sasha Levin 2014-01-23 1262 VM_BUG_ON_PAGE(PageActive(page), page);
ad6b67041a Minchan Kim 2017-05-03 1263 if (!PageMlocked(page)) {
^1da177e4c Linus Torvalds 2005-04-16 1264 SetPageActive(page);
^1da177e4c Linus Torvalds 2005-04-16 1265 pgactivate++;
2262185c5b Roman Gushchin 2017-07-06 1266 count_memcg_page_event(page, PGACTIVATE);
ad6b67041a Minchan Kim 2017-05-03 1267 }
^1da177e4c Linus Torvalds 2005-04-16 1268 keep_locked:
^1da177e4c Linus Torvalds 2005-04-16 1269 unlock_page(page);
^1da177e4c Linus Torvalds 2005-04-16 1270 keep:
^1da177e4c Linus Torvalds 2005-04-16 1271 list_add(&page->lru, &ret_pages);
309381feae Sasha Levin 2014-01-23 1272 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
^1da177e4c Linus Torvalds 2005-04-16 1273 }
abe4c3b50c Mel Gorman 2010-08-09 1274
747db954ca Johannes Weiner 2014-08-08 1275 mem_cgroup_uncharge_list(&free_pages);
72b252aed5 Mel Gorman 2015-09-04 1276 try_to_unmap_flush();
2d4894b5d2 Mel Gorman 2017-11-15 1277 free_unref_page_list(&free_pages);
abe4c3b50c Mel Gorman 2010-08-09 1278
^1da177e4c Linus Torvalds 2005-04-16 1279 list_splice(&ret_pages, page_list);
f8891e5e1f Christoph Lameter 2006-06-30 1280 count_vm_events(PGACTIVATE, pgactivate);
0a31bc97c8 Johannes Weiner 2014-08-08 1281
3c710c1ad1 Michal Hocko 2017-02-22 1282 if (stat) {
3c710c1ad1 Michal Hocko 2017-02-22 1283 stat->nr_dirty = nr_dirty;
3c710c1ad1 Michal Hocko 2017-02-22 1284 stat->nr_congested = nr_congested;
3c710c1ad1 Michal Hocko 2017-02-22 1285 stat->nr_unqueued_dirty = nr_unqueued_dirty;
3c710c1ad1 Michal Hocko 2017-02-22 1286 stat->nr_writeback = nr_writeback;
3c710c1ad1 Michal Hocko 2017-02-22 1287 stat->nr_immediate = nr_immediate;
5bccd16657 Michal Hocko 2017-02-22 1288 stat->nr_activate = pgactivate;
5bccd16657 Michal Hocko 2017-02-22 1289 stat->nr_ref_keep = nr_ref_keep;
5bccd16657 Michal Hocko 2017-02-22 1290 stat->nr_unmap_fail = nr_unmap_fail;
3c710c1ad1 Michal Hocko 2017-02-22 1291 }
05ff51376f Andrew Morton 2006-03-22 1292 return nr_reclaimed;
^1da177e4c Linus Torvalds 2005-04-16 @1293 }
^1da177e4c Linus Torvalds 2005-04-16 1294
:::::: The code at line 1293 was first introduced by commit
:::::: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Linux-2.6.12-rc2
:::::: TO: Linus Torvalds <torvalds@ppc970.osdl.org>
:::::: CC: Linus Torvalds <torvalds@ppc970.osdl.org>
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation