On 2020/9/24 11:28 AM, Alex Shi wrote:
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -273,6 +273,8 @@ enum lruvec_flags {
>  };
> 
>  struct lruvec {
> +	/* per lruvec lru_lock for memcg */
> +	spinlock_t lru_lock;
>  	struct list_head lists[NR_LRU_LISTS];
>  	/*
>  	 * These track the cost of reclaiming one LRU - file or anon -

Hi All,

Rong Chen from Intel's LKP reported a big regression with this patch: about a 12%~32% performance drop on fio.read_iops and on the case-lru-file-mmap-read case, on a wide Intel machine with the attached kernel config.

Hugh Dickins pointed out that it is a false sharing issue on the lru_lock, and that it can be fixed by moving the lru_lock out of the busy lists[] cacheline, as in the following patch:

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a75e6d0effcb..58b21bffef95 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -272,9 +272,9 @@ enum lruvec_flags {
 };
 
 struct lruvec {
+	struct list_head lists[NR_LRU_LISTS];
 	/* per lruvec lru_lock for memcg */
 	spinlock_t lru_lock;
-	struct list_head lists[NR_LRU_LISTS];
 	/*
 	 * These track the cost of reclaiming one LRU - file or anon
 	 * over the other. As the observed cost of reclaiming one LRU

Although this fixes the problem, I still have no idea of the exact reason or the root cause. Any comments on this?

Thanks
Alex
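
P.S. To make the false sharing concrete to myself, I wrote the minimal
userspace sketch below. It is only a rough model, not kernel code: it
assumes 64-byte cache lines, LP64 pointers, a 4-byte spinlock_t, a
cache-line-aligned struct start, and it ignores all the other lruvec
fields. It just prints which cache line the lock and each list head
land on, for both layouts:

#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE	64
#define NR_LRU_LISTS	5

struct list_head {			/* 16 bytes on LP64 */
	struct list_head *next, *prev;
};

/* Layout from the original patch: the lock ahead of the list heads. */
struct lruvec_lock_first {
	int lru_lock;			/* stand-in for a 4-byte spinlock_t */
	struct list_head lists[NR_LRU_LISTS];	/* 8-byte aligned, so offset 8 */
};

/* Layout from the fix: the list heads ahead of the lock. */
struct lruvec_lock_last {
	struct list_head lists[NR_LRU_LISTS];
	int lru_lock;
};

static void report(const char *name, size_t lock_off, size_t lists_off)
{
	size_t i;

	printf("%s: lock in line %zu, list heads start in lines:",
	       name, lock_off / CACHE_LINE);
	for (i = 0; i < NR_LRU_LISTS; i++)
		printf(" %zu",
		       (lists_off + i * sizeof(struct list_head)) / CACHE_LINE);
	printf("\n");
}

int main(void)
{
	report("lock first", offsetof(struct lruvec_lock_first, lru_lock),
	       offsetof(struct lruvec_lock_first, lists));
	report("lock last ", offsetof(struct lruvec_lock_last, lru_lock),
	       offsetof(struct lruvec_lock_last, lists));
	return 0;
}

Under those assumptions, with the lock first it sits in the same cache
line as the first few list heads, so writes to the lock from other CPUs
keep invalidating the line that list walkers are reading; with the
lists first, the leading list heads get a line free of the lock (though
the lock can still share a line with the tail of the array, which may
be why the picture is not fully clear to me). An alternative I have not
tried would be an explicit alignment annotation such as
____cacheline_aligned_in_smp to control where the lock lands, at the
cost of a bigger lruvec per memcg per node.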