From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from kanga.kvack.org (kanga.kvack.org [205.233.56.17]) by smtp.lore.kernel.org (Postfix) with ESMTP id 9A28BC433F5 for ; Wed, 30 Mar 2022 23:02:27 +0000 (UTC) Received: by kanga.kvack.org (Postfix) id 35A9C6B0073; Wed, 30 Mar 2022 19:02:27 -0400 (EDT) Received: by kanga.kvack.org (Postfix, from userid 40) id 2E2F88D0002; Wed, 30 Mar 2022 19:02:27 -0400 (EDT) X-Delivered-To: int-list-linux-mm@kvack.org Received: by kanga.kvack.org (Postfix, from userid 63042) id 183F18D0001; Wed, 30 Mar 2022 19:02:27 -0400 (EDT) X-Delivered-To: linux-mm@kvack.org Received: from forelay.hostedemail.com (smtprelay0118.hostedemail.com [216.40.44.118]) by kanga.kvack.org (Postfix) with ESMTP id 0C61D6B0073 for ; Wed, 30 Mar 2022 19:02:27 -0400 (EDT) Received: from smtpin30.hostedemail.com (10.5.19.251.rfc1918.com [10.5.19.251]) by forelay01.hostedemail.com (Postfix) with ESMTP id C09F0182EE046 for ; Wed, 30 Mar 2022 23:02:26 +0000 (UTC) X-FDA: 79302578292.30.1EFBE38 Received: from mx0b-00082601.pphosted.com (mx0b-00082601.pphosted.com [67.231.153.30]) by imf09.hostedemail.com (Postfix) with ESMTP id 3E059140016 for ; Wed, 30 Mar 2022 23:02:26 +0000 (UTC) Received: from pps.filterd (m0109331.ppops.net [127.0.0.1]) by mx0a-00082601.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id 22UHUoVI022848 for ; Wed, 30 Mar 2022 16:02:25 -0700 Received: from mail.thefacebook.com ([163.114.132.120]) by mx0a-00082601.pphosted.com (PPS) with ESMTPS id 3f4unuaa65-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128 verify=NOT) for ; Wed, 30 Mar 2022 16:02:25 -0700 Received: from twshared29473.14.frc2.facebook.com (2620:10d:c085:108::8) by mail.thefacebook.com (2620:10d:c085:21d::5) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2308.21; Wed, 30 Mar 2022 16:02:23 -0700 Received: by 
devbig932.frc1.facebook.com (Postfix, from userid 4523) id CE4F34764AAC; Wed, 30 Mar 2022 15:56:51 -0700 (PDT) From: Song Liu To: , , , CC: , , , , , , , Song Liu Subject: [PATCH bpf 2/4] vmalloc: introduce HAVE_ARCH_HUGE_VMALLOC_FLAG Date: Wed, 30 Mar 2022 15:56:40 -0700 Message-ID: <20220330225642.1163897-3-song@kernel.org> X-Mailer: git-send-email 2.30.2 In-Reply-To: <20220330225642.1163897-1-song@kernel.org> References: <20220330225642.1163897-1-song@kernel.org> MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-FB-Internal: Safe Content-Type: text/plain X-Proofpoint-ORIG-GUID: jM08Mzf0VNFuEJVFOinvMhqkabcaqHA3 X-Proofpoint-GUID: jM08Mzf0VNFuEJVFOinvMhqkabcaqHA3 X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.850,Hydra:6.0.425,FMLib:17.11.64.514 definitions=2022-03-30_06,2022-03-30_01,2022-02-23_01 X-Rspam-User: Authentication-Results: imf09.hostedemail.com; dkim=none; spf=none (imf09.hostedemail.com: domain of "prvs=50888507eb=songliubraving@fb.com" has no SPF policy when checking 67.231.153.30) smtp.mailfrom="prvs=50888507eb=songliubraving@fb.com"; dmarc=fail reason="No valid SPF, No valid DKIM" header.from=kernel.org (policy=none) X-Rspamd-Server: rspam03 X-Rspamd-Queue-Id: 3E059140016 X-Stat-Signature: n7698puo3erc6ug18md396smqcf39xrm X-HE-Tag: 1648681346-655815 X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4 Sender: owner-linux-mm@kvack.org Precedence: bulk X-Loop: owner-majordomo@kvack.org List-ID: With HAVE_ARCH_HUGE_VMALLOC_FLAG, users of __vmalloc_node_range() could use VM_TRY_HUGE_VMAP to (try to) allocate PMD_SIZE pages for size >=3D PMD_SIZE cases. Similar to HAVE_ARCH_HUGE_VMALLOC, the user can disable huge pages by specifying nohugeiomap in the kernel command line. The first user of VM_TRY_HUGE_VMAP will be bpf_prog_pack. 
Signed-off-by: Song Liu --- arch/Kconfig | 9 +++++++++ include/linux/vmalloc.h | 9 +++++++-- mm/vmalloc.c | 28 +++++++++++++++++++--------- 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 33e06966f248..23b6e92aebaa 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -864,6 +864,15 @@ config HAVE_ARCH_HUGE_VMALLOC depends on HAVE_ARCH_HUGE_VMAP bool =20 +# +# HAVE_ARCH_HUGE_VMALLOC_FLAG allows users of __vmalloc_node_range to al= locate +# huge page without HAVE_ARCH_HUGE_VMALLOC. To allocate huge pages, the = user +# need to call __vmalloc_node_range with VM_TRY_HUGE_VMAP. +# +config HAVE_ARCH_HUGE_VMALLOC_FLAG + depends on HAVE_ARCH_HUGE_VMAP + bool + config ARCH_WANT_HUGE_PMD_SHARE bool =20 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3b1df7da402d..a48d0690b66f 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -35,6 +35,11 @@ struct notifier_block; /* in notifier.h */ #define VM_DEFER_KMEMLEAK 0 #endif =20 +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC_FLAG +#define VM_TRY_HUGE_VMAP 0x00001000 /* Allow for huge pages on HAVE_ARCH= _HUGE_VMALLOC_FLAG arch's */ +#else +#define VM_TRY_HUGE_VMAP 0 +#endif /* bits [20..32] reserved for arch specific ioremap internals */ =20 /* @@ -51,7 +56,7 @@ struct vm_struct { unsigned long size; unsigned long flags; struct page **pages; -#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +#if (defined(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || defined(CONFIG_HAVE_ARCH_= HUGE_VMALLOC_FLAG)) unsigned int page_order; #endif unsigned int nr_pages; @@ -225,7 +230,7 @@ static inline bool is_vm_area_hugepages(const void *a= ddr) * prevents that. This only indicates the size of the physical page * allocated in the vmalloc layer. 
*/ -#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +#if (defined(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || defined(CONFIG_HAVE_ARCH_= HUGE_VMALLOC_FLAG)) return find_vm_area(addr)->page_order > 0; #else return false; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e163372d3967..179200bce285 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -46,7 +46,7 @@ #include "internal.h" #include "pgalloc-track.h" =20 -#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +#if (defined(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || defined(CONFIG_HAVE_ARCH_= HUGE_VMALLOC_FLAG)) static unsigned int __ro_after_init ioremap_max_page_shift =3D BITS_PER_= LONG - 1; =20 static int __init set_nohugeiomap(char *str) @@ -55,11 +55,11 @@ static int __init set_nohugeiomap(char *str) return 0; } early_param("nohugeiomap", set_nohugeiomap); -#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +#else /* CONFIG_HAVE_ARCH_HUGE_VMAP || CONFIG_HAVE_ARCH_HUGE_VMALLOC_FLA= G */ static const unsigned int ioremap_max_page_shift =3D PAGE_SHIFT; -#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP || CONFIG_HAVE_ARCH_HUGE_VMALLOC_FL= AG*/ =20 -#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +#if (defined(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || defined(CONFIG_HAVE_ARCH_= HUGE_VMALLOC_FLAG)) static bool __ro_after_init vmap_allow_huge =3D true; =20 static int __init set_nohugevmalloc(char *str) @@ -582,8 +582,9 @@ int vmap_pages_range_noflush(unsigned long addr, unsi= gned long end, =20 WARN_ON(page_shift < PAGE_SHIFT); =20 - if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || - page_shift =3D=3D PAGE_SHIFT) + if ((!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) && + !IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC_FLAG)) || + (page_shift =3D=3D PAGE_SHIFT)) return vmap_small_pages_range_noflush(addr, end, prot, pages); =20 for (i =3D 0; i < nr; i +=3D 1U << (page_shift - PAGE_SHIFT)) { @@ -2252,7 +2253,7 @@ static struct vm_struct *vmlist __initdata; =20 static inline unsigned int vm_area_page_order(struct vm_struct *vm) { -#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +#if 
(defined(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || defined(CONFIG_HAVE_ARCH_= HUGE_VMALLOC_FLAG)) return vm->page_order; #else return 0; @@ -2261,7 +2262,7 @@ static inline unsigned int vm_area_page_order(struc= t vm_struct *vm) =20 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned= int order) { -#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +#if (defined(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || defined(CONFIG_HAVE_ARCH_= HUGE_VMALLOC_FLAG)) vm->page_order =3D order; #else BUG_ON(order !=3D 0); @@ -3056,6 +3057,15 @@ static void *__vmalloc_area_node(struct vm_struct = *area, gfp_t gfp_mask, return NULL; } =20 +static bool vmalloc_try_huge_page(unsigned long vm_flags) +{ + if (!vmap_allow_huge || (vm_flags & VM_NO_HUGE_VMAP)) + return false; + + /* VM_TRY_HUGE_VMAP only works for CONFIG_HAVE_ARCH_HUGE_VMALLOC_FLAG *= / + return vm_flags & VM_TRY_HUGE_VMAP; +} + /** * __vmalloc_node_range - allocate virtually contiguous memory * @size: allocation size @@ -3106,7 +3116,7 @@ void *__vmalloc_node_range(unsigned long size, unsi= gned long align, return NULL; } =20 - if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) { + if (vmalloc_try_huge_page(vm_flags)) { unsigned long size_per_node; =20 /* --=20 2.30.2