tree:   git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   7732a9817fb01002bde7615066e86c156fb5a31b
commit: 0491d0d6aac97c5b8df17851db525f3758de26e6 [7235/7555] s390/mm: make hugepages_supported a boot time decision
config: s390-allmodconfig (attached as .config)
reproduce:
  wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
  chmod +x ~/bin/make.cross
  git checkout 0491d0d6aac97c5b8df17851db525f3758de26e6
  # save the attached .config to linux build tree
  make.cross ARCH=s390

All warnings:

   In file included from include/linux/list.h:8:0,
                    from include/linux/preempt.h:10,
                    from include/linux/spinlock.h:50,
                    from include/linux/mmzone.h:7,
                    from include/linux/gfp.h:5,
                    from include/linux/mm.h:9,
                    from include/linux/memblock.h:18,
                    from mm/cma.c:28:
   mm/cma.c: In function 'cma_init_reserved_mem':
   include/linux/kernel.h:729:17: warning: comparison of distinct pointer types lacks a cast
     (void) (&_max1 == &_max2);  \
                    ^
>> mm/cma.c:186:27: note: in expansion of macro 'max'
     alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
                              ^
   mm/cma.c: In function 'cma_declare_contiguous':
   include/linux/kernel.h:729:17: warning: comparison of distinct pointer types lacks a cast
     (void) (&_max1 == &_max2);  \
                    ^
   include/linux/kernel.h:728:9: note: in definition of macro 'max'
     typeof(y) _max2 = (y);   \
             ^
>> mm/cma.c:270:29: note: in expansion of macro 'max'
      (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
                                ^
   include/linux/kernel.h:729:17: warning: comparison of distinct pointer types lacks a cast
     (void) (&_max1 == &_max2);  \
                    ^
   include/linux/kernel.h:728:21: note: in definition of macro 'max'
     typeof(y) _max2 = (y);   \
                         ^
>> mm/cma.c:270:29: note: in expansion of macro 'max'
      (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
                                ^

vim +/max +186 mm/cma.c

a254129e Joonsoo Kim      2014-08-06   22  #ifndef DEBUG
a254129e Joonsoo Kim      2014-08-06   23  # define DEBUG
a254129e Joonsoo Kim      2014-08-06   24  #endif
a254129e Joonsoo Kim      2014-08-06   25  #endif
99e8ea6c Stefan Strogin   2015-04-15   26  #define CREATE_TRACE_POINTS
a254129e Joonsoo Kim      2014-08-06   27  
a254129e Joonsoo Kim      2014-08-06  @28  #include <linux/memblock.h>
a254129e Joonsoo Kim      2014-08-06   29  #include <linux/err.h>
a254129e Joonsoo Kim      2014-08-06   30  #include <linux/mm.h>
a254129e Joonsoo Kim      2014-08-06   31  #include <linux/mutex.h>
a254129e Joonsoo Kim      2014-08-06   32  #include <linux/sizes.h>
a254129e Joonsoo Kim      2014-08-06   33  #include <linux/slab.h>
a254129e Joonsoo Kim      2014-08-06   34  #include <linux/log2.h>
a254129e Joonsoo Kim      2014-08-06   35  #include <linux/cma.h>
f7426b98 Marek Szyprowski 2014-10-09   36  #include <linux/highmem.h>
620951e2 Thierry Reding   2014-12-12   37  #include <linux/io.h>
99e8ea6c Stefan Strogin   2015-04-15   38  #include <trace/events/cma.h>
a254129e Joonsoo Kim      2014-08-06   39  
28b24c1f Sasha Levin      2015-04-14   40  #include "cma.h"
28b24c1f Sasha Levin      2015-04-14   41  
28b24c1f Sasha Levin      2015-04-14   42  struct cma cma_areas[MAX_CMA_AREAS];
28b24c1f Sasha Levin      2015-04-14   43  unsigned cma_area_count;
a254129e Joonsoo Kim      2014-08-06   44  static DEFINE_MUTEX(cma_mutex);
a254129e Joonsoo Kim      2014-08-06   45  
ac173824 Sasha Levin      2015-04-14   46  phys_addr_t cma_get_base(const struct cma *cma)
a254129e Joonsoo Kim      2014-08-06   47  {
a254129e Joonsoo Kim      2014-08-06   48  	return PFN_PHYS(cma->base_pfn);
a254129e Joonsoo Kim      2014-08-06   49  }
a254129e Joonsoo Kim      2014-08-06   50  
ac173824 Sasha Levin      2015-04-14   51  unsigned long cma_get_size(const struct cma *cma)
a254129e Joonsoo Kim      2014-08-06   52  {
a254129e Joonsoo Kim      2014-08-06   53  	return cma->count << PAGE_SHIFT;
a254129e Joonsoo Kim      2014-08-06   54  }
a254129e Joonsoo Kim      2014-08-06   55  
ac173824 Sasha Levin      2015-04-14   56  static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
ac173824 Sasha Levin      2015-04-14   57  					     int align_order)
a254129e Joonsoo Kim      2014-08-06   58  {
68faed63 Weijie Yang      2014-10-13   59  	if (align_order <= cma->order_per_bit)
68faed63 Weijie Yang      2014-10-13   60  		return 0;
68faed63 Weijie Yang      2014-10-13   61  	return (1UL << (align_order - cma->order_per_bit)) - 1;
a254129e Joonsoo Kim      2014-08-06   62  }
a254129e Joonsoo Kim      2014-08-06   63  
850fc430 Danesh Petigara  2015-03-12   64  /*
850fc430 Danesh Petigara  2015-03-12   65   * Find a PFN aligned to the specified order and return an offset represented in
850fc430 Danesh Petigara  2015-03-12   66   * order_per_bits.
850fc430 Danesh Petigara  2015-03-12   67   */
ac173824 Sasha Levin      2015-04-14   68  static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
ac173824 Sasha Levin      2015-04-14   69  					       int align_order)
b5be83e3 Gregory Fong     2014-12-12   70  {
b5be83e3 Gregory Fong     2014-12-12   71  	if (align_order <= cma->order_per_bit)
b5be83e3 Gregory Fong     2014-12-12   72  		return 0;
850fc430 Danesh Petigara  2015-03-12   73  
850fc430 Danesh Petigara  2015-03-12   74  	return (ALIGN(cma->base_pfn, (1UL << align_order))
850fc430 Danesh Petigara  2015-03-12   75  		- cma->base_pfn) >> cma->order_per_bit;
b5be83e3 Gregory Fong     2014-12-12   76  }
b5be83e3 Gregory Fong     2014-12-12   77  
ac173824 Sasha Levin      2015-04-14   78  static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
a254129e Joonsoo Kim      2014-08-06   79  					      unsigned long pages)
a254129e Joonsoo Kim      2014-08-06   80  {
a254129e Joonsoo Kim      2014-08-06   81  	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
a254129e Joonsoo Kim      2014-08-06   82  }
a254129e Joonsoo Kim      2014-08-06   83  
ac173824 Sasha Levin      2015-04-14   84  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
ac173824 Sasha Levin      2015-04-14   85  			     unsigned int count)
a254129e Joonsoo Kim      2014-08-06   86  {
a254129e Joonsoo Kim      2014-08-06   87  	unsigned long bitmap_no, bitmap_count;
a254129e Joonsoo Kim      2014-08-06   88  
a254129e Joonsoo Kim      2014-08-06   89  	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
a254129e Joonsoo Kim      2014-08-06   90  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
a254129e Joonsoo Kim      2014-08-06   91  
a254129e Joonsoo Kim      2014-08-06   92  	mutex_lock(&cma->lock);
a254129e Joonsoo Kim      2014-08-06   93  	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
a254129e Joonsoo Kim      2014-08-06   94  	mutex_unlock(&cma->lock);
a254129e Joonsoo Kim      2014-08-06   95  }
a254129e Joonsoo Kim      2014-08-06   96  
a254129e Joonsoo Kim      2014-08-06   97  static int __init cma_activate_area(struct cma *cma)
a254129e Joonsoo Kim      2014-08-06   98  {
a254129e Joonsoo Kim      2014-08-06   99  	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
a254129e Joonsoo Kim      2014-08-06  100  	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
a254129e Joonsoo Kim      2014-08-06  101  	unsigned i = cma->count >> pageblock_order;
a254129e Joonsoo Kim      2014-08-06  102  	struct zone *zone;
a254129e Joonsoo Kim      2014-08-06  103  
a254129e Joonsoo Kim      2014-08-06  104  	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
a254129e Joonsoo Kim      2014-08-06  105  
a254129e Joonsoo Kim      2014-08-06  106  	if (!cma->bitmap)
a254129e Joonsoo Kim      2014-08-06  107  		return -ENOMEM;
a254129e Joonsoo Kim      2014-08-06  108  
a254129e Joonsoo Kim      2014-08-06  109  	WARN_ON_ONCE(!pfn_valid(pfn));
a254129e Joonsoo Kim      2014-08-06  110  	zone = page_zone(pfn_to_page(pfn));
a254129e Joonsoo Kim      2014-08-06  111  
a254129e Joonsoo Kim      2014-08-06  112  	do {
a254129e Joonsoo Kim      2014-08-06  113  		unsigned j;
a254129e Joonsoo Kim      2014-08-06  114  
a254129e Joonsoo Kim      2014-08-06  115  		base_pfn = pfn;
a254129e Joonsoo Kim      2014-08-06  116  		for (j = pageblock_nr_pages; j; --j, pfn++) {
a254129e Joonsoo Kim      2014-08-06  117  			WARN_ON_ONCE(!pfn_valid(pfn));
a254129e Joonsoo Kim      2014-08-06  118  			/*
a254129e Joonsoo Kim      2014-08-06  119  			 * alloc_contig_range requires the pfn range
a254129e Joonsoo Kim      2014-08-06  120  			 * specified to be in the same zone. Make this
a254129e Joonsoo Kim      2014-08-06  121  			 * simple by forcing the entire CMA resv range
a254129e Joonsoo Kim      2014-08-06  122  			 * to be in the same zone.
a254129e Joonsoo Kim      2014-08-06  123  			 */
a254129e Joonsoo Kim      2014-08-06  124  			if (page_zone(pfn_to_page(pfn)) != zone)
a254129e Joonsoo Kim      2014-08-06  125  				goto err;
a254129e Joonsoo Kim      2014-08-06  126  		}
a254129e Joonsoo Kim      2014-08-06  127  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
a254129e Joonsoo Kim      2014-08-06  128  	} while (--i);
a254129e Joonsoo Kim      2014-08-06  129  
a254129e Joonsoo Kim      2014-08-06  130  	mutex_init(&cma->lock);
26b02a1f Sasha Levin      2015-04-14  131  
26b02a1f Sasha Levin      2015-04-14  132  #ifdef CONFIG_CMA_DEBUGFS
26b02a1f Sasha Levin      2015-04-14  133  	INIT_HLIST_HEAD(&cma->mem_head);
26b02a1f Sasha Levin      2015-04-14  134  	spin_lock_init(&cma->mem_head_lock);
26b02a1f Sasha Levin      2015-04-14  135  #endif
26b02a1f Sasha Levin      2015-04-14  136  
a254129e Joonsoo Kim      2014-08-06  137  	return 0;
a254129e Joonsoo Kim      2014-08-06  138  
a254129e Joonsoo Kim      2014-08-06  139  err:
a254129e Joonsoo Kim      2014-08-06  140  	kfree(cma->bitmap);
f022d8cb Laurent Pinchart 2014-10-24  141  	cma->count = 0;
a254129e Joonsoo Kim      2014-08-06  142  	return -EINVAL;
a254129e Joonsoo Kim      2014-08-06  143  }
a254129e Joonsoo Kim      2014-08-06  144  
a254129e Joonsoo Kim      2014-08-06  145  static int __init cma_init_reserved_areas(void)
a254129e Joonsoo Kim      2014-08-06  146  {
a254129e Joonsoo Kim      2014-08-06  147  	int i;
a254129e Joonsoo Kim      2014-08-06  148  
a254129e Joonsoo Kim      2014-08-06  149  	for (i = 0; i < cma_area_count; i++) {
a254129e Joonsoo Kim      2014-08-06  150  		int ret = cma_activate_area(&cma_areas[i]);
a254129e Joonsoo Kim      2014-08-06  151  
a254129e Joonsoo Kim      2014-08-06  152  		if (ret)
a254129e Joonsoo Kim      2014-08-06  153  			return ret;
a254129e Joonsoo Kim      2014-08-06  154  	}
a254129e Joonsoo Kim      2014-08-06  155  
a254129e Joonsoo Kim      2014-08-06  156  	return 0;
a254129e Joonsoo Kim      2014-08-06  157  }
a254129e Joonsoo Kim      2014-08-06  158  core_initcall(cma_init_reserved_areas);
a254129e Joonsoo Kim      2014-08-06  159  
a254129e Joonsoo Kim      2014-08-06  160  /**
de9e14ee Marek Szyprowski 2014-10-13  161   * cma_init_reserved_mem() - create custom contiguous area from reserved memory
de9e14ee Marek Szyprowski 2014-10-13  162   * @base: Base address of the reserved area
de9e14ee Marek Szyprowski 2014-10-13  163   * @size: Size of the reserved area (in bytes),
de9e14ee Marek Szyprowski 2014-10-13  164   * @order_per_bit: Order of pages represented by one bit on bitmap.
de9e14ee Marek Szyprowski 2014-10-13  165   * @res_cma: Pointer to store the created cma region.
de9e14ee Marek Szyprowski 2014-10-13  166   *
de9e14ee Marek Szyprowski 2014-10-13  167   * This function creates custom contiguous area from already reserved memory.
de9e14ee Marek Szyprowski 2014-10-13  168   */
de9e14ee Marek Szyprowski 2014-10-13  169  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
ac173824 Sasha Levin      2015-04-14  170  				 unsigned int order_per_bit,
ac173824 Sasha Levin      2015-04-14  171  				 struct cma **res_cma)
de9e14ee Marek Szyprowski 2014-10-13  172  {
de9e14ee Marek Szyprowski 2014-10-13  173  	struct cma *cma;
de9e14ee Marek Szyprowski 2014-10-13  174  	phys_addr_t alignment;
de9e14ee Marek Szyprowski 2014-10-13  175  
de9e14ee Marek Szyprowski 2014-10-13  176  	/* Sanity checks */
de9e14ee Marek Szyprowski 2014-10-13  177  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
de9e14ee Marek Szyprowski 2014-10-13  178  		pr_err("Not enough slots for CMA reserved regions!\n");
de9e14ee Marek Szyprowski 2014-10-13  179  		return -ENOSPC;
de9e14ee Marek Szyprowski 2014-10-13  180  	}
de9e14ee Marek Szyprowski 2014-10-13  181  
de9e14ee Marek Szyprowski 2014-10-13  182  	if (!size || !memblock_is_region_reserved(base, size))
de9e14ee Marek Szyprowski 2014-10-13  183  		return -EINVAL;
de9e14ee Marek Szyprowski 2014-10-13  184  
de9e14ee Marek Szyprowski 2014-10-13  185  	/* ensure minimal alignment requied by mm core */
de9e14ee Marek Szyprowski 2014-10-13 @186  	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
de9e14ee Marek Szyprowski 2014-10-13  187  
de9e14ee Marek Szyprowski 2014-10-13  188  	/* alignment should be aligned with order_per_bit */
de9e14ee Marek Szyprowski 2014-10-13  189  	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
de9e14ee Marek Szyprowski 2014-10-13  190  		return -EINVAL;
de9e14ee Marek Szyprowski 2014-10-13  191  
de9e14ee Marek Szyprowski 2014-10-13  192  	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
de9e14ee Marek Szyprowski 2014-10-13  193  		return -EINVAL;
de9e14ee Marek Szyprowski 2014-10-13  194  
de9e14ee Marek Szyprowski 2014-10-13  195  	/*
de9e14ee Marek Szyprowski 2014-10-13  196  	 * Each reserved area must be initialised later, when more kernel
de9e14ee Marek Szyprowski 2014-10-13  197  	 * subsystems (like slab allocator) are available.
de9e14ee Marek Szyprowski 2014-10-13  198  	 */
de9e14ee Marek Szyprowski 2014-10-13  199  	cma = &cma_areas[cma_area_count];
de9e14ee Marek Szyprowski 2014-10-13  200  	cma->base_pfn = PFN_DOWN(base);
de9e14ee Marek Szyprowski 2014-10-13  201  	cma->count = size >> PAGE_SHIFT;
de9e14ee Marek Szyprowski 2014-10-13  202  	cma->order_per_bit = order_per_bit;
de9e14ee Marek Szyprowski 2014-10-13  203  	*res_cma = cma;
de9e14ee Marek Szyprowski 2014-10-13  204  	cma_area_count++;
94737a85 George G. Davis  2015-02-11  205  	totalcma_pages += (size / PAGE_SIZE);
de9e14ee Marek Szyprowski 2014-10-13  206  
de9e14ee Marek Szyprowski 2014-10-13  207  	return 0;
de9e14ee Marek Szyprowski 2014-10-13  208  }
de9e14ee Marek Szyprowski 2014-10-13  209  
de9e14ee Marek Szyprowski 2014-10-13  210  /**
a254129e Joonsoo Kim      2014-08-06  211   * cma_declare_contiguous() - reserve custom contiguous area
a254129e Joonsoo Kim      2014-08-06  212   * @base: Base address of the reserved area optional, use 0 for any
c1f733aa Joonsoo Kim      2014-08-06  213   * @size: Size of the reserved area (in bytes),
a254129e Joonsoo Kim      2014-08-06  214   * @limit: End address of the reserved memory (optional, 0 for any).
a254129e Joonsoo Kim      2014-08-06  215   * @alignment: Alignment for the CMA area, should be power of 2 or zero
a254129e Joonsoo Kim      2014-08-06  216   * @order_per_bit: Order of pages represented by one bit on bitmap.
a254129e Joonsoo Kim      2014-08-06  217   * @fixed: hint about where to place the reserved area
c1f733aa Joonsoo Kim      2014-08-06  218   * @res_cma: Pointer to store the created cma region.
a254129e Joonsoo Kim      2014-08-06  219   *
a254129e Joonsoo Kim      2014-08-06  220   * This function reserves memory from early allocator. It should be
a254129e Joonsoo Kim      2014-08-06  221   * called by arch specific code once the early allocator (memblock or bootmem)
a254129e Joonsoo Kim      2014-08-06  222   * has been activated and all other subsystems have already allocated/reserved
a254129e Joonsoo Kim      2014-08-06  223   * memory. This function allows to create custom reserved areas.
a254129e Joonsoo Kim      2014-08-06  224   *
a254129e Joonsoo Kim      2014-08-06  225   * If @fixed is true, reserve contiguous area at exactly @base. If false,
a254129e Joonsoo Kim      2014-08-06  226   * reserve in range from @base to @limit.
a254129e Joonsoo Kim      2014-08-06  227   */
c1f733aa Joonsoo Kim      2014-08-06  228  int __init cma_declare_contiguous(phys_addr_t base,
c1f733aa Joonsoo Kim      2014-08-06  229  			phys_addr_t size, phys_addr_t limit,
a254129e Joonsoo Kim      2014-08-06  230  			phys_addr_t alignment, unsigned int order_per_bit,
c1f733aa Joonsoo Kim      2014-08-06  231  			bool fixed, struct cma **res_cma)
a254129e Joonsoo Kim      2014-08-06  232  {
f7426b98 Marek Szyprowski 2014-10-09  233  	phys_addr_t memblock_end = memblock_end_of_DRAM();
6b101e2a Joonsoo Kim      2014-12-10  234  	phys_addr_t highmem_start;
a254129e Joonsoo Kim      2014-08-06  235  	int ret = 0;
a254129e Joonsoo Kim      2014-08-06  236  
6b101e2a Joonsoo Kim      2014-12-10  237  #ifdef CONFIG_X86
6b101e2a Joonsoo Kim      2014-12-10  238  	/*
6b101e2a Joonsoo Kim      2014-12-10  239  	 * high_memory isn't direct mapped memory so retrieving its physical
6b101e2a Joonsoo Kim      2014-12-10  240  	 * address isn't appropriate. But it would be useful to check the
6b101e2a Joonsoo Kim      2014-12-10  241  	 * physical address of the highmem boundary so it's justfiable to get
6b101e2a Joonsoo Kim      2014-12-10  242  	 * the physical address from it. On x86 there is a validation check for
6b101e2a Joonsoo Kim      2014-12-10  243  	 * this case, so the following workaround is needed to avoid it.
6b101e2a Joonsoo Kim      2014-12-10  244  	 */
6b101e2a Joonsoo Kim      2014-12-10  245  	highmem_start = __pa_nodebug(high_memory);
6b101e2a Joonsoo Kim      2014-12-10  246  #else
6b101e2a Joonsoo Kim      2014-12-10  247  	highmem_start = __pa(high_memory);
6b101e2a Joonsoo Kim      2014-12-10  248  #endif
56fa4f60 Laurent Pinchart 2014-10-24  249  	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
56fa4f60 Laurent Pinchart 2014-10-24  250  		__func__, &size, &base, &limit, &alignment);
a254129e Joonsoo Kim      2014-08-06  251  
a254129e Joonsoo Kim      2014-08-06  252  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
a254129e Joonsoo Kim      2014-08-06  253  		pr_err("Not enough slots for CMA reserved regions!\n");
a254129e Joonsoo Kim      2014-08-06  254  		return -ENOSPC;
a254129e Joonsoo Kim      2014-08-06  255  	}
a254129e Joonsoo Kim      2014-08-06  256  
a254129e Joonsoo Kim      2014-08-06  257  	if (!size)
a254129e Joonsoo Kim      2014-08-06  258  		return -EINVAL;
a254129e Joonsoo Kim      2014-08-06  259  
a254129e Joonsoo Kim      2014-08-06  260  	if (alignment && !is_power_of_2(alignment))
a254129e Joonsoo Kim      2014-08-06  261  		return -EINVAL;
a254129e Joonsoo Kim      2014-08-06  262  
a254129e Joonsoo Kim      2014-08-06  263  	/*
a254129e Joonsoo Kim      2014-08-06  264  	 * Sanitise input arguments.
a254129e Joonsoo Kim      2014-08-06  265  	 * Pages both ends in CMA area could be merged into adjacent unmovable
a254129e Joonsoo Kim      2014-08-06  266  	 * migratetype page by page allocator's buddy algorithm. In the case,
a254129e Joonsoo Kim      2014-08-06  267  	 * you couldn't get a contiguous memory, which is not what we want.
a254129e Joonsoo Kim      2014-08-06  268  	 */
a254129e Joonsoo Kim      2014-08-06  269  	alignment = max(alignment,
a254129e Joonsoo Kim      2014-08-06 @270  			(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
a254129e Joonsoo Kim      2014-08-06  271  	base = ALIGN(base, alignment);
a254129e Joonsoo Kim      2014-08-06  272  	size = ALIGN(size, alignment);
a254129e Joonsoo Kim      2014-08-06  273  	limit &= ~(alignment - 1);

:::::: The code at line 186 was first introduced by commit
:::::: de9e14eebf33a60712a52a0bc6e08c043c0aba53 drivers: dma-contiguous: add initialization from device tree

:::::: TO: Marek Szyprowski
:::::: CC: Linus Torvalds

---
0-DAY kernel test infrastructure                Open Source Technology Center
http://lists.01.org/mailman/listinfo/kbuild                 Intel Corporation
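
A note on the warning itself: the kernel's max() macro evaluates
"(void) (&_max1 == &_max2)" purely as a compile-time type check. MAX_ORDER - 1
is an int constant expression, while pageblock_order presumably becomes a
runtime variable with this patch (as it already is on architectures that set
CONFIG_HUGETLB_PAGE_SIZE_VARIABLE, where it is declared unsigned int), so the
two temporaries have different types and the pointer comparison triggers
"comparison of distinct pointer types lacks a cast". The usual kernel idiom for
mixed-type operands is max_t() with an explicit type. The sketch below is a
standalone userspace reproduction with simplified copies of the two macros; the
file name and the values chosen for MAX_ORDER and pageblock_order are made up
for illustration only, and this is not the actual fix applied to mm/cma.c.

/* max-warn-demo.c - build with: gcc -Wall -o max-warn-demo max-warn-demo.c */
#include <stdio.h>

/* Simplified copies of the include/linux/kernel.h helpers quoted above. */
#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		/* warns when x and y differ in type */ \
	_max1 > _max2 ? _max1 : _max2; })

#define max_t(type, x, y) ({			\
	type __max1 = (type)(x);		\
	type __max2 = (type)(y);		\
	__max1 > __max2 ? __max1 : __max2; })

/* Stand-ins for the kernel symbols involved; the values are hypothetical. */
#define MAX_ORDER 11				/* int constant expression */
static unsigned int pageblock_order = 9;	/* runtime variable, unsigned int */
#define PAGE_SIZE 4096UL

int main(void)
{
	/* Mirrors mm/cma.c:186: int vs unsigned int operands -> distinct pointer types. */
	unsigned long warns = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* Conventional workaround: force both operands to one explicit type. */
	unsigned long clean = PAGE_SIZE << max_t(unsigned int, MAX_ORDER - 1, pageblock_order);

	printf("%lu %lu\n", warns, clean);
	return 0;
}

Compiling this with gcc reproduces the same "comparison of distinct pointer
types lacks a cast" diagnostic on the max() line, while the max_t() line is
silent, which is why switching the mm/cma.c call sites to max_t() (or keeping
pageblock_order a compile-time int constant on s390) would address the report.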