* [bug report] mm/migrate_device: handle partially mapped folios during collection
@ 2025-09-15  5:50 Dan Carpenter
  0 siblings, 0 replies; only message in thread
From: Dan Carpenter @ 2025-09-15  5:50 UTC (permalink / raw)
  To: Balbir Singh; +Cc: linux-mm

Hello Balbir Singh,

Commit a7e62d34c9bf ("mm/migrate_device: handle partially mapped
folios during collection") from Sep 8, 2025 (linux-next), leads to
the following Smatch static checker warning:

	mm/migrate_device.c:352 migrate_vma_collect_pmd()
	error: we previously assumed 'page' could be null (see line 344)
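
The pattern being flagged is a pointer that is NULL-checked in an
if/else if chain and then dereferenced unconditionally after the chain,
roughly (illustrative shape only, not the actual kernel code):

	page = lookup_page();			/* may return NULL */

	if (page && condition_a(page))
		goto next;
	else if (page && condition_b(page))	/* implies page can be NULL */
		do_something(page);

	folio = page_folio(page);		/* unchecked dereference */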

mm/migrate_device.c
    244 static int migrate_vma_collect_pmd(pmd_t *pmdp,
    245                                    unsigned long start,
    246                                    unsigned long end,
    247                                    struct mm_walk *walk)
    248 {
    249         lazy_mmu_state_t lazy_mmu_state;
    250         struct migrate_vma *migrate = walk->private;
    251         struct vm_area_struct *vma = walk->vma;
    252         struct mm_struct *mm = vma->vm_mm;
    253         unsigned long addr = start, unmapped = 0;
    254         spinlock_t *ptl;
    255         struct folio *fault_folio = migrate->fault_page ?
    256                 page_folio(migrate->fault_page) : NULL;
    257         pte_t *ptep;
    258 
    259 again:
    260         if (pmd_trans_huge(*pmdp) || !pmd_present(*pmdp)) {
    261                 int ret = migrate_vma_collect_huge_pmd(pmdp, start, end, walk, fault_folio);
    262 
    263                 if (ret == -EAGAIN)
    264                         goto again;
    265                 if (ret == 0)
    266                         return 0;
    267         }
    268 
    269         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
    270         if (!ptep)
    271                 goto again;
    272         lazy_mmu_state = arch_enter_lazy_mmu_mode();
    273 
    274         for (; addr < end; addr += PAGE_SIZE, ptep++) {
    275                 struct dev_pagemap *pgmap;
    276                 unsigned long mpfn = 0, pfn;
    277                 struct folio *folio;
    278                 struct page *page;
    279                 swp_entry_t entry;
    280                 pte_t pte;
    281 
    282                 pte = ptep_get(ptep);
    283 
    284                 if (pte_none(pte)) {
    285                         if (vma_is_anonymous(vma)) {
    286                                 mpfn = MIGRATE_PFN_MIGRATE;
    287                                 migrate->cpages++;
    288                         }
    289                         goto next;
    290                 }
    291 
    292                 if (!pte_present(pte)) {
    293                         /*
    294                          * Only care about unaddressable device page special
    295                          * page table entry. Other special swap entries are not
    296                          * migratable, and we ignore regular swapped page.
    297                          */
    298                         struct folio *folio;
    299 
    300                         entry = pte_to_swp_entry(pte);
    301                         if (!is_device_private_entry(entry))
    302                                 goto next;
    303 
    304                         page = pfn_swap_entry_to_page(entry);
    305                         pgmap = page_pgmap(page);
    306                         if (!(migrate->flags &
    307                                 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
    308                             pgmap->owner != migrate->pgmap_owner)
    309                                 goto next;
    310 
    311                         folio = page_folio(page);
    312                         if (folio_test_large(folio)) {
    313                                 int ret;
    314 
    315                                 pte_unmap_unlock(ptep, ptl);
    316                                 ret = migrate_vma_split_folio(folio,
    317                                                           migrate->fault_page);
    318 
    319                                 if (ret) {
    320                                         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
    321                                         goto next;
    322                                 }
    323 
    324                                 addr = start;
    325                                 goto again;
    326                         }
    327 
    328                         mpfn = migrate_pfn(page_to_pfn(page)) |
    329                                         MIGRATE_PFN_MIGRATE;
    330                         if (is_writable_device_private_entry(entry))
    331                                 mpfn |= MIGRATE_PFN_WRITE;
    332                 } else {
    333                         pfn = pte_pfn(pte);
    334                         if (is_zero_pfn(pfn) &&
    335                             (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
    336                                 mpfn = MIGRATE_PFN_MIGRATE;
    337                                 migrate->cpages++;
    338                                 goto next;
    339                         }
    340                         page = vm_normal_page(migrate->vma, addr, pte);
    341                         if (page && !is_zone_device_page(page) &&
                                    ^^^^

    342                             !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
    343                                 goto next;
    344                         } else if (page && is_device_coherent_page(page)) {
                                           ^^^^
This code assumes page can be NULL

    345                                 pgmap = page_pgmap(page);
    346 
    347                                 if (!(migrate->flags &
    348                                         MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
    349                                         pgmap->owner != migrate->pgmap_owner)
    350                                         goto next;
    351                         }
--> 352                         folio = page_folio(page);
                                                   ^^^^
Unchecked dereference

    353                         if (folio_test_large(folio)) {
    354                                 int ret;
    355 
    356                                 pte_unmap_unlock(ptep, ptl);
    357                                 ret = migrate_vma_split_folio(folio,
    358                                                           migrate->fault_page);
    359 
    360                                 if (ret) {
    361                                         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
    362                                         goto next;

regards,
dan carpenter

