tree:   git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   07d0e2d232fee3ff692c50150b2aa6e3b7755f8f
commit: 17336ae3d58ac8fdb22c74572566e2f45e2b5589 [72/159] madvise: cleanup swapin_walk_pmd_entry()
config: i386-randconfig-ib1-06201113 (attached as .config)

All warnings:

   In file included from arch/x86/include/asm/pgtable.h:432:0,
                    from include/linux/mm.h:51,
                    from include/linux/mman.h:4,
                    from mm/madvise.c:8:
   mm/madvise.c: In function 'swapin_walk_pte_entry':
   arch/x86/include/asm/pgtable_32.h:58:43: warning: value computed is not used [-Wunused-value]
     ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
                                              ^
>> mm/madvise.c:161:2: note: in expansion of macro 'pte_offset_map'
     pte_offset_map(walk->pmd, start & PMD_MASK);
     ^
   mm/madvise.c:145:9: warning: unused variable 'orig_pte' [-Wunused-variable]
     pte_t *orig_pte = pte - ((start & (PMD_SIZE - 1)) >> PAGE_SHIFT);
            ^

vim +/pte_offset_map +161 mm/madvise.c

     2	 * linux/mm/madvise.c
     3	 *
     4	 * Copyright (C) 1999  Linus Torvalds
     5	 * Copyright (C) 2002  Christoph Hellwig
     6	 */
     7	
   > 8	#include <linux/mman.h>
     9	#include <linux/pagemap.h>
    10	#include <linux/syscalls.h>
    11	#include <linux/mempolicy.h>
    12	#include <linux/page-isolation.h>
    13	#include <linux/hugetlb.h>
    14	#include <linux/falloc.h>
    15	#include <linux/sched.h>
    16	#include <linux/ksm.h>
    17	#include <linux/fs.h>
    18	#include <linux/file.h>
    19	#include <linux/blkdev.h>
    20	#include <linux/swap.h>
    21	#include <linux/swapops.h>
    22	
    23	/*
    24	 * Any behaviour which results in changes to the vma->vm_flags needs to
    25	 * take mmap_sem for writing. Others, which simply traverse vmas, need
    26	 * to only take it for reading.
    27	 */
    28	static int madvise_need_mmap_write(int behavior)
    29	{
    30		switch (behavior) {
    31		case MADV_REMOVE:
    32		case MADV_WILLNEED:
    33		case MADV_DONTNEED:
    34			return 0;
    35		default:
    36			/* be safe, default to 1. list exceptions explicitly */
    37			return 1;
    38		}
    39	}
    40	
    41	/*
    42	 * We can potentially split a vm area into separate
    43	 * areas, each area with its own behavior.
    44	 */
    45	static long madvise_behavior(struct vm_area_struct *vma,
    46				struct vm_area_struct **prev,
    47				unsigned long start, unsigned long end, int behavior)
    48	{
    49		struct mm_struct *mm = vma->vm_mm;
    50		int error = 0;
    51		pgoff_t pgoff;
    52		unsigned long new_flags = vma->vm_flags;
    53	
    54		switch (behavior) {
    55		case MADV_NORMAL:
    56			new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
    57			break;
    58		case MADV_SEQUENTIAL:
    59			new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
    60			break;
    61		case MADV_RANDOM:
    62			new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
    63			break;
    64		case MADV_DONTFORK:
    65			new_flags |= VM_DONTCOPY;
    66			break;
    67		case MADV_DOFORK:
    68			if (vma->vm_flags & VM_IO) {
    69				error = -EINVAL;
    70				goto out;
    71			}
    72			new_flags &= ~VM_DONTCOPY;
    73			break;
    74		case MADV_DONTDUMP:
    75			new_flags |= VM_DONTDUMP;
    76			break;
    77		case MADV_DODUMP:
    78			if (new_flags & VM_SPECIAL) {
    79				error = -EINVAL;
    80				goto out;
    81			}
    82			new_flags &= ~VM_DONTDUMP;
    83			break;
    84		case MADV_MERGEABLE:
    85		case MADV_UNMERGEABLE:
    86			error = ksm_madvise(vma, start, end, behavior, &new_flags);
    87			if (error)
    88				goto out;
    89			break;
    90		case MADV_HUGEPAGE:
    91		case MADV_NOHUGEPAGE:
    92			error = hugepage_madvise(vma, &new_flags, behavior);
    93			if (error)
    94				goto out;
    95			break;
    96		}
    97	
    98		if (new_flags == vma->vm_flags) {
    99			*prev = vma;
   100			goto out;
   101		}
   102	
   103		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
   104		*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
   105				  vma->vm_file, pgoff, vma_policy(vma));
   106		if (*prev) {
   107			vma = *prev;
   108			goto success;
   109		}
   110	
   111		*prev = vma;
   112	
   113		if (start != vma->vm_start) {
   114			error = split_vma(mm, vma, start, 1);
   115			if (error)
   116				goto out;
   117		}
   118	
   119		if (end != vma->vm_end) {
   120			error = split_vma(mm, vma, end, 0);
   121			if (error)
   122				goto out;
   123		}
   124	
   125	success:
   126		/*
   127		 * vm_flags is protected by the mmap_sem held in write mode.
   128		 */
   129		vma->vm_flags = new_flags;
   130	
   131	out:
   132		if (error == -ENOMEM)
   133			error = -EAGAIN;
   134		return error;
   135	}
   136	
   137	#ifdef CONFIG_SWAP
   138	/*
   139	 * Assuming that page table walker holds page table lock.
   140	 */
   141	static int swapin_walk_pte_entry(pte_t *pte, unsigned long start,
   142		unsigned long end, struct mm_walk *walk)
   143	{
   144		pte_t ptent;
   145		pte_t *orig_pte = pte - ((start & (PMD_SIZE - 1)) >> PAGE_SHIFT);
   146		swp_entry_t entry;
   147		struct page *page;
   148	
   149		ptent = *pte;
   150		pte_unmap_unlock(orig_pte, walk->ptl);
   151		if (pte_present(ptent) || pte_none(ptent) || pte_file(ptent))
   152			goto lock;
   153		entry = pte_to_swp_entry(ptent);
   154		if (unlikely(non_swap_entry(entry)))
   155			goto lock;
   156		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
   157					     walk->vma, start);
   158		if (page)
   159			page_cache_release(page);
   160	lock:
 > 161		pte_offset_map(walk->pmd, start & PMD_MASK);
   162		spin_lock(walk->ptl);
   163		return 0;
   164	}

---
0-DAY kernel build testing backend              Open Source Technology Center
http://lists.01.org/mailman/listinfo/kbuild     Intel Corporation
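
Both warnings are specific to i386 builds without CONFIG_HIGHPTE. There,
arch/x86/include/asm/pgtable_32.h defines pte_offset_map() as bare pointer
arithmetic, so the value computed at line 161 is simply dropped, and
pte_unmap() as an empty statement, so the pte_unmap_unlock() at line 150
never evaluates orig_pte. Below is a minimal, untested sketch of how the
function could be made warning-clean on such configs; the (void) cast and
the __maybe_unused annotation are illustrative choices, not the fix that
was actually applied to the -mm patch:

static int swapin_walk_pte_entry(pte_t *pte, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t ptent;
	/*
	 * __maybe_unused: pte_unmap() expands to an empty statement on
	 * i386 !CONFIG_HIGHPTE, erasing this variable's only real use.
	 */
	pte_t *orig_pte __maybe_unused =
		pte - ((start & (PMD_SIZE - 1)) >> PAGE_SHIFT);
	swp_entry_t entry;
	struct page *page;

	ptent = *pte;
	pte_unmap_unlock(orig_pte, walk->ptl);
	if (pte_present(ptent) || pte_none(ptent) || pte_file(ptent))
		goto lock;
	entry = pte_to_swp_entry(ptent);
	if (unlikely(non_swap_entry(entry)))
		goto lock;
	page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
				     walk->vma, start);
	if (page)
		page_cache_release(page);
lock:
	/*
	 * pte_offset_map() is called here only for its kmap_atomic()
	 * side effect under CONFIG_HIGHPTE; elsewhere the macro
	 * degenerates to pointer arithmetic, so discard the result
	 * explicitly to keep -Wunused-value quiet.
	 */
	(void)pte_offset_map(walk->pmd, start & PMD_MASK);
	spin_lock(walk->ptl);
	return 0;
}

An alternative would be to make the !CONFIG_HIGHPTE pte_unmap() in
pgtable_32.h evaluate (and discard) its argument, which would silence the
orig_pte warning for every caller rather than just this one.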