* [linux-next:master 6752/7613] drivers/gpu/drm/nouveau/nouveau_dmem.c:259:13: sparse: sparse: incorrect type in assignment (different base types)
@ 2025-11-12 11:57 kernel test robot
2025-11-12 21:19 ` Balbir Singh
0 siblings, 1 reply; 2+ messages in thread
From: kernel test robot @ 2025-11-12 11:57 UTC (permalink / raw)
To: Balbir Singh; +Cc: oe-kbuild-all, Andrew Morton, Linux Memory Management List
tree: https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head: b179ce312bafcb8c68dc718e015aee79b7939ff0
commit: d3b4177bcf978a441cf98b7515d043eb23728194 [6752/7613] gpu/drm/nouveau: enable THP support for GPU memory migration
config: x86_64-randconfig-121-20251112 (https://download.01.org/0day-ci/archive/20251112/202511121922.oP20Lzb8-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251112/202511121922.oP20Lzb8-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511121922.oP20Lzb8-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
drivers/gpu/drm/nouveau/nouveau_dmem.c: note: in included file (through drivers/gpu/drm/nouveau/nouveau_drv.h):
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:9:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:10:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:11:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:12:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:13:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:14:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:17:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:18:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:20:1: sparse: sparse: directive in macro's argument list
drivers/gpu/drm/nouveau/include/nvif/ioctl.h:21:1: sparse: sparse: directive in macro's argument list
>> drivers/gpu/drm/nouveau/nouveau_dmem.c:259:13: sparse: sparse: incorrect type in assignment (different base types) @@ expected restricted vm_fault_t [assigned] [usertype] ret @@ got int @@
drivers/gpu/drm/nouveau/nouveau_dmem.c:259:13: sparse: expected restricted vm_fault_t [assigned] [usertype] ret
drivers/gpu/drm/nouveau/nouveau_dmem.c:259:13: sparse: got int
vim +259 drivers/gpu/drm/nouveau/nouveau_dmem.c
182
183 static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
184 {
185 struct nouveau_drm *drm = page_to_drm(vmf->page);
186 struct nouveau_dmem *dmem = drm->dmem;
187 struct nouveau_fence *fence;
188 struct nouveau_svmm *svmm;
189 struct page *dpage;
190 vm_fault_t ret = 0;
191 struct migrate_vma args = {
192 .vma = vmf->vma,
193 .pgmap_owner = drm->dev,
194 .fault_page = vmf->page,
195 .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
196 MIGRATE_VMA_SELECT_COMPOUND,
197 .src = NULL,
198 .dst = NULL,
199 };
200 unsigned int order, nr;
201 struct folio *sfolio, *dfolio;
202 struct nouveau_dmem_dma_info dma_info;
203
204 sfolio = page_folio(vmf->page);
205 order = folio_order(sfolio);
206 nr = 1 << order;
207
208 /*
209 * Handle partial unmap faults, where the folio is large, but
210 * the pmd is split.
211 */
212 if (vmf->pte) {
213 order = 0;
214 nr = 1;
215 }
216
217 if (order)
218 args.flags |= MIGRATE_VMA_SELECT_COMPOUND;
219
220 args.start = ALIGN_DOWN(vmf->address, (PAGE_SIZE << order));
221 args.vma = vmf->vma;
222 args.end = args.start + (PAGE_SIZE << order);
223 args.src = kcalloc(nr, sizeof(*args.src), GFP_KERNEL);
224 args.dst = kcalloc(nr, sizeof(*args.dst), GFP_KERNEL);
225
226 if (!args.src || !args.dst) {
227 ret = VM_FAULT_OOM;
228 goto err;
229 }
230 /*
231 * FIXME what we really want is to find some heuristic to migrate more
232 * than just one page on CPU fault. When such fault happens it is very
233 * likely that more surrounding page will CPU fault too.
234 */
235 if (migrate_vma_setup(&args) < 0)
236 return VM_FAULT_SIGBUS;
237 if (!args.cpages)
238 return 0;
239
240 if (order)
241 dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER | __GFP_ZERO,
242 order, vmf->vma, vmf->address), 0);
243 else
244 dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma,
245 vmf->address);
246 if (!dpage) {
247 ret = VM_FAULT_OOM;
248 goto done;
249 }
250
251 args.dst[0] = migrate_pfn(page_to_pfn(dpage));
252 if (order)
253 args.dst[0] |= MIGRATE_PFN_COMPOUND;
254 dfolio = page_folio(dpage);
255
256 svmm = folio_zone_device_data(sfolio);
257 mutex_lock(&svmm->mutex);
258 nouveau_svmm_invalidate(svmm, args.start, args.end);
> 259 ret = nouveau_dmem_copy_folio(drm, sfolio, dfolio, &dma_info);
260 mutex_unlock(&svmm->mutex);
261 if (ret) {
262 ret = VM_FAULT_SIGBUS;
263 goto done;
264 }
265
266 nouveau_fence_new(&fence, dmem->migrate.chan);
267 migrate_vma_pages(&args);
268 nouveau_dmem_fence_done(&fence);
269 dma_unmap_page(drm->dev->dev, dma_info.dma_addr, PAGE_SIZE,
270 DMA_BIDIRECTIONAL);
271 done:
272 migrate_vma_finalize(&args);
273 err:
274 kfree(args.src);
275 kfree(args.dst);
276 return ret;
277 }
278
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2025-11-12 21:19 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-11-12 11:57 [linux-next:master 6752/7613] drivers/gpu/drm/nouveau/nouveau_dmem.c:259:13: sparse: sparse: incorrect type in assignment (different base types) kernel test robot
2025-11-12 21:19 ` Balbir Singh
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox