public inbox for kvm-ppc@vger.kernel.org
 help / color / mirror / Atom feed
From: kernel test robot <lkp@intel.com>
To: Alexander Graf <graf@amazon.com>
Cc: oe-kbuild-all@lists.linux.dev, kvm-ppc@vger.kernel.org
Subject: [agraf-2.6:virtio-dmb 1/1] drivers/virtio/virtio_pci_modern.c:545:45: sparse: sparse: incorrect type in initializer (different address spaces)
Date: Wed, 28 Jan 2026 18:01:40 +0800	[thread overview]
Message-ID: <202601281736.6KZZJdNR-lkp@intel.com> (raw)

tree:   https://github.com/agraf/linux-2.6.git virtio-dmb
head:   45a0bb294baa814a4b7f4acbc0eea8c55b05cd69
commit: 45a0bb294baa814a4b7f4acbc0eea8c55b05cd69 [1/1] virtio-pci: Add Device Memory Buffer (DMB) support
config: x86_64-randconfig-123-20260128 (https://download.01.org/0day-ci/archive/20260128/202601281736.6KZZJdNR-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260128/202601281736.6KZZJdNR-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202601281736.6KZZJdNR-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
>> drivers/virtio/virtio_pci_modern.c:545:45: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected void *shmem_vaddr @@     got void [noderef] __iomem * @@
   drivers/virtio/virtio_pci_modern.c:545:45: sparse:     expected void *shmem_vaddr
   drivers/virtio/virtio_pci_modern.c:545:45: sparse:     got void [noderef] __iomem *
   drivers/virtio/virtio_pci_modern.c:567:45: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected void *shmem_vaddr @@     got void [noderef] __iomem * @@
   drivers/virtio/virtio_pci_modern.c:567:45: sparse:     expected void *shmem_vaddr
   drivers/virtio/virtio_pci_modern.c:567:45: sparse:     got void [noderef] __iomem *
   drivers/virtio/virtio_pci_modern.c:587:45: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected void *shmem_vaddr @@     got void [noderef] __iomem * @@
   drivers/virtio/virtio_pci_modern.c:587:45: sparse:     expected void *shmem_vaddr
   drivers/virtio/virtio_pci_modern.c:587:45: sparse:     got void [noderef] __iomem *
>> drivers/virtio/virtio_pci_modern.c:659:33: sparse: sparse: incorrect type in assignment (different address spaces) @@     expected void [noderef] __iomem *dmb_mem @@     got void * @@
   drivers/virtio/virtio_pci_modern.c:659:33: sparse:     expected void [noderef] __iomem *dmb_mem
   drivers/virtio/virtio_pci_modern.c:659:33: sparse:     got void *

vim +545 drivers/virtio/virtio_pci_modern.c

   538	
   539	static void vp_dmb_unmap_page(union virtio_map map, dma_addr_t map_handle,
   540					     size_t size, enum dma_data_direction dir,
   541					     ulong attrs)
   542	{
   543		struct pci_dev *pci_dev = to_pci_dev(map.dma_dev);
   544		struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
 > 545		void *shmem_vaddr = vp_dev->dmb_mem + map_handle;
   546		struct device *dev = &pci_dev->dev;
   547		struct vp_dmb_mapping *mapping;
   548	
   549		mapping = vp_dmb_pop_map(vp_dev, map_handle);
   550		if (dev_WARN_ONCE(dev, !mapping, "Unmap without map of handle %lx", (long)map_handle))
   551			return;
   552	
   553		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
   554		    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
   555			memcpy(mapping->va, shmem_vaddr, size);
   556	
   557		kfree(mapping);
   558	
   559		vp_dmb_free(map, size, shmem_vaddr, map_handle, attrs);
   560	}
   561	
   562	static void vp_dmb_sync_single_for_cpu(union virtio_map map, dma_addr_t map_handle,
   563						      size_t size, enum dma_data_direction dir)
   564	{
   565		struct pci_dev *pci_dev = to_pci_dev(map.dma_dev);
   566		struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
   567		void *shmem_vaddr = vp_dev->dmb_mem + map_handle;
   568		struct device *dev = &pci_dev->dev;
   569		struct vp_dmb_mapping mapping = {};
   570		int r;
   571	
   572		r = vp_dmb_peek_map(vp_dev, map_handle, &mapping);
   573	
   574		if (dev_WARN_ONCE(dev, r, "CPU sync without map of handle %lx", (long)map_handle))
   575			return;
   576	
   577		if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) {
   578			memcpy(mapping.va, shmem_vaddr, size);
   579		}
   580	}
   581	
   582	static void vp_dmb_sync_single_for_device(union virtio_map map, dma_addr_t map_handle,
   583							 size_t size, enum dma_data_direction dir)
   584	{
   585		struct pci_dev *pci_dev = to_pci_dev(map.dma_dev);
   586		struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
   587		void *shmem_vaddr = vp_dev->dmb_mem + map_handle;
   588		struct device *dev = &pci_dev->dev;
   589		struct vp_dmb_mapping mapping;
   590		int r;
   591	
   592		r = vp_dmb_peek_map(vp_dev, map_handle, &mapping);
   593	
   594		if (dev_WARN_ONCE(dev, r, "CPU sync without map of handle %lx", (long)map_handle))
   595			return;
   596	
   597		if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
   598			memcpy(shmem_vaddr, mapping.va, size);
   599	}
   600	
/*
 * virtio_map_ops .need_sync: DMB mappings always bounce data through the
 * shared-memory window (see the sync_single helpers above), so an explicit
 * sync is always required.
 */
static bool vp_dmb_need_sync(union virtio_map map, dma_addr_t map_handle)
{
	return true;
}
   605	
/*
 * virtio_map_ops .mapping_error: delegate to the generic DMA mapping-error
 * check on the underlying device.
 */
static int vp_dmb_mapping_error(union virtio_map map, dma_addr_t map_handle)
{
	return dma_mapping_error(map.dma_dev, map_handle);
}
   610	
/*
 * virtio_map_ops .max_mapping_size: cap a single DMB mapping.
 * NOTE(review): the 1 MiB cap looks arbitrary — confirm it is consistent
 * with how the DMB region is sized by the device.
 */
static size_t vp_dmb_max_mapping_size(union virtio_map map)
{
	return 1024 * 1024; /* 1 MiB */
}
   615	
/*
 * virtio map operations backed by the Device Memory Buffer: buffers are
 * bounced through a device-shared memory window instead of being mapped
 * for DMA directly.
 */
static const struct virtio_map_ops vp_dmb_map_ops = {
	.map_page = vp_dmb_map_page,
	.unmap_page = vp_dmb_unmap_page,
	.sync_single_for_cpu = vp_dmb_sync_single_for_cpu,
	.sync_single_for_device = vp_dmb_sync_single_for_device,
	.alloc = vp_dmb_alloc,
	.free = vp_dmb_free,
	.need_sync = vp_dmb_need_sync,
	.mapping_error = vp_dmb_mapping_error,
	.max_mapping_size = vp_dmb_max_mapping_size,
};
   627	
   628	/* virtio config->finalize_features() implementation */
   629	static int vp_finalize_features(struct virtio_device *vdev)
   630	{
   631		struct virtio_pci_device *vp_dev = to_vp_device(vdev);
   632		u64 features = vdev->features;
   633	
   634		/* Give virtio_ring a chance to accept features. */
   635		vring_transport_features(vdev);
   636	
   637		/* Give virtio_pci a chance to accept features. */
   638		vp_transport_features(vdev, features);
   639	
   640		if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
   641			dev_err(&vdev->dev, "virtio: device uses modern interface "
   642				"but does not have VIRTIO_F_VERSION_1\n");
   643			return -EINVAL;
   644		}
   645	
   646		if (vp_check_common_size(vdev))
   647			return -EINVAL;
   648	
   649		/* Initialize Device Memory Buffer if advertised by host */
   650		if (__virtio_test_bit(vdev, VIRTIO_F_DMB)) {
   651			struct virtio_shm_region region;
   652	
   653			if (!virtio_get_shm_region(vdev, &region, VIRTIO_SHMEM_ID_DMB)) {
   654				dev_warn(&vdev->dev, "Failed to find DMB region");
   655				return -EINVAL;
   656			}
   657	
   658			vp_dev->dmb_pool = devm_gen_pool_create(&vdev->dev, ilog2(SMP_CACHE_BYTES), -1, "virtio-shmem");
 > 659			vp_dev->dmb_mem = devm_memremap(&vdev->dev, region.addr, region.len, MEMREMAP_WB | MEMREMAP_DEC);
   660			INIT_LIST_HEAD(&vp_dev->dmb_mappings);
   661			spin_lock_init(&vp_dev->dmb_lock);
   662	
   663			if (vp_dev->dmb_pool) {
   664				int ret = gen_pool_add_virt(vp_dev->dmb_pool, (ulong)vp_dev->dmb_mem,
   665							    region.addr, region.len, -1);
   666				if (ret != 0) {
   667					dev_warn(&vdev->dev, "Failed to initialize DMB region");
   668					return -EINVAL;
   669				}
   670	
   671				/* Reserve first cache line to avoid offset 0 allocations */
   672				ulong reserved = gen_pool_alloc(vp_dev->dmb_pool, SMP_CACHE_BYTES);
   673				if (!reserved) {
   674					dev_warn(&vdev->dev, "Failed to reserve shmem offset 0");
   675					return -EINVAL;
   676				}
   677				vdev->map = &vp_dmb_map_ops;
   678				vdev->vmap.dma_dev = &vp_dev->pci_dev->dev;
   679	
   680				dev_info(&vdev->dev, "Enabled %d bytes Virtio DMB region for %s", (int)region.len, dev_name(&vp_dev->pci_dev->dev));
   681			}
   682		}
   683	
   684		vp_modern_set_extended_features(&vp_dev->mdev, vdev->features_array);
   685	
   686		return 0;
   687	}
   688	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

                 reply	other threads:[~2026-01-28 10:02 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=202601281736.6KZZJdNR-lkp@intel.com \
    --to=lkp@intel.com \
    --cc=graf@amazon.com \
    --cc=kvm-ppc@vger.kernel.org \
    --cc=oe-kbuild-all@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox