* [bug report] iommupt: Add map_pages op
From: Dan Carpenter @ 2025-11-21 9:22 UTC
To: Jason Gunthorpe; +Cc: iommu
Hello Jason Gunthorpe,
Commit dcd6a011a8d5 ("iommupt: Add map_pages op") from Nov 4, 2025
(linux-next), leads to the following Smatch static checker warning:
drivers/iommu/generic_pt/fmt/../iommu_pt.h:701 increase_top()
warn: missing unwind goto?
drivers/iommu/generic_pt/fmt/../iommu_pt.h
665 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
666 struct pt_iommu_map_args *map)
667 {
668 struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
669 struct pt_common *common = common_from_iommu(iommu_table);
670 uintptr_t top_of_table = READ_ONCE(common->top_of_table);
671 uintptr_t new_top_of_table = top_of_table;
672 struct pt_table_p *table_mem;
673 unsigned int new_level;
674 spinlock_t *domain_lock;
675 unsigned long flags;
676 int ret;
677
678 while (true) {
679 struct pt_range top_range =
680 _pt_top_range(common, new_top_of_table);
681 struct pt_state pts = pt_init_top(&top_range);
682
683 top_range.va = range->va;
684 top_range.last_va = range->last_va;
685
686 if (!pt_check_range(&top_range) && map->leaf_level <= pts.level)
687 break;
688
689 pts.level++;
690 if (pts.level > PT_MAX_TOP_LEVEL ||
691 pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
692 ret = -ERANGE;
693 goto err_free;
694 }
695
696 new_level = pts.level;
697 table_mem =
698 table_alloc_top(common, _pt_top_set(NULL, pts.level),
699 map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
700 if (IS_ERR(table_mem))
--> 701 return PTR_ERR(table_mem);
Should this be "goto err_free"?  Although then the question is why we
would need to call iommu_pages_stop_incoherent_list() on the error path
when the incoherent list isn't started
702 iommu_pages_list_add(&free_list, table_mem);
703
704 /* The new table links to the lower table always at index 0 */
705 top_range.va = 0;
706 top_range.top_level = new_level;
707 pts.table_lower = pts.table;
708 pts.table = table_mem;
709 pt_load_single_entry(&pts);
710 PT_WARN_ON(pts.index != 0);
711 pt_install_table(&pts, virt_to_phys(pts.table_lower),
712 &map->attrs);
713 new_top_of_table = _pt_top_set(pts.table, pts.level);
714 }
715
716 /*
717 * Avoid double flushing, flush it once after all pt_install_table()
718 */
719 if (pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
720 ret = iommu_pages_start_incoherent_list(
721 &free_list, iommu_table->iommu_device);
until here.
722 if (ret)
723 goto err_free;
724 }
725
726 /*
727 * top_of_table is write locked by the spinlock, but readers can use
728 * READ_ONCE() to get the value. Since we encode both the level and the
729 * pointer in one quanta the lockless reader will always see something
730 * valid. The HW must be updated to the new level under the spinlock
731 * before top_of_table is updated so that concurrent readers don't map
732 * into the new level until it is fully functional. If another thread
733 * already updated it while we were working then throw everything away
734 * and try again.
735 */
736 domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
737 spin_lock_irqsave(domain_lock, flags);
738 if (common->top_of_table != top_of_table) {
739 spin_unlock_irqrestore(domain_lock, flags);
740 ret = -EAGAIN;
741 goto err_free;
742 }
743
744 /*
745 * We do not issue any flushes for change_top on the expectation that
746 * any walk cache will not become a problem by adding another layer to
747 * the tree. Misses will rewalk from the updated top pointer, hits
748 * continue to be correct. Negative caching is fine too since all the
749 * new IOVA added by the new top is non-present.
750 */
751 iommu_table->driver_ops->change_top(
752 iommu_table, virt_to_phys(table_mem), new_level);
753 WRITE_ONCE(common->top_of_table, new_top_of_table);
754 spin_unlock_irqrestore(domain_lock, flags);
755 return 0;
756
757 err_free:
758 if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
759 iommu_pages_stop_incoherent_list(&free_list,
760 iommu_table->iommu_device);
761 iommu_put_pages_list(&free_list);
762 return ret;
763 }
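In case it helps, something like this is what I had in mind (untested
sketch; I'm assuming err_free is the correct unwind target so that any
tables already added to free_list on earlier loop iterations get freed):

	if (IS_ERR(table_mem)) {
		/* Unwind through err_free so free_list gets cleaned up */
		ret = PTR_ERR(table_mem);
		goto err_free;
	}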
regards,
dan carpenter
* Re: [bug report] iommupt: Add map_pages op
From: Jason Gunthorpe @ 2025-11-21 12:48 UTC
To: Dan Carpenter; +Cc: iommu
On Fri, Nov 21, 2025 at 12:22:02PM +0300, Dan Carpenter wrote:
> Hello Jason Gunthorpe,
>
> Commit dcd6a011a8d5 ("iommupt: Add map_pages op") from Nov 4, 2025
> (linux-next), leads to the following Smatch static checker warning:
>
> drivers/iommu/generic_pt/fmt/../iommu_pt.h:701 increase_top()
> warn: missing unwind goto?
>
> drivers/iommu/generic_pt/fmt/../iommu_pt.h
> 665 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
> 666 struct pt_iommu_map_args *map)
> 667 {
> 668 struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
> 669 struct pt_common *common = common_from_iommu(iommu_table);
> 670 uintptr_t top_of_table = READ_ONCE(common->top_of_table);
> 671 uintptr_t new_top_of_table = top_of_table;
> 672 struct pt_table_p *table_mem;
> 673 unsigned int new_level;
> 674 spinlock_t *domain_lock;
> 675 unsigned long flags;
> 676 int ret;
> 677
> 678 while (true) {
> 679 struct pt_range top_range =
> 680 _pt_top_range(common, new_top_of_table);
> 681 struct pt_state pts = pt_init_top(&top_range);
> 682
> 683 top_range.va = range->va;
> 684 top_range.last_va = range->last_va;
> 685
> 686 if (!pt_check_range(&top_range) && map->leaf_level <= pts.level)
> 687 break;
> 688
> 689 pts.level++;
> 690 if (pts.level > PT_MAX_TOP_LEVEL ||
> 691 pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
> 692 ret = -ERANGE;
> 693 goto err_free;
> 694 }
> 695
> 696 new_level = pts.level;
> 697 table_mem =
> 698 table_alloc_top(common, _pt_top_set(NULL, pts.level),
> 699 map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
> 700 if (IS_ERR(table_mem))
> --> 701 return PTR_ERR(table_mem);
>
> goto err_free?
Yeah, I got this as well from your last email and already sent a patch.
> Although, why do we need to call
> iommu_pages_stop_incoherent_list() when that isn't started
True but also harmless.
Thanks,
Jason
* [bug report] iommupt: Add map_pages op
From: Dan Carpenter @ 2025-11-18 7:28 UTC
To: Jason Gunthorpe; +Cc: iommu
Hello Jason Gunthorpe,
Commit dcd6a011a8d5 ("iommupt: Add map_pages op") from Nov 4, 2025
(linux-next), leads to the following Smatch static checker warning:
drivers/iommu/generic_pt/fmt/../iommu_pt.h:752 increase_top()
error: uninitialized symbol 'new_level'.
drivers/iommu/generic_pt/fmt/../iommu_pt.h
665 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
666 struct pt_iommu_map_args *map)
667 {
668 struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
669 struct pt_common *common = common_from_iommu(iommu_table);
670 uintptr_t top_of_table = READ_ONCE(common->top_of_table);
671 uintptr_t new_top_of_table = top_of_table;
672 struct pt_table_p *table_mem;
673 unsigned int new_level;
674 spinlock_t *domain_lock;
675 unsigned long flags;
676 int ret;
677
678 while (true) {
679 struct pt_range top_range =
680 _pt_top_range(common, new_top_of_table);
681 struct pt_state pts = pt_init_top(&top_range);
682
683 top_range.va = range->va;
684 top_range.last_va = range->last_va;
685
686 if (!pt_check_range(&top_range) && map->leaf_level <= pts.level)
687 break;
Smatch worries that we could hit the break on the very first iteration,
in which case "new_level" (and "table_mem") are never assigned before
change_top() uses them at line 752.  I don't know if that's possible.
688
689 pts.level++;
690 if (pts.level > PT_MAX_TOP_LEVEL ||
691 pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
692 ret = -ERANGE;
693 goto err_free;
694 }
695
696 new_level = pts.level;
697 table_mem =
698 table_alloc_top(common, _pt_top_set(NULL, pts.level),
699 map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
700 if (IS_ERR(table_mem))
701 return PTR_ERR(table_mem);
702 iommu_pages_list_add(&free_list, table_mem);
703
704 /* The new table links to the lower table always at index 0 */
705 top_range.va = 0;
706 top_range.top_level = new_level;
707 pts.table_lower = pts.table;
708 pts.table = table_mem;
709 pt_load_single_entry(&pts);
710 PT_WARN_ON(pts.index != 0);
711 pt_install_table(&pts, virt_to_phys(pts.table_lower),
712 &map->attrs);
713 new_top_of_table = _pt_top_set(pts.table, pts.level);
714 }
715
716 /*
717 * Avoid double flushing, flush it once after all pt_install_table()
718 */
719 if (pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
720 ret = iommu_pages_start_incoherent_list(
721 &free_list, iommu_table->iommu_device);
722 if (ret)
723 goto err_free;
724 }
725
726 /*
727 * top_of_table is write locked by the spinlock, but readers can use
728 * READ_ONCE() to get the value. Since we encode both the level and the
729 * pointer in one quanta the lockless reader will always see something
730 * valid. The HW must be updated to the new level under the spinlock
731 * before top_of_table is updated so that concurrent readers don't map
732 * into the new level until it is fully functional. If another thread
733 * already updated it while we were working then throw everything away
734 * and try again.
735 */
736 domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
737 spin_lock_irqsave(domain_lock, flags);
738 if (common->top_of_table != top_of_table) {
739 spin_unlock_irqrestore(domain_lock, flags);
740 ret = -EAGAIN;
741 goto err_free;
742 }
743
744 /*
745 * We do not issue any flushes for change_top on the expectation that
746 * any walk cache will not become a problem by adding another layer to
747 * the tree. Misses will rewalk from the updated top pointer, hits
748 * continue to be correct. Negative caching is fine too since all the
749 * new IOVA added by the new top is non-present.
750 */
751 iommu_table->driver_ops->change_top(
--> 752 iommu_table, virt_to_phys(table_mem), new_level);
^^^^^^^^^
753 WRITE_ONCE(common->top_of_table, new_top_of_table);
754 spin_unlock_irqrestore(domain_lock, flags);
755 return 0;
756
757 err_free:
758 if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
759 iommu_pages_stop_incoherent_list(&free_list,
760 iommu_table->iommu_device);
761 iommu_put_pages_list(&free_list);
762 return ret;
763 }
regards,
dan carpenter
* Re: [bug report] iommupt: Add map_pages op
From: Jason Gunthorpe @ 2025-11-20 17:21 UTC
To: Dan Carpenter; +Cc: iommu
On Tue, Nov 18, 2025 at 10:28:53AM +0300, Dan Carpenter wrote:
> Hello Jason Gunthorpe,
>
> Commit dcd6a011a8d5 ("iommupt: Add map_pages op") from Nov 4, 2025
> (linux-next), leads to the following Smatch static checker warning:
>
> drivers/iommu/generic_pt/fmt/../iommu_pt.h:752 increase_top()
> error: uninitialized symbol 'new_level'.
>
> drivers/iommu/generic_pt/fmt/../iommu_pt.h
> 665 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
> 666 struct pt_iommu_map_args *map)
> 667 {
> 668 struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
> 669 struct pt_common *common = common_from_iommu(iommu_table);
> 670 uintptr_t top_of_table = READ_ONCE(common->top_of_table);
> 671 uintptr_t new_top_of_table = top_of_table;
> 672 struct pt_table_p *table_mem;
> 673 unsigned int new_level;
> 674 spinlock_t *domain_lock;
> 675 unsigned long flags;
> 676 int ret;
> 677
> 678 while (true) {
> 679 struct pt_range top_range =
> 680 _pt_top_range(common, new_top_of_table);
> 681 struct pt_state pts = pt_init_top(&top_range);
> 682
> 683 top_range.va = range->va;
> 684 top_range.last_va = range->last_va;
> 685
> 686 if (!pt_check_range(&top_range) && map->leaf_level <= pts.level)
> 687 break;
>
> Smatch worries that we could break on the first iteration. I don't
> know if that's possible.
Hmm, I wanted to say it can't happen because of the caller, but the
locking doesn't work like that, so yeah, this is a bug.  I will make a
patch.
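For the record, one possible shape for it (just a sketch, not
necessarily what the actual patch will do) is to bail out before the HW
update when the loop never added a level, since new_level and table_mem
were never assigned on that path:

	/*
	 * Sketch only: new_top_of_table is only changed inside the loop,
	 * so if it still equals top_of_table we broke on the first
	 * iteration and there is no new level to install.
	 */
	if (new_top_of_table == top_of_table)
		return 0;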
Jason