From: Dan Carpenter <dan.carpenter@linaro.org>
To: Jason Gunthorpe <jgg@ziepe.ca>
Cc: iommu@lists.linux.dev
Subject: [bug report] iommupt: Add map_pages op
Date: Fri, 21 Nov 2025 12:22:02 +0300 [thread overview]
Message-ID: <aSAvOn2fgvmF0WYD@stanley.mountain> (raw)
Hello Jason Gunthorpe,
Commit dcd6a011a8d5 ("iommupt: Add map_pages op") from Nov 4, 2025
(linux-next), leads to the following Smatch static checker warning:
drivers/iommu/generic_pt/fmt/../iommu_pt.h:701 increase_top()
warn: missing unwind goto?
drivers/iommu/generic_pt/fmt/../iommu_pt.h
665 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
666 struct pt_iommu_map_args *map)
667 {
668 struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
669 struct pt_common *common = common_from_iommu(iommu_table);
670 uintptr_t top_of_table = READ_ONCE(common->top_of_table);
671 uintptr_t new_top_of_table = top_of_table;
672 struct pt_table_p *table_mem;
673 unsigned int new_level;
674 spinlock_t *domain_lock;
675 unsigned long flags;
676 int ret;
677
678 while (true) {
679 struct pt_range top_range =
680 _pt_top_range(common, new_top_of_table);
681 struct pt_state pts = pt_init_top(&top_range);
682
683 top_range.va = range->va;
684 top_range.last_va = range->last_va;
685
686 if (!pt_check_range(&top_range) && map->leaf_level <= pts.level)
687 break;
688
689 pts.level++;
690 if (pts.level > PT_MAX_TOP_LEVEL ||
691 pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
692 ret = -ERANGE;
693 goto err_free;
694 }
695
696 new_level = pts.level;
697 table_mem =
698 table_alloc_top(common, _pt_top_set(NULL, pts.level),
699 map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
700 if (IS_ERR(table_mem))
--> 701 return PTR_ERR(table_mem);
goto err_free? Although, why would we need to call
iommu_pages_stop_incoherent_list() when that hasn't been started
702 iommu_pages_list_add(&free_list, table_mem);
703
704 /* The new table links to the lower table always at index 0 */
705 top_range.va = 0;
706 top_range.top_level = new_level;
707 pts.table_lower = pts.table;
708 pts.table = table_mem;
709 pt_load_single_entry(&pts);
710 PT_WARN_ON(pts.index != 0);
711 pt_install_table(&pts, virt_to_phys(pts.table_lower),
712 &map->attrs);
713 new_top_of_table = _pt_top_set(pts.table, pts.level);
714 }
715
716 /*
717 * Avoid double flushing, flush it once after all pt_install_table()
718 */
719 if (pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
720 ret = iommu_pages_start_incoherent_list(
721 &free_list, iommu_table->iommu_device);
until here.
722 if (ret)
723 goto err_free;
724 }
725
726 /*
727 * top_of_table is write locked by the spinlock, but readers can use
728 * READ_ONCE() to get the value. Since we encode both the level and the
729 * pointer in one quanta the lockless reader will always see something
730 * valid. The HW must be updated to the new level under the spinlock
731 * before top_of_table is updated so that concurrent readers don't map
732 * into the new level until it is fully functional. If another thread
733 * already updated it while we were working then throw everything away
734 * and try again.
735 */
736 domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
737 spin_lock_irqsave(domain_lock, flags);
738 if (common->top_of_table != top_of_table) {
739 spin_unlock_irqrestore(domain_lock, flags);
740 ret = -EAGAIN;
741 goto err_free;
742 }
743
744 /*
745 * We do not issue any flushes for change_top on the expectation that
746 * any walk cache will not become a problem by adding another layer to
747 * the tree. Misses will rewalk from the updated top pointer, hits
748 * continue to be correct. Negative caching is fine too since all the
749 * new IOVA added by the new top is non-present.
750 */
751 iommu_table->driver_ops->change_top(
752 iommu_table, virt_to_phys(table_mem), new_level);
753 WRITE_ONCE(common->top_of_table, new_top_of_table);
754 spin_unlock_irqrestore(domain_lock, flags);
755 return 0;
756
757 err_free:
758 if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
759 iommu_pages_stop_incoherent_list(&free_list,
760 iommu_table->iommu_device);
761 iommu_put_pages_list(&free_list);
762 return ret;
763 }
regards,
dan carpenter
next reply other threads:[~2025-11-21 9:22 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-21 9:22 Dan Carpenter [this message]
2025-11-21 12:48 ` [bug report] iommupt: Add map_pages op Jason Gunthorpe
-- strict thread matches above, loose matches on Subject: below --
2025-11-18 7:28 Dan Carpenter
2025-11-20 17:21 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aSAvOn2fgvmF0WYD@stanley.mountain \
--to=dan.carpenter@linaro.org \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).