From: Oak Zeng
To: intel-xe@lists.freedesktop.org
Subject: [CI 04/43] iommu/dma: Provide an interface to allow preallocating IOVA
Date: Tue, 11 Jun 2024 22:25:26 -0400
Message-Id: <20240612022605.385062-4-oak.zeng@intel.com>
X-Mailer: git-send-email 2.26.3
In-Reply-To: <20240612022605.385062-1-oak.zeng@intel.com>
References: <20240612022605.385062-1-oak.zeng@intel.com>

From: Leon Romanovsky

Separate IOVA allocation into a dedicated callback. This allows the IOVA to be cached and reused in fast paths by devices which support the ODP (on-demand paging) mechanism.
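The new ops are only reachable through struct dma_map_ops, so callers need
generic dma-mapping wrappers layered on top. A minimal sketch of what such
wrappers could look like (dma_alloc_iova()/dma_free_iova() are hypothetical
names, not part of this patch), just to illustrate the intended call flow:

#include <linux/dma-map-ops.h>

/* Hypothetical helper: preallocate an IOVA range for later mappings. */
dma_addr_t dma_alloc_iova(struct device *dev, size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->alloc_iova)
		return 0;
	/* Returns 0 on failure, matching __iommu_dma_alloc_iova(). */
	return ops->alloc_iova(dev, size);
}

/* Hypothetical helper: release a preallocated IOVA range. */
void dma_free_iova(struct device *dev, dma_addr_t iova, size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->free_iova)
		ops->free_iova(dev, iova, size);
}

With something like this in place, an ODP-capable driver could allocate the
IOVA range once at mapping setup and reuse it from its page-fault fast path,
instead of going through the IOVA allocator on every fault.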
Signed-off-by: Leon Romanovsky
---
 drivers/iommu/dma-iommu.c | 50 +++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 43520e7275cc..9ce8298047f5 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -357,7 +357,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
 	atomic_set(&cookie->fq_timer_on, 0);
 	/*
 	 * Prevent incomplete fq state being observable. Pairs with path from
-	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+	 * __iommu_dma_unmap() through __iommu_dma_free_iova() to queue_iova()
 	 */
 	smp_wmb();
 	WRITE_ONCE(cookie->fq_domain, domain);
@@ -758,7 +758,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 	}
 }
 
-static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+static dma_addr_t __iommu_dma_alloc_iova(struct iommu_domain *domain,
 		size_t size, u64 dma_limit, struct device *dev)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -804,7 +804,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 	return (dma_addr_t)iova << shift;
 }
 
-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+static void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct iova_domain *iovad = &cookie->iovad;
@@ -841,7 +841,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	if (!iotlb_gather.queued)
 		iommu_iotlb_sync(domain, &iotlb_gather);
 
-	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
+	__iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -864,12 +864,12 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 
 	size = iova_align(iovad, size + iova_off);
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+	iova = __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
 	if (!iova)
 		return DMA_MAPPING_ERROR;
 
 	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
-		iommu_dma_free_iova(cookie, iova, size, NULL);
+		__iommu_dma_free_iova(cookie, iova, size, NULL);
 		return DMA_MAPPING_ERROR;
 	}
 	return iova + iova_off;
@@ -973,7 +973,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 		return NULL;
 
 	size = iova_align(iovad, size);
-	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+	iova = __iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
 	if (!iova)
 		goto out_free_pages;
 
@@ -1007,7 +1007,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 out_free_sg:
 	sg_free_table(sgt);
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
+	__iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_pages:
 	__iommu_dma_free_pages(pages, count);
 	return NULL;
@@ -1442,7 +1442,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	if (!iova_len)
 		return __finalise_sg(dev, sg, nents, 0);
 
-	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+	iova = __iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova) {
 		ret = -ENOMEM;
 		goto out_restore_sg;
@@ -1459,7 +1459,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	return __finalise_sg(dev, sg, nents, iova);
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
+	__iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 out:
@@ -1720,6 +1720,30 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
 	return SIZE_MAX;
 }
 
+static dma_addr_t iommu_dma_alloc_iova(struct device *dev, size_t size)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	dma_addr_t dma_mask = dma_get_mask(dev);
+
+	size = iova_align(iovad, size);
+	return __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+}
+
+static void iommu_dma_free_iova(struct device *dev, dma_addr_t iova,
+		size_t size)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iommu_iotlb_gather iotlb_gather;
+
+	size = iova_align(iovad, size);
+	iommu_iotlb_gather_init(&iotlb_gather);
+	__iommu_dma_free_iova(cookie, iova, size, &iotlb_gather);
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
 	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED |
 				  DMA_F_CAN_SKIP_SYNC,
@@ -1744,6 +1768,8 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
 	.opt_mapping_size	= iommu_dma_opt_mapping_size,
 	.max_mapping_size	= iommu_dma_max_mapping_size,
+	.alloc_iova		= iommu_dma_alloc_iova,
+	.free_iova		= iommu_dma_free_iova,
 };
 
 void iommu_setup_dma_ops(struct device *dev)
@@ -1786,7 +1812,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	iova = __iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
 		goto out_free_page;
 
@@ -1800,7 +1826,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
+	__iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
-- 
2.26.3