From: Tom Murphy <tmurphy@arista.com>
To: iommu@lists.linux-foundation.org
Cc: murphyt7@tcd.ie, Joerg Roedel, Will Deacon, Robin Murphy,
	Marek Szyprowski, Kukjin Kim, Krzysztof Kozlowski, David Woodhouse,
	Andy Gross, David Brown, Matthias Brugger, Rob Clark,
	Heiko Stuebner, Gerald Schaefer, Thierry Reding, Jonathan Hunter,
	linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	linux-samsung-soc@vger.kernel.org, linux-arm-msm@vger.kernel.org,
	linux-mediatek@lists.infradead.org, linux-rockchip@lists.infradead.org,
	linux-s390@vger.kernel.org, linux-tegra@vger.kernel.org
Subject: [PATCH v3 3/4] iommu/dma-iommu: Use the dev->coherent_dma_mask
Date: Mon, 6 May 2019 19:52:05 +0100
Message-Id: <20190506185207.31069-4-tmurphy@arista.com>
In-Reply-To: <20190506185207.31069-1-tmurphy@arista.com>
References: <20190506185207.31069-1-tmurphy@arista.com>

Use the dev->coherent_dma_mask when allocating in the dma-iommu ops API,
so that coherent allocations are bounded by the (possibly narrower)
coherent mask rather than the streaming DMA mask.

Signed-off-by: Tom Murphy <tmurphy@arista.com>
---
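Editorial aside, kept below the scissors so it stays out of the commit:
the point of this change is that a device carries two DMA masks, and
dma_get_mask() only ever reports the streaming one. A device may
advertise, say, a 48-bit streaming mask alongside a 32-bit
coherent_dma_mask, in which case sizing an IOVA allocation for a
coherent buffer by dma_get_mask(dev) can hand back an address the
device cannot reach. The standalone, compilable C sketch below
illustrates the distinction; fake_device and fake_dma_get_mask() are
made-up stand-ins that mirror struct device's dma_mask /
coherent_dma_mask pair and dma_get_mask()'s fallback behaviour, not
actual kernel code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two masks carried by struct device. */
struct fake_device {
	uint64_t *dma_mask;          /* streaming DMA mask */
	uint64_t coherent_dma_mask;  /* mask for coherent allocations */
};

/* Mirrors dma_get_mask(): read the streaming mask, fall back to 32 bits. */
static uint64_t fake_dma_get_mask(const struct fake_device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return (1ULL << 32) - 1;
}

int main(void)
{
	uint64_t streaming = (1ULL << 48) - 1;
	struct fake_device dev = {
		.dma_mask = &streaming,
		.coherent_dma_mask = (1ULL << 32) - 1,
	};

	/* An IOVA chosen under the streaming mask may exceed what the
	 * device can reach for coherent buffers. */
	printf("streaming mask: %#llx\n",
	       (unsigned long long)fake_dma_get_mask(&dev));
	printf("coherent mask:  %#llx\n",
	       (unsigned long long)dev.coherent_dma_mask);
	return 0;
}

Accordingly, the coherent allocation paths in the diff below pass
dev->coherent_dma_mask into __iommu_dma_map(), while the streaming,
resource, and MSI paths keep dma_get_mask(dev).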
 drivers/iommu/dma-iommu.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b383498e2dc3..2a968afdab10 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -442,7 +442,8 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot, struct iommu_domain *domain)
+		size_t size, int prot, struct iommu_domain *domain,
+		dma_addr_t dma_mask)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	size_t iova_off = 0;
@@ -453,7 +454,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size = iova_align(&cookie->iovad, size + iova_off);
 	}
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
 	if (!iova)
 		return DMA_MAPPING_ERROR;
 
@@ -496,7 +497,7 @@ static void *iommu_dma_alloc_contiguous(struct device *dev, size_t size,
 		return NULL;
 
 	*dma_handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
-			iommu_get_dma_domain(dev));
+			iommu_get_dma_domain(dev), dev->coherent_dma_mask);
 	if (*dma_handle == DMA_MAPPING_ERROR) {
 		if (!dma_release_from_contiguous(dev, page, count))
 			__free_pages(page, page_order);
@@ -766,7 +767,7 @@ static void *iommu_dma_alloc_pool(struct device *dev, size_t size,
 
 	*dma_handle = __iommu_dma_map(dev, page_to_phys(page), size,
 			dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs),
-			iommu_get_domain_for_dev(dev));
+			iommu_get_domain_for_dev(dev), dev->coherent_dma_mask);
 	if (*dma_handle == DMA_MAPPING_ERROR) {
 		dma_free_from_pool(vaddr, PAGE_ALIGN(size));
 		return NULL;
@@ -857,7 +858,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 
 	dma_handle = __iommu_dma_map(dev, phys, size,
 			dma_info_to_prot(dir, coherent, attrs),
-			iommu_get_dma_domain(dev));
+			iommu_get_dma_domain(dev), dma_get_mask(dev));
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(dev, phys, size, dir);
@@ -1067,7 +1068,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 {
 	return __iommu_dma_map(dev, phys, size,
 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
-			iommu_get_dma_domain(dev));
+			iommu_get_dma_domain(dev), dma_get_mask(dev));
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1246,7 +1247,8 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain,
+			dma_get_mask(dev));
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;
 
-- 
2.17.1