From: David Gibson <david@gibson.dropbear.id.au>
To: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: linuxppc-dev@lists.ozlabs.org, kvm-ppc@vger.kernel.org,
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
Paul Mackerras <paulus@ozlabs.org>,
Alex Williamson <alex.williamson@redhat.com>
Subject: Re: [PATCH kernel v2 6/6] KVM: PPC: Remove redundant permission bits removal
Date: Tue, 11 Sep 2018 13:15:45 +1000
Message-ID: <20180911031545.GJ7978@umbus.fritz.box>
In-Reply-To: <20180910082912.13255-7-aik@ozlabs.ru>
On Mon, Sep 10, 2018 at 06:29:12PM +1000, Alexey Kardashevskiy wrote:
> The kvmppc_gpa_to_ua() helper already takes care of the permission
> bits in the TCE, and yet every single caller strips them before
> making the call.
>
> This changes the semantics of kvmppc_gpa_to_ua() so that it takes a
> TCE (that is, a GPA plus the TCE permission bits), which makes the
> callers simpler.
>
> This should cause no behavioural change.
>
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
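
For anyone skimming the series, the caller-side effect is easiest to
see as a before/after pair. A minimal sketch of the pattern (not a
verbatim quote of any single call site):

	/* Before: every caller had to mask the permission bits itself. */
	if (kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	/* After: pass the raw TCE; the helper drops TCE_PCI_READ and
	 * TCE_PCI_WRITE internally, so the mask disappears from callers. */
	if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
		return H_PARAMETER;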
> ---
> Changes:
> v2:
> * %s/kvmppc_gpa_to_ua/kvmppc_tce_to_ua/g
> ---
> arch/powerpc/include/asm/kvm_ppc.h | 2 +-
> arch/powerpc/kvm/book3s_64_vio.c | 12 ++++--------
> arch/powerpc/kvm/book3s_64_vio_hv.c | 22 +++++++++-------------
> 3 files changed, 14 insertions(+), 22 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index 2f5d431..38d0328 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -194,7 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
> (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
> (stt)->size, (ioba), (npages)) ? \
> H_PARAMETER : H_SUCCESS)
> -extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
> +extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
> unsigned long *ua, unsigned long **prmap);
> extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
> unsigned long idx, unsigned long tce);
> diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
> index 8231b17..c0c64d1 100644
> --- a/arch/powerpc/kvm/book3s_64_vio.c
> +++ b/arch/powerpc/kvm/book3s_64_vio.c
> @@ -378,8 +378,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
> if (iommu_tce_check_gpa(stt->page_shift, gpa))
> return H_TOO_HARD;
>
> - if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
> - &ua, NULL))
> + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
> return H_TOO_HARD;
>
> list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
> @@ -552,8 +551,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>
> idx = srcu_read_lock(&vcpu->kvm->srcu);
>
> - if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
> - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
> + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
> ret = H_PARAMETER;
> goto unlock_exit;
> }
> @@ -614,7 +612,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
> return ret;
>
> idx = srcu_read_lock(&vcpu->kvm->srcu);
> - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
> + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
> ret = H_TOO_HARD;
> goto unlock_exit;
> }
> @@ -649,9 +647,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
> }
> tce = be64_to_cpu(tce);
>
> - if (kvmppc_gpa_to_ua(vcpu->kvm,
> - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
> - &ua, NULL))
> + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
> return H_PARAMETER;
>
> list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
> diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
> index adf3b21..389dac1 100644
> --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
> @@ -110,8 +110,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
> if (iommu_tce_check_gpa(stt->page_shift, gpa))
> return H_PARAMETER;
>
> - if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
> - &ua, NULL))
> + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
> return H_TOO_HARD;
>
> list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
> @@ -180,10 +179,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
> }
> EXPORT_SYMBOL_GPL(kvmppc_tce_put);
>
> -long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
> +long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
> unsigned long *ua, unsigned long **prmap)
> {
> - unsigned long gfn = gpa >> PAGE_SHIFT;
> + unsigned long gfn = tce >> PAGE_SHIFT;
> struct kvm_memory_slot *memslot;
>
> memslot = search_memslots(kvm_memslots(kvm), gfn);
> @@ -191,7 +190,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
> return -EINVAL;
>
> *ua = __gfn_to_hva_memslot(memslot, gfn) |
> - (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
> + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
>
> #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> if (prmap)
> @@ -200,7 +199,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
>
> return 0;
> }
> -EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
> +EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
>
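
The masking above is now the single place where the permission bits
are dropped. A self-contained sketch of the same arithmetic, with
illustrative constants only (the real ones come from the kernel's TCE
and page headers; hva_base stands in for __gfn_to_hva_memslot()):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TCE_PCI_READ	0x1ULL	/* assumed low-bit encoding */
	#define TCE_PCI_WRITE	0x2ULL
	#define PAGE_SHIFT	16	/* assuming 64K pages */
	#define PAGE_MASK	(~(((uint64_t)1 << PAGE_SHIFT) - 1))

	int main(void)
	{
		/* A made-up TCE: a guest physical address with both
		 * permission bits set, i.e. a read/write mapping. */
		uint64_t tce = 0x12345678000ULL | TCE_PCI_READ | TCE_PCI_WRITE;

		/* Stand-in for __gfn_to_hva_memslot(memslot, tce >> PAGE_SHIFT). */
		uint64_t hva_base = 0x7fff80000000ULL;

		/* Same expression as kvmppc_tce_to_ua(): keep the in-page
		 * offset, drop the permission bits, splice onto the host VA. */
		uint64_t ua = hva_base |
			(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

		printf("tce = 0x%" PRIx64 ", ua = 0x%" PRIx64 "\n", tce, ua);
		return 0;
	}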
> #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
> @@ -389,8 +388,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> return ret;
>
> dir = iommu_tce_direction(tce);
> - if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
> - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
> + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
> return H_PARAMETER;
>
> entry = ioba >> stt->page_shift;
> @@ -492,7 +490,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
> */
> struct mm_iommu_table_group_mem_t *mem;
>
> - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
> + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
> return H_TOO_HARD;
>
> mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
> @@ -508,7 +506,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
> * We do not require memory to be preregistered in this case
> * so lock rmap and do __find_linux_pte_or_hugepte().
> */
> - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
> + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
> return H_TOO_HARD;
>
> rmap = (void *) vmalloc_to_phys(rmap);
> @@ -542,9 +540,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
> unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
>
> ua = 0;
> - if (kvmppc_gpa_to_ua(vcpu->kvm,
> - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
> - &ua, NULL))
> + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
> return H_PARAMETER;
>
> list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson