From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: Alexander Graf <agraf@suse.de>,
qemu-devel@nongnu.org, qemu-ppc@nongnu.org,
Paolo Bonzini <pbonzini@redhat.com>,
Paul Mackerras <paulus@samba.org>,
David Gibson <david@gibson.dropbear.id.au>
Subject: Re: [Qemu-devel] [PATCH] powerpc iommu: enable multiple TCE requests
Date: Fri, 16 Aug 2013 19:49:41 +1000
Message-ID: <520DF5B5.4050805@ozlabs.ru>
In-Reply-To: <1375862885-12132-1-git-send-email-aik@ozlabs.ru>

On 08/07/2013 06:08 PM, Alexey Kardashevskiy wrote:
> Currently only a single TCE entry per request is supported (H_PUT_TCE).
> However, the PAPR+ specification allows multiple-entry requests such as
> H_PUT_TCE_INDIRECT and H_STUFF_TCE. Because fewer transitions to the host
> kernel via ioctls are needed, supporting these calls can accelerate IOMMU
> operations.
>
> This also removes some leftovers in the debug output of the H_PUT_TCE handler.
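
For anyone unfamiliar with the hypercall, this is roughly how a guest would
use H_PUT_TCE_INDIRECT to map many pages in one call instead of one
H_PUT_TCE per page. This is a sketch only and not part of the patch:
guest_hcall(), tce_of() and the H_PUT_TCE_INDIRECT token are made-up
placeholders for whatever the guest firmware/OS actually provides.

#include <stdint.h>

/* Sketch: map npages entries starting at ioba with a single hypercall.
 * PAPR expects the TCE list to sit inside one 4K page, and a real guest
 * would pass the list's real (physical) address; the cast below exists
 * only for the sake of the sketch.  guest_hcall() and tce_of() are
 * hypothetical helpers. */
static long map_range_indirect(uint64_t liobn, uint64_t ioba,
                               uint64_t *tce_list_page,    /* 4K aligned */
                               const uint64_t *real_addrs, long npages)
{
    long i;

    for (i = 0; i < npages; i++) {
        tce_list_page[i] = tce_of(real_addrs[i]);  /* real addr | R/W bits */
    }
    /* One exit to the hypervisor/QEMU instead of npages exits. */
    return guest_hcall(H_PUT_TCE_INDIRECT, liobn, ioba,
                       (uint64_t)(uintptr_t)tce_list_page, npages);
}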
>
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>
> This patch requires the KVM_CAP_SPAPR_MULTITCE capability from the kernel
> headers, which is not there yet.
> However, it would still be nice to have a "Reviewed-by" from someone for
> when the capability makes it upstream. Thanks.
Ping, anyone?
Is the patch good as it is (though I am sure it is not)?
Thank you all.
>
> ---
> hw/ppc/spapr.c | 16 ++++++++++--
> hw/ppc/spapr_iommu.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++---
> target-ppc/kvm.c | 7 +++++
> target-ppc/kvm_ppc.h | 7 +++++
> 4 files changed, 99 insertions(+), 5 deletions(-)
>
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 9494915..a6b1f54 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -301,6 +301,8 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
> CPUState *cs;
> uint32_t start_prop = cpu_to_be32(initrd_base);
> uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
> + char hypertas_propm[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
> + "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk\0hcall-multi-tce";
> char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
> "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk";
> char qemu_hypertas_prop[] = "hcall-memop1";
> @@ -480,8 +482,18 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
> /* RTAS */
> _FDT((fdt_begin_node(fdt, "rtas")));
>
> - _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop,
> - sizeof(hypertas_prop))));
> + /* In TCG mode, the multitce functions, which we implement, are a
> + * win. With KVM, we could fall back to the qemu implementation
> + * when KVM doesn't support them, but that would be much slower
> + * than just using the KVM implementations of the single TCE
> + * hypercalls. */
> + if (kvmppc_spapr_use_multitce()) {
> + _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_propm,
> + sizeof(hypertas_propm))));
> + } else {
> + _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop,
> + sizeof(hypertas_prop))));
> + }
> _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas_prop,
> sizeof(qemu_hypertas_prop))));
>
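
On the guest side, nothing should use the new hypercalls unless
"hcall-multi-tce" appears in ibm,hypertas-functions. A consumer would scan
the NUL-separated property roughly like this (again only a sketch, not
taken from any existing guest code; prop and prop_len are assumed to come
from whatever device tree parsing the guest already does):

#include <stdbool.h>
#include <string.h>

/* Sketch: return true if the NUL-separated property contains "name". */
static bool hypertas_has(const char *prop, int prop_len, const char *name)
{
    const char *p = prop;

    while (p < prop + prop_len) {
        if (strcmp(p, name) == 0) {
            return true;
        }
        p += strlen(p) + 1;     /* step over the terminating NUL */
    }
    return false;
}

/* e.g.: bool multitce = hypertas_has(prop, prop_len, "hcall-multi-tce"); */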
> diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
> index 3d4a1fc..22b09be 100644
> --- a/hw/ppc/spapr_iommu.c
> +++ b/hw/ppc/spapr_iommu.c
> @@ -244,6 +244,71 @@ static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
> return H_SUCCESS;
> }
>
> +static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
> + sPAPREnvironment *spapr,
> + target_ulong opcode, target_ulong *args)
> +{
> + int i;
> + target_ulong liobn = args[0];
> + target_ulong ioba = args[1];
> + target_ulong tce_list = args[2];
> + target_ulong npages = args[3];
> + target_ulong ret = 0;
> + sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
> +
> + if (tcet) {
> + for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
> + target_ulong tce = ldq_phys((tce_list & ~SPAPR_TCE_PAGE_MASK) +
> + i * sizeof(target_ulong));
> + ret = put_tce_emu(tcet, ioba, tce);
> + if (ret) {
> + break;
> + }
> + }
> + return ret;
> + }
> +#ifdef DEBUG_TCE
> + fprintf(stderr, "%s on liobn=" TARGET_FMT_lx
> + " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx
> + " ret = " TARGET_FMT_ld "\n",
> + __func__, liobn, ioba, tce_list, ret);
> +#endif
> +
> + return H_PARAMETER;
> +}
> +
> +static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
> + target_ulong opcode, target_ulong *args)
> +{
> + int i;
> + target_ulong liobn = args[0];
> + target_ulong ioba = args[1];
> + target_ulong tce_value = args[2];
> + target_ulong npages = args[3];
> + target_ulong ret = 0;
> + sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
> +
> + ioba &= ~(SPAPR_TCE_PAGE_SIZE - 1);
> +
> + if (tcet) {
> + for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
> + ret = put_tce_emu(tcet, ioba, tce_value);
> + if (ret) {
> + break;
> + }
> + }
> + return ret;
> + }
> +#ifdef DEBUG_TCE
> + fprintf(stderr, "%s on liobn=" TARGET_FMT_lx
> + " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx
> + " ret = " TARGET_FMT_ld "\n",
> + __func__, liobn, ioba, tce_value, ret);
> +#endif
> +
> + return H_PARAMETER;
> +}
> +
> static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
> target_ulong opcode, target_ulong *args)
> {
> @@ -258,9 +323,10 @@ static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
> return put_tce_emu(tcet, ioba, tce);
> }
> #ifdef DEBUG_TCE
> - fprintf(stderr, "%s on liobn=" TARGET_FMT_lx /*%s*/
> - " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx "\n",
> - __func__, liobn, /*dev->qdev.id, */ioba, tce);
> + fprintf(stderr, "%s on liobn=" TARGET_FMT_lx
> + " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx
> + " ret = " TARGET_FMT_ld "\n",
> + __func__, liobn, ioba, tce, ret);
> #endif
>
> return H_PARAMETER;
> @@ -318,6 +384,8 @@ static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
>
> /* hcall-tce */
> spapr_register_hypercall(H_PUT_TCE, h_put_tce);
> + spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
> + spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
> }
>
> static TypeInfo spapr_tce_table_info = {
> diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
> index 8afa7eb..97ac670 100644
> --- a/target-ppc/kvm.c
> +++ b/target-ppc/kvm.c
> @@ -60,6 +60,7 @@ static int cap_booke_sregs;
> static int cap_ppc_smt;
> static int cap_ppc_rma;
> static int cap_spapr_tce;
> +static int cap_spapr_multitce;
> static int cap_hior;
> static int cap_one_reg;
> static int cap_epr;
> @@ -96,6 +97,7 @@ int kvm_arch_init(KVMState *s)
> cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
> cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
> cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
> + cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
> cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
> cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
> cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
> @@ -1603,6 +1605,11 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
> }
> #endif
>
> +bool kvmppc_spapr_use_multitce(void)
> +{
> + return !kvm_enabled() || cap_spapr_multitce;
> +}
> +
> void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
> {
> struct kvm_create_spapr_tce args = {
> diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
> index 12564ef..a2a903f 100644
> --- a/target-ppc/kvm_ppc.h
> +++ b/target-ppc/kvm_ppc.h
> @@ -31,6 +31,7 @@ int kvmppc_set_tcr(PowerPCCPU *cpu);
> int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
> #ifndef CONFIG_USER_ONLY
> off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem);
> +bool kvmppc_spapr_use_multitce(void);
> void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd);
> int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
> int kvmppc_reset_htab(int shift_hint);
> @@ -125,6 +126,12 @@ static inline off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
> return 0;
> }
>
> +static inline bool kvmppc_spapr_use_multitce(void)
> +{
> + /* No KVM, so always use the qemu multitce implementations */
> + return true;
> +}
> +
> static inline void *kvmppc_create_spapr_tce(uint32_t liobn,
> uint32_t window_size, int *fd)
> {
>
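One more usage note, again only a sketch with the hypothetical guest_hcall()
from above: with H_STUFF_TCE, clearing a DMA window becomes a single call
that writes the same TCE value into npages consecutive entries, rather than
a loop of H_PUT_TCE with a zero TCE.

/* Sketch: invalidate npages IOMMU entries starting at ioba in one call. */
rc = guest_hcall(H_STUFF_TCE, liobn, ioba, 0 /* tce_value */, npages);
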
--
Alexey