* [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
@ 2014-11-13 23:29 Suresh E. Warrier
2014-11-14 10:56 ` Alexander Graf
2014-11-20 10:40 ` Aneesh Kumar K.V
0 siblings, 2 replies; 10+ messages in thread
From: Suresh E. Warrier @ 2014-11-13 23:29 UTC (permalink / raw)
To: agraf, kvm-ppc, kvm; +Cc: Paul Mackerras
This patch adds trace points in the guest entry and exit code and also
for exceptions handled by the host in kernel mode - hypercalls and page
faults. The new events are added to /sys/kernel/debug/tracing/events
under a new subsystem called kvm_hv.
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
---
arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +-
arch/powerpc/kvm/book3s_hv.c | 19 ++
arch/powerpc/kvm/trace_hv.h | 497 ++++++++++++++++++++++++++++++++++++
3 files changed, 525 insertions(+), 3 deletions(-)
create mode 100644 arch/powerpc/kvm/trace_hv.h
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 70feb7b..20cbad1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -38,6 +38,7 @@
#include <asm/cputable.h>
#include "book3s_hv_cma.h"
+#include "trace_hv.h"
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
@@ -627,6 +628,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
+ trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
+
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
@@ -639,6 +642,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
+ ret = -EFAULT;
is_io = 0;
pfn = 0;
page = NULL;
@@ -662,7 +666,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
up_read(&current->mm->mmap_sem);
if (!pfn)
- return -EFAULT;
+ goto out_put;
} else {
page = pages[0];
if (PageHuge(page)) {
@@ -690,14 +694,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pfn = page_to_pfn(page);
}
- ret = -EFAULT;
if (psize > pte_size)
goto out_put;
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_io)) {
if (is_io)
- return -EFAULT;
+ goto out_put;
+
/*
* Allow guest to map emulated device memory as
* uncacheable, but actually make it cacheable.
@@ -753,6 +757,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);
out_put:
+ trace_kvm_page_fault_exit(vcpu, hpte, ret);
+
if (page) {
/*
* We drop pages[0] here, not page because page might
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 69d4085..5143d17 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -57,6 +57,9 @@
#include "book3s.h"
+#define CREATE_TRACE_POINTS
+#include "trace_hv.h"
+
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
@@ -1679,6 +1682,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
}
/* Set this explicitly in case thread 0 doesn't have a vcpu */
@@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_RUNNING;
preempt_disable();
+
+ trace_kvmppc_run_core(vc, 0);
+
spin_unlock(&vc->lock);
kvm_guest_enter();
@@ -1732,6 +1739,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_core_pending_dec(vcpu))
kvmppc_core_dequeue_dec(vcpu);
+ trace_kvm_guest_exit(vcpu);
+
ret = RESUME_GUEST;
if (vcpu->arch.trap)
ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
@@ -1757,6 +1766,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
wake_up(&vcpu->arch.cpu_run);
}
}
+
+ trace_kvmppc_run_core(vc, 1);
}
/*
@@ -1783,11 +1794,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
vc->vcore_state = VCORE_SLEEPING;
+ trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
finish_wait(&vc->wq, &wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
+ trace_kvmppc_vcore_blocked(vc, 1);
}
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -1796,6 +1809,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc;
struct kvm_vcpu *v, *vn;
+ trace_kvmppc_run_vcpu_enter(vcpu);
+
kvm_run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
@@ -1825,6 +1840,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
VCORE_EXIT_COUNT(vc) == 0) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
+ trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
wake_up(&vc->wq);
}
@@ -1889,6 +1905,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
+ trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
@@ -1934,7 +1951,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
+ trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
+ trace_kvm_hcall_exit(vcpu, r);
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
new file mode 100644
index 0000000..2d2e55f
--- /dev/null
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -0,0 +1,497 @@
+#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_HV_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_hv
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+#define kvm_trace_symbol_hcall \
+ {0x04, "H_REMOVE"}, \
+ {0x08, "H_ENTER"}, \
+ {0x0c, "H_READ"}, \
+ {0x10, "H_CLEAR_MOD"}, \
+ {0x14, "H_CLEAR_REF"}, \
+ {0x18, "H_PROTECT"}, \
+ {0x1c, "H_GET_TCE"}, \
+ {0x20, "H_PUT_TCE"}, \
+ {0x24, "H_SET_SPRG0"}, \
+ {0x28, "H_SET_DABR"}, \
+ {0x2c, "H_PAGE_INIT"}, \
+ {0x30, "H_SET_ASR"}, \
+ {0x34, "H_ASR_ON"}, \
+ {0x38, "H_ASR_OFF"}, \
+ {0x3c, "H_LOGICAL_CI_LOAD"}, \
+ {0x40, "H_LOGICAL_CI_STORE"}, \
+ {0x44, "H_LOGICAL_CACHE_LOAD"}, \
+ {0x48, "H_LOGICAL_CACHE_STORE"}, \
+ {0x4c, "H_LOGICAL_ICBI"}, \
+ {0x50, "H_LOGICAL_DCBF"}, \
+ {0x54, "H_GET_TERM_CHAR"}, \
+ {0x58, "H_PUT_TERM_CHAR"}, \
+ {0x5c, "H_REAL_TO_LOGICAL"}, \
+ {0x60, "H_HYPERVISOR_DATA"}, \
+ {0x64, "H_EOI"}, \
+ {0x68, "H_CPPR"}, \
+ {0x6c, "H_IPI"}, \
+ {0x70, "H_IPOLL"}, \
+ {0x74, "H_XIRR"}, \
+ {0x7c, "H_PERFMON"}, \
+ {0x78, "H_MIGRATE_DMA"}, \
+ {0xDC, "H_REGISTER_VPA"}, \
+ {0xE0, "H_CEDE"}, \
+ {0xE4, "H_CONFER"}, \
+ {0xE8, "H_PROD"}, \
+ {0xEC, "H_GET_PPP"}, \
+ {0xF0, "H_SET_PPP"}, \
+ {0xF4, "H_PURR"}, \
+ {0xF8, "H_PIC"}, \
+ {0xFC, "H_REG_CRQ"}, \
+ {0x100, "H_FREE_CRQ"}, \
+ {0x104, "H_VIO_SIGNAL"}, \
+ {0x108, "H_SEND_CRQ"}, \
+ {0x110, "H_COPY_RDMA"}, \
+ {0x114, "H_REGISTER_LOGICAL_LAN"}, \
+ {0x118, "H_FREE_LOGICAL_LAN"}, \
+ {0x11C, "H_ADD_LOGICAL_LAN_BUFFER"}, \
+ {0x120, "H_SEND_LOGICAL_LAN"}, \
+ {0x124, "H_BULK_REMOVE"}, \
+ {0x130, "H_MULTICAST_CTRL"}, \
+ {0x134, "H_SET_XDABR"}, \
+ {0x138, "H_STUFF_TCE"}, \
+ {0x13C, "H_PUT_TCE_INDIRECT"}, \
+ {0x14C, "H_CHANGE_LOGICAL_LAN_MAC"}, \
+ {0x150, "H_VTERM_PARTNER_INFO"}, \
+ {0x154, "H_REGISTER_VTERM"}, \
+ {0x158, "H_FREE_VTERM"}, \
+ {0x15C, "H_RESET_EVENTS"}, \
+ {0x160, "H_ALLOC_RESOURCE"}, \
+ {0x164, "H_FREE_RESOURCE"}, \
+ {0x168, "H_MODIFY_QP"}, \
+ {0x16C, "H_QUERY_QP"}, \
+ {0x170, "H_REREGISTER_PMR"}, \
+ {0x174, "H_REGISTER_SMR"}, \
+ {0x178, "H_QUERY_MR"}, \
+ {0x17C, "H_QUERY_MW"}, \
+ {0x180, "H_QUERY_HCA"}, \
+ {0x184, "H_QUERY_PORT"}, \
+ {0x188, "H_MODIFY_PORT"}, \
+ {0x18C, "H_DEFINE_AQP1"}, \
+ {0x190, "H_GET_TRACE_BUFFER"}, \
+ {0x194, "H_DEFINE_AQP0"}, \
+ {0x198, "H_RESIZE_MR"}, \
+ {0x19C, "H_ATTACH_MCQP"}, \
+ {0x1A0, "H_DETACH_MCQP"}, \
+ {0x1A4, "H_CREATE_RPT"}, \
+ {0x1A8, "H_REMOVE_RPT"}, \
+ {0x1AC, "H_REGISTER_RPAGES"}, \
+ {0x1B0, "H_DISABLE_AND_GETC"}, \
+ {0x1B4, "H_ERROR_DATA"}, \
+ {0x1B8, "H_GET_HCA_INFO"}, \
+ {0x1BC, "H_GET_PERF_COUNT"}, \
+ {0x1C0, "H_MANAGE_TRACE"}, \
+ {0x1D4, "H_FREE_LOGICAL_LAN_BUFFER"}, \
+ {0x1E4, "H_QUERY_INT_STATE"}, \
+ {0x1D8, "H_POLL_PENDING"}, \
+ {0x244, "H_ILLAN_ATTRIBUTES"}, \
+ {0x250, "H_MODIFY_HEA_QP"}, \
+ {0x254, "H_QUERY_HEA_QP"}, \
+ {0x258, "H_QUERY_HEA"}, \
+ {0x25C, "H_QUERY_HEA_PORT"}, \
+ {0x260, "H_MODIFY_HEA_PORT"}, \
+ {0x264, "H_REG_BCMC"}, \
+ {0x268, "H_DEREG_BCMC"}, \
+ {0x26C, "H_REGISTER_HEA_RPAGES"}, \
+ {0x270, "H_DISABLE_AND_GET_HEA"}, \
+ {0x274, "H_GET_HEA_INFO"}, \
+ {0x278, "H_ALLOC_HEA_RESOURCE"}, \
+ {0x284, "H_ADD_CONN"}, \
+ {0x288, "H_DEL_CONN"}, \
+ {0x298, "H_JOIN"}, \
+ {0x2A4, "H_VASI_STATE"}, \
+ {0x2B0, "H_ENABLE_CRQ"}, \
+ {0x2B8, "H_GET_EM_PARMS"}, \
+ {0x2D0, "H_SET_MPP"}, \
+ {0x2D4, "H_GET_MPP"}, \
+ {0x2EC, "H_HOME_NODE_ASSOCIATIVITY"}, \
+ {0x2F4, "H_BEST_ENERGY"}, \
+ {0x2FC, "H_XIRR_X"}, \
+ {0x300, "H_RANDOM"}, \
+ {0x304, "H_COP"}, \
+ {0x314, "H_GET_MPP_X"}, \
+ {0x31C, "H_SET_MODE"}, \
+ {0xf000, "H_RTAS"}
+
+#define kvm_trace_symbol_kvmret \
+ {0, "RESUME_GUEST"}, \
+ {1, "RESUME_GUEST_NV"}, \
+ {2, "RESUME_HOST"}, \
+ {3, "RESUME_HOST_NV"}
+
+#define kvm_trace_symbol_hcall_rc \
+ {0, "H_SUCCESS"}, \
+ {1, "H_BUSY"}, \
+ {2, "H_CLOSED"}, \
+ {3, "H_NOT_AVAILABLE"}, \
+ {4, "H_CONSTRAINED"}, \
+ {5, "H_PARTIAL"}, \
+ {14, "H_IN_PROGRESS"}, \
+ {15, "H_PAGE_REGISTERED"}, \
+ {16, "H_PARTIAL_STORE"}, \
+ {17, "H_PENDING"}, \
+ {18, "H_CONTINUE"}, \
+ {9900, "H_LONG_BUSY_START_RANGE"}, \
+ {9900, "H_LONG_BUSY_ORDER_1_MSEC"}, \
+ {9901, "H_LONG_BUSY_ORDER_10_MSEC"}, \
+ {9902, "H_LONG_BUSY_ORDER_100_MSEC"}, \
+ {9903, "H_LONG_BUSY_ORDER_1_SEC"}, \
+ {9904, "H_LONG_BUSY_ORDER_10_SEC"}, \
+ {9905, "H_LONG_BUSY_ORDER_100_SEC"}, \
+ {9905, "H_LONG_BUSY_END_RANGE"}, \
+ {9999, "H_TOO_HARD"}, \
+ {-1, "H_HARDWARE"}, \
+ {-2, "H_FUNCTION"}, \
+ {-3, "H_PRIVILEGE"}, \
+ {-4, "H_PARAMETER"}, \
+ {-5, "H_BAD_MODE"}, \
+ {-6, "H_PTEG_FULL"}, \
+ {-7, "H_NOT_FOUND"}, \
+ {-8, "H_RESERVED_DABR"}, \
+ {-9, "H_NO_MEM"}, \
+ {-10, "H_AUTHORITY"}, \
+ {-11, "H_PERMISSION"}, \
+ {-12, "H_DROPPED"}, \
+ {-13, "H_SOURCE_PARM"}, \
+ {-14, "H_DEST_PARM"}, \
+ {-15, "H_REMOTE_PARM"}, \
+ {-16, "H_RESOURCE"}, \
+ {-17, "H_ADAPTER_PARM"}, \
+ {-18, "H_RH_PARM"}, \
+ {-19, "H_RCQ_PARM"}, \
+ {-20, "H_SCQ_PARM"}, \
+ {-21, "H_EQ_PARM"}, \
+ {-22, "H_RT_PARM"}, \
+ {-23, "H_ST_PARM"}, \
+ {-24, "H_SIGT_PARM"}, \
+ {-25, "H_TOKEN_PARM"}, \
+ {-27, "H_MLENGTH_PARM"}, \
+ {-28, "H_MEM_PARM"}, \
+ {-29, "H_MEM_ACCESS_PARM"}, \
+ {-30, "H_ATTR_PARM"}, \
+ {-31, "H_PORT_PARM"}, \
+ {-32, "H_MCG_PARM"}, \
+ {-33, "H_VL_PARM"}, \
+ {-34, "H_TSIZE_PARM"}, \
+ {-35, "H_TRACE_PARM"}, \
+ {-37, "H_MASK_PARM"}, \
+ {-38, "H_MCG_FULL"}, \
+ {-39, "H_ALIAS_EXIST"}, \
+ {-40, "H_P_COUNTER"}, \
+ {-41, "H_TABLE_FULL"}, \
+ {-42, "H_ALT_TABLE"}, \
+ {-43, "H_MR_CONDITION"}, \
+ {-44, "H_NOT_ENOUGH_RESOURCES"}, \
+ {-45, "H_R_STATE"}, \
+ {-46, "H_RESCINDED"}, \
+ {-55, "H_P2"}, \
+ {-56, "H_P3"}, \
+ {-57, "H_P4"}, \
+ {-58, "H_P5"}, \
+ {-59, "H_P6"}, \
+ {-60, "H_P7"}, \
+ {-61, "H_P8"}, \
+ {-62, "H_P9"}, \
+ {-64, "H_TOO_BIG"}, \
+ {-68, "H_OVERLAP"}, \
+ {-69, "H_INTERRUPT"}, \
+ {-70, "H_BAD_DATA"}, \
+ {-71, "H_NOT_ACTIVE"}, \
+ {-72, "H_SG_LIST"}, \
+ {-73, "H_OP_MODE"}, \
+ {-74, "H_COP_HW"}, \
+ {-256, "H_UNSUPPORTED_FLAG_START"}, \
+ {-511, "H_UNSUPPORTED_FLAG_END"}, \
+ {-9005, "H_MULTI_THREADS_ACTIVE"}, \
+ {-9006, "H_OUTSTANDING_COP_OPS"}
+
+TRACE_EVENT(kvm_guest_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, pc)
+ __field(unsigned long, pending_exceptions)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pending_exceptions = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("VCPU %d: pc=0x%lx pexcp=0x%lx ceded=%d",
+ __entry->vcpu_id,
+ __entry->pc,
+ __entry->pending_exceptions, __entry->ceded)
+);
+
+TRACE_EVENT(kvm_guest_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, trap)
+ __field(unsigned long, pc)
+ __field(unsigned long, msr)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->trap = vcpu->arch.trap;
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->msr = vcpu->arch.shregs.msr;
+ ),
+
+ TP_printk("VCPU %d: trap=%s pc=0x%lx msr=0x%lx, ceded=%d",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->trap, kvm_trace_symbol_exit),
+ __entry->pc, __entry->msr, __entry->ceded
+ )
+);
+
+TRACE_EVENT(kvm_page_fault_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
+ struct kvm_memory_slot *memslot, unsigned long ea,
+ unsigned long dsisr),
+
+ TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(unsigned long, gpte_r)
+ __field(unsigned long, ea)
+ __field(u64, base_gfn)
+ __field(u32, slot_flags)
+ __field(u32, dsisr)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->gpte_r = hptep[2];
+ __entry->ea = ea;
+ __entry->dsisr = dsisr;
+ __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
+ __entry->slot_flags = memslot ? memslot->flags : 0;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx guest=0x%lx ea=0x%lx,%x slot=0x%llx,0x%x",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->gpte_r,
+ __entry->ea, __entry->dsisr,
+ __entry->base_gfn, __entry->slot_flags)
+);
+
+TRACE_EVENT(kvm_page_fault_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
+
+ TP_ARGS(vcpu, hptep, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(long, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->ret = ret;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx ret=0x%lx",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->ret)
+);
+
+TRACE_EVENT(kvm_hcall_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, req)
+ __field(unsigned long, gpr4)
+ __field(unsigned long, gpr5)
+ __field(unsigned long, gpr6)
+ __field(unsigned long, gpr7)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->req = kvmppc_get_gpr(vcpu, 3);
+ __entry->gpr4 = kvmppc_get_gpr(vcpu, 4);
+ __entry->gpr5 = kvmppc_get_gpr(vcpu, 5);
+ __entry->gpr6 = kvmppc_get_gpr(vcpu, 6);
+ __entry->gpr7 = kvmppc_get_gpr(vcpu, 7);
+ ),
+
+ TP_printk("VCPU %d: hcall=%s GPR4-7=0x%lx,0x%lx,0x%lx,0x%lx",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
+ __entry->gpr4, __entry->gpr5, __entry->gpr6, __entry->gpr7)
+);
+
+TRACE_EVENT(kvm_hcall_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, int ret),
+
+ TP_ARGS(vcpu, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, ret)
+ __field(unsigned long, hcall_rc)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->ret = ret;
+ __entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
+ ),
+
+ TP_printk("VCPU %d: ret=%s hcall_rc=%s",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->ret, kvm_trace_symbol_kvmret),
+ __print_symbolic(__entry->ret & RESUME_FLAG_HOST ?
+ H_TOO_HARD : __entry->hcall_rc,
+ kvm_trace_symbol_hcall_rc))
+);
+
+TRACE_EVENT(kvmppc_run_core,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu==%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_vcore_blocked,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu=%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+
+ TP_ARGS(vcpu, run),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, exit)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->exit = run->exit_reason;
+ __entry->ret = vcpu->arch.ret;
+ ),
+
+ TP_printk("VCPU %d: exit=%d, ret=%d",
+ __entry->vcpu_id, __entry->exit, __entry->ret)
+);
+
+#endif /* _TRACE_KVM_HV_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--
1.8.3.4
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-13 23:29 Suresh E. Warrier
@ 2014-11-14 10:56 ` Alexander Graf
2014-11-19 21:54 ` Suresh E. Warrier
2014-11-20 10:40 ` Aneesh Kumar K.V
1 sibling, 1 reply; 10+ messages in thread
From: Alexander Graf @ 2014-11-14 10:56 UTC (permalink / raw)
To: Suresh E. Warrier
Cc: kvm-ppc@vger.kernel.org, kvm@vger.kernel.org, Paul Mackerras
> Am 14.11.2014 um 00:29 schrieb Suresh E. Warrier <warrier@linux.vnet.ibm.com>:
>
> This patch adds trace points in the guest entry and exit code and also
> for exceptions handled by the host in kernel mode - hypercalls and page
> faults. The new events are added to /sys/kernel/debug/tracing/events
> under a new subsystem called kvm_hv.
>
> Acked-by: Paul Mackerras <paulus@samba.org>
> Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
> ---
> arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +-
> arch/powerpc/kvm/book3s_hv.c | 19 ++
> arch/powerpc/kvm/trace_hv.h | 497 ++++++++++++++++++++++++++++++++++++
> 3 files changed, 525 insertions(+), 3 deletions(-)
> create mode 100644 arch/powerpc/kvm/trace_hv.h
>
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> index 70feb7b..20cbad1 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> @@ -38,6 +38,7 @@
> #include <asm/cputable.h>
>
> #include "book3s_hv_cma.h"
> +#include "trace_hv.h"
>
> /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
> #define MAX_LPID_970 63
> @@ -627,6 +628,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> gfn = gpa >> PAGE_SHIFT;
> memslot = gfn_to_memslot(kvm, gfn);
>
> + trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
> +
> /* No memslot means it's an emulated MMIO region */
> if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
> return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
> @@ -639,6 +642,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> mmu_seq = kvm->mmu_notifier_seq;
> smp_rmb();
>
> + ret = -EFAULT;
> is_io = 0;
> pfn = 0;
> page = NULL;
> @@ -662,7 +666,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> }
> up_read(&current->mm->mmap_sem);
> if (!pfn)
> - return -EFAULT;
> + goto out_put;
> } else {
> page = pages[0];
> if (PageHuge(page)) {
> @@ -690,14 +694,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> pfn = page_to_pfn(page);
> }
>
> - ret = -EFAULT;
> if (psize > pte_size)
> goto out_put;
>
> /* Check WIMG vs. the actual page we're accessing */
> if (!hpte_cache_flags_ok(r, is_io)) {
> if (is_io)
> - return -EFAULT;
> + goto out_put;
> +
> /*
> * Allow guest to map emulated device memory as
> * uncacheable, but actually make it cacheable.
> @@ -753,6 +757,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> SetPageDirty(page);
>
> out_put:
> + trace_kvm_page_fault_exit(vcpu, hpte, ret);
> +
> if (page) {
> /*
> * We drop pages[0] here, not page because page might
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 69d4085..5143d17 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -57,6 +57,9 @@
>
> #include "book3s.h"
>
> +#define CREATE_TRACE_POINTS
> +#include "trace_hv.h"
> +
> /* #define EXIT_DEBUG */
> /* #define EXIT_DEBUG_SIMPLE */
> /* #define EXIT_DEBUG_INT */
> @@ -1679,6 +1682,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
> list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
> kvmppc_start_thread(vcpu);
> kvmppc_create_dtl_entry(vcpu, vc);
> + trace_kvm_guest_enter(vcpu);
> }
>
> /* Set this explicitly in case thread 0 doesn't have a vcpu */
> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>
> vc->vcore_state = VCORE_RUNNING;
> preempt_disable();
> +
> + trace_kvmppc_run_core(vc, 0);
> +
> spin_unlock(&vc->lock);
>
> kvm_guest_enter();
> @@ -1732,6 +1739,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
> kvmppc_core_pending_dec(vcpu))
> kvmppc_core_dequeue_dec(vcpu);
>
> + trace_kvm_guest_exit(vcpu);
> +
> ret = RESUME_GUEST;
> if (vcpu->arch.trap)
> ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
> @@ -1757,6 +1766,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
> wake_up(&vcpu->arch.cpu_run);
> }
> }
> +
> + trace_kvmppc_run_core(vc, 1);
> }
>
> /*
> @@ -1783,11 +1794,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
>
> prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
> vc->vcore_state = VCORE_SLEEPING;
> + trace_kvmppc_vcore_blocked(vc, 0);
> spin_unlock(&vc->lock);
> schedule();
> finish_wait(&vc->wq, &wait);
> spin_lock(&vc->lock);
> vc->vcore_state = VCORE_INACTIVE;
> + trace_kvmppc_vcore_blocked(vc, 1);
> }
>
> static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> @@ -1796,6 +1809,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> struct kvmppc_vcore *vc;
> struct kvm_vcpu *v, *vn;
>
> + trace_kvmppc_run_vcpu_enter(vcpu);
> +
> kvm_run->exit_reason = 0;
> vcpu->arch.ret = RESUME_GUEST;
> vcpu->arch.trap = 0;
> @@ -1825,6 +1840,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> VCORE_EXIT_COUNT(vc) == 0) {
> kvmppc_create_dtl_entry(vcpu, vc);
> kvmppc_start_thread(vcpu);
> + trace_kvm_guest_enter(vcpu);
> } else if (vc->vcore_state == VCORE_SLEEPING) {
> wake_up(&vc->wq);
> }
> @@ -1889,6 +1905,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> wake_up(&v->arch.cpu_run);
> }
>
> + trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
> spin_unlock(&vc->lock);
> return vcpu->arch.ret;
> }
> @@ -1934,7 +1951,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
>
> if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
> !(vcpu->arch.shregs.msr & MSR_PR)) {
> + trace_kvm_hcall_enter(vcpu);
> r = kvmppc_pseries_do_hcall(vcpu);
> + trace_kvm_hcall_exit(vcpu, r);
> kvmppc_core_prepare_to_enter(vcpu);
> } else if (r == RESUME_PAGE_FAULT) {
> srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
> new file mode 100644
> index 0000000..2d2e55f
> --- /dev/null
> +++ b/arch/powerpc/kvm/trace_hv.h
> @@ -0,0 +1,497 @@
> +#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
> +#define _TRACE_KVM_HV_H
> +
> +#include <linux/tracepoint.h>
> +
> +#undef TRACE_SYSTEM
> +#define TRACE_SYSTEM kvm_hv
> +#define TRACE_INCLUDE_PATH .
> +#define TRACE_INCLUDE_FILE trace_hv
> +
> +#define kvm_trace_symbol_exit \
> + {0x100, "SYSTEM_RESET"}, \
> + {0x200, "MACHINE_CHECK"}, \
> + {0x300, "DATA_STORAGE"}, \
> + {0x380, "DATA_SEGMENT"}, \
> + {0x400, "INST_STORAGE"}, \
> + {0x480, "INST_SEGMENT"}, \
> + {0x500, "EXTERNAL"}, \
> + {0x502, "EXTERNAL_HV"}, \
> + {0x600, "ALIGNMENT"}, \
> + {0x700, "PROGRAM"}, \
> + {0x800, "FP_UNAVAIL"}, \
> + {0x900, "DECREMENTER"}, \
> + {0x980, "HV_DECREMENTER"}, \
> + {0xc00, "SYSCALL"}, \
> + {0xd00, "TRACE"}, \
> + {0xe00, "H_DATA_STORAGE"}, \
> + {0xe20, "H_INST_STORAGE"}, \
> + {0xe40, "H_EMUL_ASSIST"}, \
> + {0xf00, "PERFMON"}, \
> + {0xf20, "ALTIVEC"}, \
> + {0xf40, "VSX"}
Can we share these with PR?
> +
> +#define kvm_trace_symbol_hcall \
> + {0x04, "H_REMOVE"}, \
> + {0x08, "H_ENTER"}, \
> + {0x0c, "H_READ"}, \
> + {0x10, "H_CLEAR_MOD"}, \
> + {0x14, "H_CLEAR_REF"}, \
> + {0x18, "H_PROTECT"}, \
> + {0x1c, "H_GET_TCE"}, \
> + {0x20, "H_PUT_TCE"}, \
> + {0x24, "H_SET_SPRG0"}, \
> + {0x28, "H_SET_DABR"}, \
> + {0x2c, "H_PAGE_INIT"}, \
> + {0x30, "H_SET_ASR"}, \
> + {0x34, "H_ASR_ON"}, \
> + {0x38, "H_ASR_OFF"}, \
> + {0x3c, "H_LOGICAL_CI_LOAD"}, \
> + {0x40, "H_LOGICAL_CI_STORE"}, \
> + {0x44, "H_LOGICAL_CACHE_LOAD"}, \
> + {0x48, "H_LOGICAL_CACHE_STORE"}, \
> + {0x4c, "H_LOGICAL_ICBI"}, \
> + {0x50, "H_LOGICAL_DCBF"}, \
> + {0x54, "H_GET_TERM_CHAR"}, \
> + {0x58, "H_PUT_TERM_CHAR"}, \
> + {0x5c, "H_REAL_TO_LOGICAL"}, \
> + {0x60, "H_HYPERVISOR_DATA"}, \
> + {0x64, "H_EOI"}, \
> + {0x68, "H_CPPR"}, \
> + {0x6c, "H_IPI"}, \
> + {0x70, "H_IPOLL"}, \
> + {0x74, "H_XIRR"}, \
> + {0x7c, "H_PERFMON"}, \
> + {0x78, "H_MIGRATE_DMA"}, \
> + {0xDC, "H_REGISTER_VPA"}, \
> + {0xE0, "H_CEDE"}, \
> + {0xE4, "H_CONFER"}, \
> + {0xE8, "H_PROD"}, \
> + {0xEC, "H_GET_PPP"}, \
> + {0xF0, "H_SET_PPP"}, \
> + {0xF4, "H_PURR"}, \
> + {0xF8, "H_PIC"}, \
> + {0xFC, "H_REG_CRQ"}, \
> + {0x100, "H_FREE_CRQ"}, \
> + {0x104, "H_VIO_SIGNAL"}, \
> + {0x108, "H_SEND_CRQ"}, \
> + {0x110, "H_COPY_RDMA"}, \
> + {0x114, "H_REGISTER_LOGICAL_LAN"}, \
> + {0x118, "H_FREE_LOGICAL_LAN"}, \
> + {0x11C, "H_ADD_LOGICAL_LAN_BUFFER"}, \
> + {0x120, "H_SEND_LOGICAL_LAN"}, \
> + {0x124, "H_BULK_REMOVE"}, \
> + {0x130, "H_MULTICAST_CTRL"}, \
> + {0x134, "H_SET_XDABR"}, \
> + {0x138, "H_STUFF_TCE"}, \
> + {0x13C, "H_PUT_TCE_INDIRECT"}, \
> + {0x14C, "H_CHANGE_LOGICAL_LAN_MAC"}, \
> + {0x150, "H_VTERM_PARTNER_INFO"}, \
> + {0x154, "H_REGISTER_VTERM"}, \
> + {0x158, "H_FREE_VTERM"}, \
> + {0x15C, "H_RESET_EVENTS"}, \
> + {0x160, "H_ALLOC_RESOURCE"}, \
> + {0x164, "H_FREE_RESOURCE"}, \
> + {0x168, "H_MODIFY_QP"}, \
> + {0x16C, "H_QUERY_QP"}, \
> + {0x170, "H_REREGISTER_PMR"}, \
> + {0x174, "H_REGISTER_SMR"}, \
> + {0x178, "H_QUERY_MR"}, \
> + {0x17C, "H_QUERY_MW"}, \
> + {0x180, "H_QUERY_HCA"}, \
> + {0x184, "H_QUERY_PORT"}, \
> + {0x188, "H_MODIFY_PORT"}, \
> + {0x18C, "H_DEFINE_AQP1"}, \
> + {0x190, "H_GET_TRACE_BUFFER"}, \
> + {0x194, "H_DEFINE_AQP0"}, \
> + {0x198, "H_RESIZE_MR"}, \
> + {0x19C, "H_ATTACH_MCQP"}, \
> + {0x1A0, "H_DETACH_MCQP"}, \
> + {0x1A4, "H_CREATE_RPT"}, \
> + {0x1A8, "H_REMOVE_RPT"}, \
> + {0x1AC, "H_REGISTER_RPAGES"}, \
> + {0x1B0, "H_DISABLE_AND_GETC"}, \
> + {0x1B4, "H_ERROR_DATA"}, \
> + {0x1B8, "H_GET_HCA_INFO"}, \
> + {0x1BC, "H_GET_PERF_COUNT"}, \
> + {0x1C0, "H_MANAGE_TRACE"}, \
> + {0x1D4, "H_FREE_LOGICAL_LAN_BUFFER"}, \
> + {0x1E4, "H_QUERY_INT_STATE"}, \
> + {0x1D8, "H_POLL_PENDING"}, \
> + {0x244, "H_ILLAN_ATTRIBUTES"}, \
> + {0x250, "H_MODIFY_HEA_QP"}, \
> + {0x254, "H_QUERY_HEA_QP"}, \
> + {0x258, "H_QUERY_HEA"}, \
> + {0x25C, "H_QUERY_HEA_PORT"}, \
> + {0x260, "H_MODIFY_HEA_PORT"}, \
> + {0x264, "H_REG_BCMC"}, \
> + {0x268, "H_DEREG_BCMC"}, \
> + {0x26C, "H_REGISTER_HEA_RPAGES"}, \
> + {0x270, "H_DISABLE_AND_GET_HEA"}, \
> + {0x274, "H_GET_HEA_INFO"}, \
> + {0x278, "H_ALLOC_HEA_RESOURCE"}, \
> + {0x284, "H_ADD_CONN"}, \
> + {0x288, "H_DEL_CONN"}, \
> + {0x298, "H_JOIN"}, \
> + {0x2A4, "H_VASI_STATE"}, \
> + {0x2B0, "H_ENABLE_CRQ"}, \
> + {0x2B8, "H_GET_EM_PARMS"}, \
> + {0x2D0, "H_SET_MPP"}, \
> + {0x2D4, "H_GET_MPP"}, \
> + {0x2EC, "H_HOME_NODE_ASSOCIATIVITY"}, \
> + {0x2F4, "H_BEST_ENERGY"}, \
> + {0x2FC, "H_XIRR_X"}, \
> + {0x300, "H_RANDOM"}, \
> + {0x304, "H_COP"}, \
> + {0x314, "H_GET_MPP_X"}, \
> + {0x31C, "H_SET_MODE"}, \
> + {0xf000, "H_RTAS"}
I think we're better off using the defines here rather than hand written numbers.
> +
> +#define kvm_trace_symbol_kvmret \
> + {0, "RESUME_GUEST"}, \
> + {1, "RESUME_GUEST_NV"}, \
> + {2, "RESUME_HOST"}, \
> + {3, "RESUME_HOST_NV"}
Same here
> +
> +#define kvm_trace_symbol_hcall_rc \
> + {0, "H_SUCCESS"}, \
> + {1, "H_BUSY"}, \
> + {2, "H_CLOSED"}, \
> + {3, "H_NOT_AVAILABLE"}, \
> + {4, "H_CONSTRAINED"}, \
> + {5, "H_PARTIAL"}, \
> + {14, "H_IN_PROGRESS"}, \
> + {15, "H_PAGE_REGISTERED"}, \
> + {16, "H_PARTIAL_STORE"}, \
> + {17, "H_PENDING"}, \
> + {18, "H_CONTINUE"}, \
> + {9900, "H_LONG_BUSY_START_RANGE"}, \
> + {9900, "H_LONG_BUSY_ORDER_1_MSEC"}, \
> + {9901, "H_LONG_BUSY_ORDER_10_MSEC"}, \
> + {9902, "H_LONG_BUSY_ORDER_100_MSEC"}, \
> + {9903, "H_LONG_BUSY_ORDER_1_SEC"}, \
> + {9904, "H_LONG_BUSY_ORDER_10_SEC"}, \
> + {9905, "H_LONG_BUSY_ORDER_100_SEC"}, \
> + {9905, "H_LONG_BUSY_END_RANGE"}, \
> + {9999, "H_TOO_HARD"}, \
> + {-1, "H_HARDWARE"}, \
> + {-2, "H_FUNCTION"}, \
> + {-3, "H_PRIVILEGE"}, \
> + {-4, "H_PARAMETER"}, \
> + {-5, "H_BAD_MODE"}, \
> + {-6, "H_PTEG_FULL"}, \
> + {-7, "H_NOT_FOUND"}, \
> + {-8, "H_RESERVED_DABR"}, \
> + {-9, "H_NO_MEM"}, \
> + {-10, "H_AUTHORITY"}, \
> + {-11, "H_PERMISSION"}, \
> + {-12, "H_DROPPED"}, \
> + {-13, "H_SOURCE_PARM"}, \
> + {-14, "H_DEST_PARM"}, \
> + {-15, "H_REMOTE_PARM"}, \
> + {-16, "H_RESOURCE"}, \
> + {-17, "H_ADAPTER_PARM"}, \
> + {-18, "H_RH_PARM"}, \
> + {-19, "H_RCQ_PARM"}, \
> + {-20, "H_SCQ_PARM"}, \
> + {-21, "H_EQ_PARM"}, \
> + {-22, "H_RT_PARM"}, \
> + {-23, "H_ST_PARM"}, \
> + {-24, "H_SIGT_PARM"}, \
> + {-25, "H_TOKEN_PARM"}, \
> + {-27, "H_MLENGTH_PARM"}, \
> + {-28, "H_MEM_PARM"}, \
> + {-29, "H_MEM_ACCESS_PARM"}, \
> + {-30, "H_ATTR_PARM"}, \
> + {-31, "H_PORT_PARM"}, \
> + {-32, "H_MCG_PARM"}, \
> + {-33, "H_VL_PARM"}, \
> + {-34, "H_TSIZE_PARM"}, \
> + {-35, "H_TRACE_PARM"}, \
> + {-37, "H_MASK_PARM"}, \
> + {-38, "H_MCG_FULL"}, \
> + {-39, "H_ALIAS_EXIST"}, \
> + {-40, "H_P_COUNTER"}, \
> + {-41, "H_TABLE_FULL"}, \
> + {-42, "H_ALT_TABLE"}, \
> + {-43, "H_MR_CONDITION"}, \
> + {-44, "H_NOT_ENOUGH_RESOURCES"}, \
> + {-45, "H_R_STATE"}, \
> + {-46, "H_RESCINDED"}, \
> + {-55, "H_P2"}, \
> + {-56, "H_P3"}, \
> + {-57, "H_P4"}, \
> + {-58, "H_P5"}, \
> + {-59, "H_P6"}, \
> + {-60, "H_P7"}, \
> + {-61, "H_P8"}, \
> + {-62, "H_P9"}, \
> + {-64, "H_TOO_BIG"}, \
> + {-68, "H_OVERLAP"}, \
> + {-69, "H_INTERRUPT"}, \
> + {-70, "H_BAD_DATA"}, \
> + {-71, "H_NOT_ACTIVE"}, \
> + {-72, "H_SG_LIST"}, \
> + {-73, "H_OP_MODE"}, \
> + {-74, "H_COP_HW"}, \
> + {-256, "H_UNSUPPORTED_FLAG_START"}, \
> + {-511, "H_UNSUPPORTED_FLAG_END"}, \
> + {-9005, "H_MULTI_THREADS_ACTIVE"}, \
> + {-9006, "H_OUTSTANDING_COP_OPS"}
And here
Overall, please check for every trace point whether we can share it with PR and/or booke :)
Alex
> +
> +TRACE_EVENT(kvm_guest_enter,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> + TP_ARGS(vcpu),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(unsigned long, pc)
> + __field(unsigned long, pending_exceptions)
> + __field(u8, ceded)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->pc = kvmppc_get_pc(vcpu);
> + __entry->ceded = vcpu->arch.ceded;
> + __entry->pending_exceptions = vcpu->arch.pending_exceptions;
> + ),
> +
> + TP_printk("VCPU %d: pc=0x%lx pexcp=0x%lx ceded=%d",
> + __entry->vcpu_id,
> + __entry->pc,
> + __entry->pending_exceptions, __entry->ceded)
> +);
> +
> +TRACE_EVENT(kvm_guest_exit,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> + TP_ARGS(vcpu),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(int, trap)
> + __field(unsigned long, pc)
> + __field(unsigned long, msr)
> + __field(u8, ceded)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->trap = vcpu->arch.trap;
> + __entry->ceded = vcpu->arch.ceded;
> + __entry->pc = kvmppc_get_pc(vcpu);
> + __entry->msr = vcpu->arch.shregs.msr;
> + ),
> +
> + TP_printk("VCPU %d: trap=%s pc=0x%lx msr=0x%lx, ceded=%d",
> + __entry->vcpu_id,
> + __print_symbolic(__entry->trap, kvm_trace_symbol_exit),
> + __entry->pc, __entry->msr, __entry->ceded
> + )
> +);
> +
> +TRACE_EVENT(kvm_page_fault_enter,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
> + struct kvm_memory_slot *memslot, unsigned long ea,
> + unsigned long dsisr),
> +
> + TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(unsigned long, hpte_v)
> + __field(unsigned long, hpte_r)
> + __field(unsigned long, gpte_r)
> + __field(unsigned long, ea)
> + __field(u64, base_gfn)
> + __field(u32, slot_flags)
> + __field(u32, dsisr)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->hpte_v = hptep[0];
> + __entry->hpte_r = hptep[1];
> + __entry->gpte_r = hptep[2];
> + __entry->ea = ea;
> + __entry->dsisr = dsisr;
> + __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
> + __entry->slot_flags = memslot ? memslot->flags : 0;
> + ),
> +
> + TP_printk("VCPU %d: hpte=0x%lx:0x%lx guest=0x%lx ea=0x%lx,%x slot=0x%llx,0x%x",
> + __entry->vcpu_id,
> + __entry->hpte_v, __entry->hpte_r, __entry->gpte_r,
> + __entry->ea, __entry->dsisr,
> + __entry->base_gfn, __entry->slot_flags)
> +);
> +
> +TRACE_EVENT(kvm_page_fault_exit,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
> +
> + TP_ARGS(vcpu, hptep, ret),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(unsigned long, hpte_v)
> + __field(unsigned long, hpte_r)
> + __field(long, ret)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->hpte_v = hptep[0];
> + __entry->hpte_r = hptep[1];
> + __entry->ret = ret;
> + ),
> +
> + TP_printk("VCPU %d: hpte=0x%lx:0x%lx ret=0x%lx",
> + __entry->vcpu_id,
> + __entry->hpte_v, __entry->hpte_r, __entry->ret)
> +);
> +
> +TRACE_EVENT(kvm_hcall_enter,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> +
> + TP_ARGS(vcpu),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(unsigned long, req)
> + __field(unsigned long, gpr4)
> + __field(unsigned long, gpr5)
> + __field(unsigned long, gpr6)
> + __field(unsigned long, gpr7)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->req = kvmppc_get_gpr(vcpu, 3);
> + __entry->gpr4 = kvmppc_get_gpr(vcpu, 4);
> + __entry->gpr5 = kvmppc_get_gpr(vcpu, 5);
> + __entry->gpr6 = kvmppc_get_gpr(vcpu, 6);
> + __entry->gpr7 = kvmppc_get_gpr(vcpu, 7);
> + ),
> +
> + TP_printk("VCPU %d: hcall=%s GPR4-7=0x%lx,0x%lx,0x%lx,0x%lx",
> + __entry->vcpu_id,
> + __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
> + __entry->gpr4, __entry->gpr5, __entry->gpr6, __entry->gpr7)
> +);
> +
> +TRACE_EVENT(kvm_hcall_exit,
> + TP_PROTO(struct kvm_vcpu *vcpu, int ret),
> +
> + TP_ARGS(vcpu, ret),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(unsigned long, ret)
> + __field(unsigned long, hcall_rc)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->ret = ret;
> + __entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
> + ),
> +
> + TP_printk("VCPU %d: ret=%s hcall_rc=%s",
> + __entry->vcpu_id,
> + __print_symbolic(__entry->ret, kvm_trace_symbol_kvmret),
> + __print_symbolic(__entry->ret & RESUME_FLAG_HOST ?
> + H_TOO_HARD : __entry->hcall_rc,
> + kvm_trace_symbol_hcall_rc))
> +);
> +
> +TRACE_EVENT(kvmppc_run_core,
> + TP_PROTO(struct kvmppc_vcore *vc, int where),
> +
> + TP_ARGS(vc, where),
> +
> + TP_STRUCT__entry(
> + __field(int, n_runnable)
> + __field(int, runner_vcpu)
> + __field(int, where)
> + __field(pid_t, tgid)
> + ),
> +
> + TP_fast_assign(
> + __entry->runner_vcpu = vc->runner->vcpu_id;
> + __entry->n_runnable = vc->n_runnable;
> + __entry->where = where;
> + __entry->tgid = current->tgid;
> + ),
> +
> + TP_printk("%s runner_vcpu==%d runnable=%d tgid=%d",
> + __entry->where ? "Exit" : "Enter",
> + __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
> +);
> +
> +TRACE_EVENT(kvmppc_vcore_blocked,
> + TP_PROTO(struct kvmppc_vcore *vc, int where),
> +
> + TP_ARGS(vc, where),
> +
> + TP_STRUCT__entry(
> + __field(int, n_runnable)
> + __field(int, runner_vcpu)
> + __field(int, where)
> + __field(pid_t, tgid)
> + ),
> +
> + TP_fast_assign(
> + __entry->runner_vcpu = vc->runner->vcpu_id;
> + __entry->n_runnable = vc->n_runnable;
> + __entry->where = where;
> + __entry->tgid = current->tgid;
> + ),
> +
> + TP_printk("%s runner_vcpu=%d runnable=%d tgid=%d",
> + __entry->where ? "Exit" : "Enter",
> + __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
> +);
> +
> +TRACE_EVENT(kvmppc_run_vcpu_enter,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> +
> + TP_ARGS(vcpu),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(pid_t, tgid)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->tgid = current->tgid;
> + ),
> +
> + TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
> +);
> +
> +TRACE_EVENT(kvmppc_run_vcpu_exit,
> + TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
> +
> + TP_ARGS(vcpu, run),
> +
> + TP_STRUCT__entry(
> + __field(int, vcpu_id)
> + __field(int, exit)
> + __field(int, ret)
> + ),
> +
> + TP_fast_assign(
> + __entry->vcpu_id = vcpu->vcpu_id;
> + __entry->exit = run->exit_reason;
> + __entry->ret = vcpu->arch.ret;
> + ),
> +
> + TP_printk("VCPU %d: exit=%d, ret=%d",
> + __entry->vcpu_id, __entry->exit, __entry->ret)
> +);
> +
> +#endif /* _TRACE_KVM_HV_H */
> +
> +/* This part must be outside protection */
> +#include <trace/define_trace.h>
> --
> 1.8.3.4
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-14 10:56 ` Alexander Graf
@ 2014-11-19 21:54 ` Suresh E. Warrier
2014-11-20 12:08 ` Alexander Graf
0 siblings, 1 reply; 10+ messages in thread
From: Suresh E. Warrier @ 2014-11-19 21:54 UTC (permalink / raw)
To: Alexander Graf
Cc: kvm-ppc@vger.kernel.org, kvm@vger.kernel.org, Paul Mackerras
On 11/14/2014 04:56 AM, Alexander Graf wrote:
>
>
>
>> Am 14.11.2014 um 00:29 schrieb Suresh E. Warrier <warrier@linux.vnet.ibm.com>:
>>
>> This patch adds trace points in the guest entry and exit code and also
>> for exceptions handled by the host in kernel mode - hypercalls and page
>> faults. The new events are added to /sys/kernel/debug/tracing/events
>> under a new subsystem called kvm_hv.
>>
>> Acked-by: Paul Mackerras <paulus@samba.org>
>> Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
>> ---
>> arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +-
>> arch/powerpc/kvm/book3s_hv.c | 19 ++
>> arch/powerpc/kvm/trace_hv.h | 497 ++++++++++++++++++++++++++++++++++++
>> 3 files changed, 525 insertions(+), 3 deletions(-)
>> create mode 100644 arch/powerpc/kvm/trace_hv.h
>>
>> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
>> index 70feb7b..20cbad1 100644
>> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
>> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
>> @@ -38,6 +38,7 @@
>> #include <asm/cputable.h>
>>
>> #include "book3s_hv_cma.h"
>> +#include "trace_hv.h"
>>
>> /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
>> #define MAX_LPID_970 63
>> @@ -627,6 +628,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> gfn = gpa >> PAGE_SHIFT;
>> memslot = gfn_to_memslot(kvm, gfn);
>>
>> + trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
>> +
>> /* No memslot means it's an emulated MMIO region */
>> if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
>> return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
>> @@ -639,6 +642,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> mmu_seq = kvm->mmu_notifier_seq;
>> smp_rmb();
>>
>> + ret = -EFAULT;
>> is_io = 0;
>> pfn = 0;
>> page = NULL;
>> @@ -662,7 +666,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> }
>> up_read(&current->mm->mmap_sem);
>> if (!pfn)
>> - return -EFAULT;
>> + goto out_put;
>> } else {
>> page = pages[0];
>> if (PageHuge(page)) {
>> @@ -690,14 +694,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> pfn = page_to_pfn(page);
>> }
>>
>> - ret = -EFAULT;
>> if (psize > pte_size)
>> goto out_put;
>>
>> /* Check WIMG vs. the actual page we're accessing */
>> if (!hpte_cache_flags_ok(r, is_io)) {
>> if (is_io)
>> - return -EFAULT;
>> + goto out_put;
>> +
>> /*
>> * Allow guest to map emulated device memory as
>> * uncacheable, but actually make it cacheable.
>> @@ -753,6 +757,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> SetPageDirty(page);
>>
>> out_put:
>> + trace_kvm_page_fault_exit(vcpu, hpte, ret);
>> +
>> if (page) {
>> /*
>> * We drop pages[0] here, not page because page might
>> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
>> index 69d4085..5143d17 100644
>> --- a/arch/powerpc/kvm/book3s_hv.c
>> +++ b/arch/powerpc/kvm/book3s_hv.c
>> @@ -57,6 +57,9 @@
>>
>> #include "book3s.h"
>>
>> +#define CREATE_TRACE_POINTS
>> +#include "trace_hv.h"
>> +
>> /* #define EXIT_DEBUG */
>> /* #define EXIT_DEBUG_SIMPLE */
>> /* #define EXIT_DEBUG_INT */
>> @@ -1679,6 +1682,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>> list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
>> kvmppc_start_thread(vcpu);
>> kvmppc_create_dtl_entry(vcpu, vc);
>> + trace_kvm_guest_enter(vcpu);
>> }
>>
>> /* Set this explicitly in case thread 0 doesn't have a vcpu */
>> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>
>> vc->vcore_state = VCORE_RUNNING;
>> preempt_disable();
>> +
>> + trace_kvmppc_run_core(vc, 0);
>> +
>> spin_unlock(&vc->lock);
>>
>> kvm_guest_enter();
>> @@ -1732,6 +1739,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>> kvmppc_core_pending_dec(vcpu))
>> kvmppc_core_dequeue_dec(vcpu);
>>
>> + trace_kvm_guest_exit(vcpu);
>> +
>> ret = RESUME_GUEST;
>> if (vcpu->arch.trap)
>> ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
>> @@ -1757,6 +1766,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>> wake_up(&vcpu->arch.cpu_run);
>> }
>> }
>> +
>> + trace_kvmppc_run_core(vc, 1);
>> }
>>
>> /*
>> @@ -1783,11 +1794,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
>>
>> prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
>> vc->vcore_state = VCORE_SLEEPING;
>> + trace_kvmppc_vcore_blocked(vc, 0);
>> spin_unlock(&vc->lock);
>> schedule();
>> finish_wait(&vc->wq, &wait);
>> spin_lock(&vc->lock);
>> vc->vcore_state = VCORE_INACTIVE;
>> + trace_kvmppc_vcore_blocked(vc, 1);
>> }
>>
>> static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>> @@ -1796,6 +1809,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>> struct kvmppc_vcore *vc;
>> struct kvm_vcpu *v, *vn;
>>
>> + trace_kvmppc_run_vcpu_enter(vcpu);
>> +
>> kvm_run->exit_reason = 0;
>> vcpu->arch.ret = RESUME_GUEST;
>> vcpu->arch.trap = 0;
>> @@ -1825,6 +1840,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>> VCORE_EXIT_COUNT(vc) == 0) {
>> kvmppc_create_dtl_entry(vcpu, vc);
>> kvmppc_start_thread(vcpu);
>> + trace_kvm_guest_enter(vcpu);
>> } else if (vc->vcore_state == VCORE_SLEEPING) {
>> wake_up(&vc->wq);
>> }
>> @@ -1889,6 +1905,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>> wake_up(&v->arch.cpu_run);
>> }
>>
>> + trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
>> spin_unlock(&vc->lock);
>> return vcpu->arch.ret;
>> }
>> @@ -1934,7 +1951,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
>>
>> if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
>> !(vcpu->arch.shregs.msr & MSR_PR)) {
>> + trace_kvm_hcall_enter(vcpu);
>> r = kvmppc_pseries_do_hcall(vcpu);
>> + trace_kvm_hcall_exit(vcpu, r);
>> kvmppc_core_prepare_to_enter(vcpu);
>> } else if (r == RESUME_PAGE_FAULT) {
>> srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
>> diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
>> new file mode 100644
>> index 0000000..2d2e55f
>> --- /dev/null
>> +++ b/arch/powerpc/kvm/trace_hv.h
>> @@ -0,0 +1,497 @@
>> +#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
>> +#define _TRACE_KVM_HV_H
>> +
>> +#include <linux/tracepoint.h>
>> +
>> +#undef TRACE_SYSTEM
>> +#define TRACE_SYSTEM kvm_hv
>> +#define TRACE_INCLUDE_PATH .
>> +#define TRACE_INCLUDE_FILE trace_hv
>> +
>> +#define kvm_trace_symbol_exit \
>> + {0x100, "SYSTEM_RESET"}, \
>> + {0x200, "MACHINE_CHECK"}, \
>> + {0x300, "DATA_STORAGE"}, \
>> + {0x380, "DATA_SEGMENT"}, \
>> + {0x400, "INST_STORAGE"}, \
>> + {0x480, "INST_SEGMENT"}, \
>> + {0x500, "EXTERNAL"}, \
>> + {0x502, "EXTERNAL_HV"}, \
>> + {0x600, "ALIGNMENT"}, \
>> + {0x700, "PROGRAM"}, \
>> + {0x800, "FP_UNAVAIL"}, \
>> + {0x900, "DECREMENTER"}, \
>> + {0x980, "HV_DECREMENTER"}, \
>> + {0xc00, "SYSCALL"}, \
>> + {0xd00, "TRACE"}, \
>> + {0xe00, "H_DATA_STORAGE"}, \
>> + {0xe20, "H_INST_STORAGE"}, \
>> + {0xe40, "H_EMUL_ASSIST"}, \
>> + {0xf00, "PERFMON"}, \
>> + {0xf20, "ALTIVEC"}, \
>> + {0xf40, "VSX"}
>
> Can we share these with PR?
>
I could move these to a new file, say trace_kvm.h and have both trace_pr.h and
trace_hv.h include the file. Please confirm if that works for you. Or do you
have a better suggestion?
>> +
>> +#define kvm_trace_symbol_hcall \
>> + {0x04, "H_REMOVE"}, \
>> + {0x08, "H_ENTER"}, \
>> + {0x0c, "H_READ"}, \
>> + {0x10, "H_CLEAR_MOD"}, \
>> + {0x14, "H_CLEAR_REF"}, \
>> + {0x18, "H_PROTECT"}, \
>> + {0x1c, "H_GET_TCE"}, \
>> + {0x20, "H_PUT_TCE"}, \
>> + {0x24, "H_SET_SPRG0"}, \
>> + {0x28, "H_SET_DABR"}, \
>> + {0x2c, "H_PAGE_INIT"}, \
>> + {0x30, "H_SET_ASR"}, \
>> + {0x34, "H_ASR_ON"}, \
>> + {0x38, "H_ASR_OFF"}, \
>> + {0x3c, "H_LOGICAL_CI_LOAD"}, \
>> + {0x40, "H_LOGICAL_CI_STORE"}, \
>> + {0x44, "H_LOGICAL_CACHE_LOAD"}, \
>> + {0x48, "H_LOGICAL_CACHE_STORE"}, \
>> + {0x4c, "H_LOGICAL_ICBI"}, \
>> + {0x50, "H_LOGICAL_DCBF"}, \
>> + {0x54, "H_GET_TERM_CHAR"}, \
>> + {0x58, "H_PUT_TERM_CHAR"}, \
>> + {0x5c, "H_REAL_TO_LOGICAL"}, \
>> + {0x60, "H_HYPERVISOR_DATA"}, \
>> + {0x64, "H_EOI"}, \
>> + {0x68, "H_CPPR"}, \
>> + {0x6c, "H_IPI"}, \
>> + {0x70, "H_IPOLL"}, \
>> + {0x74, "H_XIRR"}, \
>> + {0x7c, "H_PERFMON"}, \
>> + {0x78, "H_MIGRATE_DMA"}, \
>> + {0xDC, "H_REGISTER_VPA"}, \
>> + {0xE0, "H_CEDE"}, \
>> + {0xE4, "H_CONFER"}, \
>> + {0xE8, "H_PROD"}, \
>> + {0xEC, "H_GET_PPP"}, \
>> + {0xF0, "H_SET_PPP"}, \
>> + {0xF4, "H_PURR"}, \
>> + {0xF8, "H_PIC"}, \
>> + {0xFC, "H_REG_CRQ"}, \
>> + {0x100, "H_FREE_CRQ"}, \
>> + {0x104, "H_VIO_SIGNAL"}, \
>> + {0x108, "H_SEND_CRQ"}, \
>> + {0x110, "H_COPY_RDMA"}, \
>> + {0x114, "H_REGISTER_LOGICAL_LAN"}, \
>> + {0x118, "H_FREE_LOGICAL_LAN"}, \
>> + {0x11C, "H_ADD_LOGICAL_LAN_BUFFER"}, \
>> + {0x120, "H_SEND_LOGICAL_LAN"}, \
>> + {0x124, "H_BULK_REMOVE"}, \
>> + {0x130, "H_MULTICAST_CTRL"}, \
>> + {0x134, "H_SET_XDABR"}, \
>> + {0x138, "H_STUFF_TCE"}, \
>> + {0x13C, "H_PUT_TCE_INDIRECT"}, \
>> + {0x14C, "H_CHANGE_LOGICAL_LAN_MAC"}, \
>> + {0x150, "H_VTERM_PARTNER_INFO"}, \
>> + {0x154, "H_REGISTER_VTERM"}, \
>> + {0x158, "H_FREE_VTERM"}, \
>> + {0x15C, "H_RESET_EVENTS"}, \
>> + {0x160, "H_ALLOC_RESOURCE"}, \
>> + {0x164, "H_FREE_RESOURCE"}, \
>> + {0x168, "H_MODIFY_QP"}, \
>> + {0x16C, "H_QUERY_QP"}, \
>> + {0x170, "H_REREGISTER_PMR"}, \
>> + {0x174, "H_REGISTER_SMR"}, \
>> + {0x178, "H_QUERY_MR"}, \
>> + {0x17C, "H_QUERY_MW"}, \
>> + {0x180, "H_QUERY_HCA"}, \
>> + {0x184, "H_QUERY_PORT"}, \
>> + {0x188, "H_MODIFY_PORT"}, \
>> + {0x18C, "H_DEFINE_AQP1"}, \
>> + {0x190, "H_GET_TRACE_BUFFER"}, \
>> + {0x194, "H_DEFINE_AQP0"}, \
>> + {0x198, "H_RESIZE_MR"}, \
>> + {0x19C, "H_ATTACH_MCQP"}, \
>> + {0x1A0, "H_DETACH_MCQP"}, \
>> + {0x1A4, "H_CREATE_RPT"}, \
>> + {0x1A8, "H_REMOVE_RPT"}, \
>> + {0x1AC, "H_REGISTER_RPAGES"}, \
>> + {0x1B0, "H_DISABLE_AND_GETC"}, \
>> + {0x1B4, "H_ERROR_DATA"}, \
>> + {0x1B8, "H_GET_HCA_INFO"}, \
>> + {0x1BC, "H_GET_PERF_COUNT"}, \
>> + {0x1C0, "H_MANAGE_TRACE"}, \
>> + {0x1D4, "H_FREE_LOGICAL_LAN_BUFFER"}, \
>> + {0x1E4, "H_QUERY_INT_STATE"}, \
>> + {0x1D8, "H_POLL_PENDING"}, \
>> + {0x244, "H_ILLAN_ATTRIBUTES"}, \
>> + {0x250, "H_MODIFY_HEA_QP"}, \
>> + {0x254, "H_QUERY_HEA_QP"}, \
>> + {0x258, "H_QUERY_HEA"}, \
>> + {0x25C, "H_QUERY_HEA_PORT"}, \
>> + {0x260, "H_MODIFY_HEA_PORT"}, \
>> + {0x264, "H_REG_BCMC"}, \
>> + {0x268, "H_DEREG_BCMC"}, \
>> + {0x26C, "H_REGISTER_HEA_RPAGES"}, \
>> + {0x270, "H_DISABLE_AND_GET_HEA"}, \
>> + {0x274, "H_GET_HEA_INFO"}, \
>> + {0x278, "H_ALLOC_HEA_RESOURCE"}, \
>> + {0x284, "H_ADD_CONN"}, \
>> + {0x288, "H_DEL_CONN"}, \
>> + {0x298, "H_JOIN"}, \
>> + {0x2A4, "H_VASI_STATE"}, \
>> + {0x2B0, "H_ENABLE_CRQ"}, \
>> + {0x2B8, "H_GET_EM_PARMS"}, \
>> + {0x2D0, "H_SET_MPP"}, \
>> + {0x2D4, "H_GET_MPP"}, \
>> + {0x2EC, "H_HOME_NODE_ASSOCIATIVITY"}, \
>> + {0x2F4, "H_BEST_ENERGY"}, \
>> + {0x2FC, "H_XIRR_X"}, \
>> + {0x300, "H_RANDOM"}, \
>> + {0x304, "H_COP"}, \
>> + {0x314, "H_GET_MPP_X"}, \
>> + {0x31C, "H_SET_MODE"}, \
>> + {0xf000, "H_RTAS"}
>
> I think we're better off using the defines here rather than hand written numbers.
>
Good point! Will do that. So, also for the other handwritten numbers below.
>> +
>> +#define kvm_trace_symbol_kvmret \
>> + {0, "RESUME_GUEST"}, \
>> + {1, "RESUME_GUEST_NV"}, \
>> + {2, "RESUME_HOST"}, \
>> + {3, "RESUME_HOST_NV"}
>
> Same here
>
>> +
>> +#define kvm_trace_symbol_hcall_rc \
>> + {0, "H_SUCCESS"}, \
>> + {1, "H_BUSY"}, \
>> + {2, "H_CLOSED"}, \
>> + {3, "H_NOT_AVAILABLE"}, \
>> + {4, "H_CONSTRAINED"}, \
>> + {5, "H_PARTIAL"}, \
>> + {14, "H_IN_PROGRESS"}, \
>> + {15, "H_PAGE_REGISTERED"}, \
>> + {16, "H_PARTIAL_STORE"}, \
>> + {17, "H_PENDING"}, \
>> + {18, "H_CONTINUE"}, \
>> + {9900, "H_LONG_BUSY_START_RANGE"}, \
>> + {9900, "H_LONG_BUSY_ORDER_1_MSEC"}, \
>> + {9901, "H_LONG_BUSY_ORDER_10_MSEC"}, \
>> + {9902, "H_LONG_BUSY_ORDER_100_MSEC"}, \
>> + {9903, "H_LONG_BUSY_ORDER_1_SEC"}, \
>> + {9904, "H_LONG_BUSY_ORDER_10_SEC"}, \
>> + {9905, "H_LONG_BUSY_ORDER_100_SEC"}, \
>> + {9905, "H_LONG_BUSY_END_RANGE"}, \
>> + {9999, "H_TOO_HARD"}, \
>> + {-1, "H_HARDWARE"}, \
>> + {-2, "H_FUNCTION"}, \
>> + {-3, "H_PRIVILEGE"}, \
>> + {-4, "H_PARAMETER"}, \
>> + {-5, "H_BAD_MODE"}, \
>> + {-6, "H_PTEG_FULL"}, \
>> + {-7, "H_NOT_FOUND"}, \
>> + {-8, "H_RESERVED_DABR"}, \
>> + {-9, "H_NO_MEM"}, \
>> + {-10, "H_AUTHORITY"}, \
>> + {-11, "H_PERMISSION"}, \
>> + {-12, "H_DROPPED"}, \
>> + {-13, "H_SOURCE_PARM"}, \
>> + {-14, "H_DEST_PARM"}, \
>> + {-15, "H_REMOTE_PARM"}, \
>> + {-16, "H_RESOURCE"}, \
>> + {-17, "H_ADAPTER_PARM"}, \
>> + {-18, "H_RH_PARM"}, \
>> + {-19, "H_RCQ_PARM"}, \
>> + {-20, "H_SCQ_PARM"}, \
>> + {-21, "H_EQ_PARM"}, \
>> + {-22, "H_RT_PARM"}, \
>> + {-23, "H_ST_PARM"}, \
>> + {-24, "H_SIGT_PARM"}, \
>> + {-25, "H_TOKEN_PARM"}, \
>> + {-27, "H_MLENGTH_PARM"}, \
>> + {-28, "H_MEM_PARM"}, \
>> + {-29, "H_MEM_ACCESS_PARM"}, \
>> + {-30, "H_ATTR_PARM"}, \
>> + {-31, "H_PORT_PARM"}, \
>> + {-32, "H_MCG_PARM"}, \
>> + {-33, "H_VL_PARM"}, \
>> + {-34, "H_TSIZE_PARM"}, \
>> + {-35, "H_TRACE_PARM"}, \
>> + {-37, "H_MASK_PARM"}, \
>> + {-38, "H_MCG_FULL"}, \
>> + {-39, "H_ALIAS_EXIST"}, \
>> + {-40, "H_P_COUNTER"}, \
>> + {-41, "H_TABLE_FULL"}, \
>> + {-42, "H_ALT_TABLE"}, \
>> + {-43, "H_MR_CONDITION"}, \
>> + {-44, "H_NOT_ENOUGH_RESOURCES"}, \
>> + {-45, "H_R_STATE"}, \
>> + {-46, "H_RESCINDED"}, \
>> + {-55, "H_P2"}, \
>> + {-56, "H_P3"}, \
>> + {-57, "H_P4"}, \
>> + {-58, "H_P5"}, \
>> + {-59, "H_P6"}, \
>> + {-60, "H_P7"}, \
>> + {-61, "H_P8"}, \
>> + {-62, "H_P9"}, \
>> + {-64, "H_TOO_BIG"}, \
>> + {-68, "H_OVERLAP"}, \
>> + {-69, "H_INTERRUPT"}, \
>> + {-70, "H_BAD_DATA"}, \
>> + {-71, "H_NOT_ACTIVE"}, \
>> + {-72, "H_SG_LIST"}, \
>> + {-73, "H_OP_MODE"}, \
>> + {-74, "H_COP_HW"}, \
>> + {-256, "H_UNSUPPORTED_FLAG_START"}, \
>> + {-511, "H_UNSUPPORTED_FLAG_END"}, \
>> + {-9005, "H_MULTI_THREADS_ACTIVE"}, \
>> + {-9006, "H_OUTSTANDING_COP_OPS"}
>
> And here
>
> Overall, please check for every trace point whether we can share it with PR and/or booke :)
I did look at that when I was coding these, but the data we need in the trace points is
different.
Thanks for your suggestions!
-suresh
>
> Alex
>
>> +
>> +TRACE_EVENT(kvm_guest_enter,
>> + TP_PROTO(struct kvm_vcpu *vcpu),
>> + TP_ARGS(vcpu),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(unsigned long, pc)
>> + __field(unsigned long, pending_exceptions)
>> + __field(u8, ceded)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->pc = kvmppc_get_pc(vcpu);
>> + __entry->ceded = vcpu->arch.ceded;
>> + __entry->pending_exceptions = vcpu->arch.pending_exceptions;
>> + ),
>> +
>> + TP_printk("VCPU %d: pc=0x%lx pexcp=0x%lx ceded=%d",
>> + __entry->vcpu_id,
>> + __entry->pc,
>> + __entry->pending_exceptions, __entry->ceded)
>> +);
>> +
>> +TRACE_EVENT(kvm_guest_exit,
>> + TP_PROTO(struct kvm_vcpu *vcpu),
>> + TP_ARGS(vcpu),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(int, trap)
>> + __field(unsigned long, pc)
>> + __field(unsigned long, msr)
>> + __field(u8, ceded)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->trap = vcpu->arch.trap;
>> + __entry->ceded = vcpu->arch.ceded;
>> + __entry->pc = kvmppc_get_pc(vcpu);
>> + __entry->msr = vcpu->arch.shregs.msr;
>> + ),
>> +
>> + TP_printk("VCPU %d: trap=%s pc=0x%lx msr=0x%lx, ceded=%d",
>> + __entry->vcpu_id,
>> + __print_symbolic(__entry->trap, kvm_trace_symbol_exit),
>> + __entry->pc, __entry->msr, __entry->ceded
>> + )
>> +);
>> +
>> +TRACE_EVENT(kvm_page_fault_enter,
>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
>> + struct kvm_memory_slot *memslot, unsigned long ea,
>> + unsigned long dsisr),
>> +
>> + TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(unsigned long, hpte_v)
>> + __field(unsigned long, hpte_r)
>> + __field(unsigned long, gpte_r)
>> + __field(unsigned long, ea)
>> + __field(u64, base_gfn)
>> + __field(u32, slot_flags)
>> + __field(u32, dsisr)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->hpte_v = hptep[0];
>> + __entry->hpte_r = hptep[1];
>> + __entry->gpte_r = hptep[2];
>> + __entry->ea = ea;
>> + __entry->dsisr = dsisr;
>> + __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
>> + __entry->slot_flags = memslot ? memslot->flags : 0;
>> + ),
>> +
>> + TP_printk("VCPU %d: hpte=0x%lx:0x%lx guest=0x%lx ea=0x%lx,%x slot=0x%llx,0x%x",
>> + __entry->vcpu_id,
>> + __entry->hpte_v, __entry->hpte_r, __entry->gpte_r,
>> + __entry->ea, __entry->dsisr,
>> + __entry->base_gfn, __entry->slot_flags)
>> +);
>> +
>> +TRACE_EVENT(kvm_page_fault_exit,
>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
>> +
>> + TP_ARGS(vcpu, hptep, ret),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(unsigned long, hpte_v)
>> + __field(unsigned long, hpte_r)
>> + __field(long, ret)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->hpte_v = hptep[0];
>> + __entry->hpte_r = hptep[1];
>> + __entry->ret = ret;
>> + ),
>> +
>> + TP_printk("VCPU %d: hpte=0x%lx:0x%lx ret=0x%lx",
>> + __entry->vcpu_id,
>> + __entry->hpte_v, __entry->hpte_r, __entry->ret)
>> +);
>> +
>> +TRACE_EVENT(kvm_hcall_enter,
>> + TP_PROTO(struct kvm_vcpu *vcpu),
>> +
>> + TP_ARGS(vcpu),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(unsigned long, req)
>> + __field(unsigned long, gpr4)
>> + __field(unsigned long, gpr5)
>> + __field(unsigned long, gpr6)
>> + __field(unsigned long, gpr7)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->req = kvmppc_get_gpr(vcpu, 3);
>> + __entry->gpr4 = kvmppc_get_gpr(vcpu, 4);
>> + __entry->gpr5 = kvmppc_get_gpr(vcpu, 5);
>> + __entry->gpr6 = kvmppc_get_gpr(vcpu, 6);
>> + __entry->gpr7 = kvmppc_get_gpr(vcpu, 7);
>> + ),
>> +
>> + TP_printk("VCPU %d: hcall=%s GPR4-7=0x%lx,0x%lx,0x%lx,0x%lx",
>> + __entry->vcpu_id,
>> + __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
>> + __entry->gpr4, __entry->gpr5, __entry->gpr6, __entry->gpr7)
>> +);
>> +
>> +TRACE_EVENT(kvm_hcall_exit,
>> + TP_PROTO(struct kvm_vcpu *vcpu, int ret),
>> +
>> + TP_ARGS(vcpu, ret),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(unsigned long, ret)
>> + __field(unsigned long, hcall_rc)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->ret = ret;
>> + __entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
>> + ),
>> +
>> + TP_printk("VCPU %d: ret=%s hcall_rc=%s",
>> + __entry->vcpu_id,
>> + __print_symbolic(__entry->ret, kvm_trace_symbol_kvmret),
>> + __print_symbolic(__entry->ret & RESUME_FLAG_HOST ?
>> + H_TOO_HARD : __entry->hcall_rc,
>> + kvm_trace_symbol_hcall_rc))
>> +);
>> +
>> +TRACE_EVENT(kvmppc_run_core,
>> + TP_PROTO(struct kvmppc_vcore *vc, int where),
>> +
>> + TP_ARGS(vc, where),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, n_runnable)
>> + __field(int, runner_vcpu)
>> + __field(int, where)
>> + __field(pid_t, tgid)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->runner_vcpu = vc->runner->vcpu_id;
>> + __entry->n_runnable = vc->n_runnable;
>> + __entry->where = where;
>> + __entry->tgid = current->tgid;
>> + ),
>> +
>> + TP_printk("%s runner_vcpu==%d runnable=%d tgid=%d",
>> + __entry->where ? "Exit" : "Enter",
>> + __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
>> +);
>> +
>> +TRACE_EVENT(kvmppc_vcore_blocked,
>> + TP_PROTO(struct kvmppc_vcore *vc, int where),
>> +
>> + TP_ARGS(vc, where),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, n_runnable)
>> + __field(int, runner_vcpu)
>> + __field(int, where)
>> + __field(pid_t, tgid)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->runner_vcpu = vc->runner->vcpu_id;
>> + __entry->n_runnable = vc->n_runnable;
>> + __entry->where = where;
>> + __entry->tgid = current->tgid;
>> + ),
>> +
>> + TP_printk("%s runner_vcpu=%d runnable=%d tgid=%d",
>> + __entry->where ? "Exit" : "Enter",
>> + __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
>> +);
>> +
>> +TRACE_EVENT(kvmppc_run_vcpu_enter,
>> + TP_PROTO(struct kvm_vcpu *vcpu),
>> +
>> + TP_ARGS(vcpu),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(pid_t, tgid)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->tgid = current->tgid;
>> + ),
>> +
>> + TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
>> +);
>> +
>> +TRACE_EVENT(kvmppc_run_vcpu_exit,
>> + TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
>> +
>> + TP_ARGS(vcpu, run),
>> +
>> + TP_STRUCT__entry(
>> + __field(int, vcpu_id)
>> + __field(int, exit)
>> + __field(int, ret)
>> + ),
>> +
>> + TP_fast_assign(
>> + __entry->vcpu_id = vcpu->vcpu_id;
>> + __entry->exit = run->exit_reason;
>> + __entry->ret = vcpu->arch.ret;
>> + ),
>> +
>> + TP_printk("VCPU %d: exit=%d, ret=%d",
>> + __entry->vcpu_id, __entry->exit, __entry->ret)
>> +);
>> +
>> +#endif /* _TRACE_KVM_HV_H */
>> +
>> +/* This part must be outside protection */
>> +#include <trace/define_trace.h>
>> --
>> 1.8.3.4
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-13 23:29 Suresh E. Warrier
2014-11-14 10:56 ` Alexander Graf
@ 2014-11-20 10:40 ` Aneesh Kumar K.V
2014-11-20 12:10 ` Alexander Graf
1 sibling, 1 reply; 10+ messages in thread
From: Aneesh Kumar K.V @ 2014-11-20 10:40 UTC (permalink / raw)
To: Suresh E. Warrier, agraf, kvm-ppc, kvm; +Cc: Paul Mackerras
"Suresh E. Warrier" <warrier@linux.vnet.ibm.com> writes:
> This patch adds trace points in the guest entry and exit code and also
> for exceptions handled by the host in kernel mode - hypercalls and page
> faults. The new events are added to /sys/kernel/debug/tracing/events
> under a new subsystem called kvm_hv.
........
> /* Set this explicitly in case thread 0 doesn't have a vcpu */
> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>
> vc->vcore_state = VCORE_RUNNING;
> preempt_disable();
> +
> + trace_kvmppc_run_core(vc, 0);
> +
> spin_unlock(&vc->lock);
Do we really want to call tracepoint with spin lock held ? Is that a good
thing to do ?.
-aneesh
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-19 21:54 ` Suresh E. Warrier
@ 2014-11-20 12:08 ` Alexander Graf
0 siblings, 0 replies; 10+ messages in thread
From: Alexander Graf @ 2014-11-20 12:08 UTC (permalink / raw)
To: Suresh E. Warrier
Cc: kvm-ppc@vger.kernel.org, kvm@vger.kernel.org, Paul Mackerras
On 19.11.14 22:54, Suresh E. Warrier wrote:
>
>
> On 11/14/2014 04:56 AM, Alexander Graf wrote:
>>
>>
>>
>>> Am 14.11.2014 um 00:29 schrieb Suresh E. Warrier <warrier@linux.vnet.ibm.com>:
>>>
>>> This patch adds trace points in the guest entry and exit code and also
>>> for exceptions handled by the host in kernel mode - hypercalls and page
>>> faults. The new events are added to /sys/kernel/debug/tracing/events
>>> under a new subsystem called kvm_hv.
>>>
>>> Acked-by: Paul Mackerras <paulus@samba.org>
>>> Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
>>> ---
>>> arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +-
>>> arch/powerpc/kvm/book3s_hv.c | 19 ++
>>> arch/powerpc/kvm/trace_hv.h | 497 ++++++++++++++++++++++++++++++++++++
>>> 3 files changed, 525 insertions(+), 3 deletions(-)
>>> create mode 100644 arch/powerpc/kvm/trace_hv.h
>>>
>>> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
>>> index 70feb7b..20cbad1 100644
>>> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
>>> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
>>> @@ -38,6 +38,7 @@
>>> #include <asm/cputable.h>
>>>
>>> #include "book3s_hv_cma.h"
>>> +#include "trace_hv.h"
>>>
>>> /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
>>> #define MAX_LPID_970 63
>>> @@ -627,6 +628,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>>> gfn = gpa >> PAGE_SHIFT;
>>> memslot = gfn_to_memslot(kvm, gfn);
>>>
>>> + trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
>>> +
>>> /* No memslot means it's an emulated MMIO region */
>>> if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
>>> return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
>>> @@ -639,6 +642,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>>> mmu_seq = kvm->mmu_notifier_seq;
>>> smp_rmb();
>>>
>>> + ret = -EFAULT;
>>> is_io = 0;
>>> pfn = 0;
>>> page = NULL;
>>> @@ -662,7 +666,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>>> }
>>> up_read(&current->mm->mmap_sem);
>>> if (!pfn)
>>> - return -EFAULT;
>>> + goto out_put;
>>> } else {
>>> page = pages[0];
>>> if (PageHuge(page)) {
>>> @@ -690,14 +694,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>>> pfn = page_to_pfn(page);
>>> }
>>>
>>> - ret = -EFAULT;
>>> if (psize > pte_size)
>>> goto out_put;
>>>
>>> /* Check WIMG vs. the actual page we're accessing */
>>> if (!hpte_cache_flags_ok(r, is_io)) {
>>> if (is_io)
>>> - return -EFAULT;
>>> + goto out_put;
>>> +
>>> /*
>>> * Allow guest to map emulated device memory as
>>> * uncacheable, but actually make it cacheable.
>>> @@ -753,6 +757,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>>> SetPageDirty(page);
>>>
>>> out_put:
>>> + trace_kvm_page_fault_exit(vcpu, hpte, ret);
>>> +
>>> if (page) {
>>> /*
>>> * We drop pages[0] here, not page because page might
>>> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
>>> index 69d4085..5143d17 100644
>>> --- a/arch/powerpc/kvm/book3s_hv.c
>>> +++ b/arch/powerpc/kvm/book3s_hv.c
>>> @@ -57,6 +57,9 @@
>>>
>>> #include "book3s.h"
>>>
>>> +#define CREATE_TRACE_POINTS
>>> +#include "trace_hv.h"
>>> +
>>> /* #define EXIT_DEBUG */
>>> /* #define EXIT_DEBUG_SIMPLE */
>>> /* #define EXIT_DEBUG_INT */
>>> @@ -1679,6 +1682,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>> list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
>>> kvmppc_start_thread(vcpu);
>>> kvmppc_create_dtl_entry(vcpu, vc);
>>> + trace_kvm_guest_enter(vcpu);
>>> }
>>>
>>> /* Set this explicitly in case thread 0 doesn't have a vcpu */
>>> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>>
>>> vc->vcore_state = VCORE_RUNNING;
>>> preempt_disable();
>>> +
>>> + trace_kvmppc_run_core(vc, 0);
>>> +
>>> spin_unlock(&vc->lock);
>>>
>>> kvm_guest_enter();
>>> @@ -1732,6 +1739,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>> kvmppc_core_pending_dec(vcpu))
>>> kvmppc_core_dequeue_dec(vcpu);
>>>
>>> + trace_kvm_guest_exit(vcpu);
>>> +
>>> ret = RESUME_GUEST;
>>> if (vcpu->arch.trap)
>>> ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
>>> @@ -1757,6 +1766,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>> wake_up(&vcpu->arch.cpu_run);
>>> }
>>> }
>>> +
>>> + trace_kvmppc_run_core(vc, 1);
>>> }
>>>
>>> /*
>>> @@ -1783,11 +1794,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
>>>
>>> prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
>>> vc->vcore_state = VCORE_SLEEPING;
>>> + trace_kvmppc_vcore_blocked(vc, 0);
>>> spin_unlock(&vc->lock);
>>> schedule();
>>> finish_wait(&vc->wq, &wait);
>>> spin_lock(&vc->lock);
>>> vc->vcore_state = VCORE_INACTIVE;
>>> + trace_kvmppc_vcore_blocked(vc, 1);
>>> }
>>>
>>> static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> @@ -1796,6 +1809,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> struct kvmppc_vcore *vc;
>>> struct kvm_vcpu *v, *vn;
>>>
>>> + trace_kvmppc_run_vcpu_enter(vcpu);
>>> +
>>> kvm_run->exit_reason = 0;
>>> vcpu->arch.ret = RESUME_GUEST;
>>> vcpu->arch.trap = 0;
>>> @@ -1825,6 +1840,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> VCORE_EXIT_COUNT(vc) == 0) {
>>> kvmppc_create_dtl_entry(vcpu, vc);
>>> kvmppc_start_thread(vcpu);
>>> + trace_kvm_guest_enter(vcpu);
>>> } else if (vc->vcore_state == VCORE_SLEEPING) {
>>> wake_up(&vc->wq);
>>> }
>>> @@ -1889,6 +1905,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> wake_up(&v->arch.cpu_run);
>>> }
>>>
>>> + trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
>>> spin_unlock(&vc->lock);
>>> return vcpu->arch.ret;
>>> }
>>> @@ -1934,7 +1951,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
>>>
>>> if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
>>> !(vcpu->arch.shregs.msr & MSR_PR)) {
>>> + trace_kvm_hcall_enter(vcpu);
>>> r = kvmppc_pseries_do_hcall(vcpu);
>>> + trace_kvm_hcall_exit(vcpu, r);
>>> kvmppc_core_prepare_to_enter(vcpu);
>>> } else if (r == RESUME_PAGE_FAULT) {
>>> srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
>>> diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
>>> new file mode 100644
>>> index 0000000..2d2e55f
>>> --- /dev/null
>>> +++ b/arch/powerpc/kvm/trace_hv.h
>>> @@ -0,0 +1,497 @@
>>> +#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
>>> +#define _TRACE_KVM_HV_H
>>> +
>>> +#include <linux/tracepoint.h>
>>> +
>>> +#undef TRACE_SYSTEM
>>> +#define TRACE_SYSTEM kvm_hv
>>> +#define TRACE_INCLUDE_PATH .
>>> +#define TRACE_INCLUDE_FILE trace_hv
>>> +
>>> +#define kvm_trace_symbol_exit \
>>> + {0x100, "SYSTEM_RESET"}, \
>>> + {0x200, "MACHINE_CHECK"}, \
>>> + {0x300, "DATA_STORAGE"}, \
>>> + {0x380, "DATA_SEGMENT"}, \
>>> + {0x400, "INST_STORAGE"}, \
>>> + {0x480, "INST_SEGMENT"}, \
>>> + {0x500, "EXTERNAL"}, \
>>> + {0x502, "EXTERNAL_HV"}, \
>>> + {0x600, "ALIGNMENT"}, \
>>> + {0x700, "PROGRAM"}, \
>>> + {0x800, "FP_UNAVAIL"}, \
>>> + {0x900, "DECREMENTER"}, \
>>> + {0x980, "HV_DECREMENTER"}, \
>>> + {0xc00, "SYSCALL"}, \
>>> + {0xd00, "TRACE"}, \
>>> + {0xe00, "H_DATA_STORAGE"}, \
>>> + {0xe20, "H_INST_STORAGE"}, \
>>> + {0xe40, "H_EMUL_ASSIST"}, \
>>> + {0xf00, "PERFMON"}, \
>>> + {0xf20, "ALTIVEC"}, \
>>> + {0xf40, "VSX"}
>>
>> Can we share these with PR?
>>
> I could move these to a new file, say trace_kvm.h and have both trace_pr.h and
> trace_hv.h include the file. Please confirm if that works for you. Or do you
> have a better suggestion?
Yeah, just create a trace_book3s.h file and include it from trace_pr.h
and trace_hv.h.
Alex
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-20 10:40 ` Aneesh Kumar K.V
@ 2014-11-20 12:10 ` Alexander Graf
2014-11-20 14:01 ` Steven Rostedt
0 siblings, 1 reply; 10+ messages in thread
From: Alexander Graf @ 2014-11-20 12:10 UTC (permalink / raw)
To: Aneesh Kumar K.V, Suresh E. Warrier, kvm-ppc, kvm; +Cc: Paul Mackerras, rostedt
On 20.11.14 11:40, Aneesh Kumar K.V wrote:
> "Suresh E. Warrier" <warrier@linux.vnet.ibm.com> writes:
>
>> This patch adds trace points in the guest entry and exit code and also
>> for exceptions handled by the host in kernel mode - hypercalls and page
>> faults. The new events are added to /sys/kernel/debug/tracing/events
>> under a new subsystem called kvm_hv.
>
> ........
>
>> /* Set this explicitly in case thread 0 doesn't have a vcpu */
>> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>
>> vc->vcore_state = VCORE_RUNNING;
>> preempt_disable();
>> +
>> + trace_kvmppc_run_core(vc, 0);
>> +
>> spin_unlock(&vc->lock);
>
> Do we really want to call tracepoint with spin lock held ? Is that a good
> thing to do ?.
I thought it was safe to call tracepoints inside of spin lock regions?
Steve?
Alex
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-20 12:10 ` Alexander Graf
@ 2014-11-20 14:01 ` Steven Rostedt
2014-12-02 1:19 ` Suresh E. Warrier
0 siblings, 1 reply; 10+ messages in thread
From: Steven Rostedt @ 2014-11-20 14:01 UTC (permalink / raw)
To: Alexander Graf
Cc: Aneesh Kumar K.V, Suresh E. Warrier, kvm-ppc, kvm, Paul Mackerras
On Thu, 20 Nov 2014 13:10:12 +0100
Alexander Graf <agraf@suse.de> wrote:
>
>
> On 20.11.14 11:40, Aneesh Kumar K.V wrote:
> > "Suresh E. Warrier" <warrier@linux.vnet.ibm.com> writes:
> >
> >> This patch adds trace points in the guest entry and exit code and also
> >> for exceptions handled by the host in kernel mode - hypercalls and page
> >> faults. The new events are added to /sys/kernel/debug/tracing/events
> >> under a new subsystem called kvm_hv.
> >
> > ........
> >
> >> /* Set this explicitly in case thread 0 doesn't have a vcpu */
> >> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
> >>
> >> vc->vcore_state = VCORE_RUNNING;
> >> preempt_disable();
> >> +
> >> + trace_kvmppc_run_core(vc, 0);
> >> +
> >> spin_unlock(&vc->lock);
> >
> > Do we really want to call tracepoint with spin lock held ? Is that a good
> > thing to do ?.
>
> I thought it was safe to call tracepoints inside of spin lock regions?
> Steve?
>
There's tracepoints in the guts of the scheduler where rq lock is held.
Don't worry about it. The tracing system is lockless.
-- Steve
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-11-20 14:01 ` Steven Rostedt
@ 2014-12-02 1:19 ` Suresh E. Warrier
0 siblings, 0 replies; 10+ messages in thread
From: Suresh E. Warrier @ 2014-12-02 1:19 UTC (permalink / raw)
To: Steven Rostedt, Alexander Graf
Cc: Aneesh Kumar K.V, kvm-ppc, kvm, Paul Mackerras
On 11/20/2014 08:01 AM, Steven Rostedt wrote:
> On Thu, 20 Nov 2014 13:10:12 +0100
> Alexander Graf <agraf@suse.de> wrote:
>
>>
>>
>> On 20.11.14 11:40, Aneesh Kumar K.V wrote:
>>> "Suresh E. Warrier" <warrier@linux.vnet.ibm.com> writes:
>>>
>>>> This patch adds trace points in the guest entry and exit code and also
>>>> for exceptions handled by the host in kernel mode - hypercalls and page
>>>> faults. The new events are added to /sys/kernel/debug/tracing/events
>>>> under a new subsystem called kvm_hv.
>>>
>>> ........
>>>
>>>> /* Set this explicitly in case thread 0 doesn't have a vcpu */
>>>> @@ -1687,6 +1691,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
>>>>
>>>> vc->vcore_state = VCORE_RUNNING;
>>>> preempt_disable();
>>>> +
>>>> + trace_kvmppc_run_core(vc, 0);
>>>> +
>>>> spin_unlock(&vc->lock);
>>>
>>> Do we really want to call tracepoint with spin lock held ? Is that a good
>>> thing to do ?.
>>
>> I thought it was safe to call tracepoints inside of spin lock regions?
>> Steve?
>>
>
> There's tracepoints in the guts of the scheduler where rq lock is held.
> Don't worry about it. The tracing system is lockless.
>
Thanks for confirming.
-suresh
> -- Steve
>
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
@ 2014-12-04 0:48 Suresh E. Warrier
2014-12-17 12:31 ` Alexander Graf
0 siblings, 1 reply; 10+ messages in thread
From: Suresh E. Warrier @ 2014-12-04 0:48 UTC (permalink / raw)
To: Alexander Graf, kvm-ppc@vger.kernel.org, kvm@vger.kernel.org,
Paul Mackerras
This patch adds trace points in the guest entry and exit code and also
for exceptions handled by the host in kernel mode - hypercalls and page
faults. The new events are added to /sys/kernel/debug/tracing/events
under a new subsystem called kvm_hv.
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
---
Added new include file for common trace defines for kvm_pr and kvm_hv.
Replaced hand-written numbers with defines in trace_hv.h.
arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +-
arch/powerpc/kvm/book3s_hv.c | 19 ++
arch/powerpc/kvm/trace_book3s.h | 32 +++
arch/powerpc/kvm/trace_hv.h | 477 ++++++++++++++++++++++++++++++++++++
arch/powerpc/kvm/trace_pr.h | 25 +-
5 files changed, 538 insertions(+), 27 deletions(-)
create mode 100644 arch/powerpc/kvm/trace_book3s.h
create mode 100644 arch/powerpc/kvm/trace_hv.h
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8190e36..52e8fa1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -39,6 +39,7 @@
#include <asm/cputable.h>
#include "book3s_hv_cma.h"
+#include "trace_hv.h"
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
@@ -628,6 +629,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
+ trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
+
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
gpa |= (ea & (psize - 1));
@@ -642,6 +645,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
+ ret = -EFAULT;
is_io = 0;
pfn = 0;
page = NULL;
@@ -665,7 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
up_read(&current->mm->mmap_sem);
if (!pfn)
- return -EFAULT;
+ goto out_put;
} else {
page = pages[0];
if (PageHuge(page)) {
@@ -693,14 +697,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pfn = page_to_pfn(page);
}
- ret = -EFAULT;
if (psize > pte_size)
goto out_put;
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_io)) {
if (is_io)
- return -EFAULT;
+ goto out_put;
+
/*
* Allow guest to map emulated device memory as
* uncacheable, but actually make it cacheable.
@@ -756,6 +760,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);
out_put:
+ trace_kvm_page_fault_exit(vcpu, hpte, ret);
+
if (page) {
/*
* We drop pages[0] here, not page because page might
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c2d2535..40615ab 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -58,6 +58,9 @@
#include "book3s.h"
+#define CREATE_TRACE_POINTS
+#include "trace_hv.h"
+
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
@@ -1721,6 +1724,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
}
/* Set this explicitly in case thread 0 doesn't have a vcpu */
@@ -1729,6 +1733,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_RUNNING;
preempt_disable();
+
+ trace_kvmppc_run_core(vc, 0);
+
spin_unlock(&vc->lock);
kvm_guest_enter();
@@ -1774,6 +1781,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_core_pending_dec(vcpu))
kvmppc_core_dequeue_dec(vcpu);
+ trace_kvm_guest_exit(vcpu);
+
ret = RESUME_GUEST;
if (vcpu->arch.trap)
ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
@@ -1799,6 +1808,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
wake_up(&vcpu->arch.cpu_run);
}
}
+
+ trace_kvmppc_run_core(vc, 1);
}
/*
@@ -1845,11 +1856,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
}
vc->vcore_state = VCORE_SLEEPING;
+ trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
finish_wait(&vc->wq, &wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
+ trace_kvmppc_vcore_blocked(vc, 1);
}
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -1858,6 +1871,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc;
struct kvm_vcpu *v, *vn;
+ trace_kvmppc_run_vcpu_enter(vcpu);
+
kvm_run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
@@ -1887,6 +1902,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
VCORE_EXIT_COUNT(vc) == 0) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
+ trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
wake_up(&vc->wq);
}
@@ -1951,6 +1967,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
+ trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
@@ -1996,7 +2013,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
+ trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
+ trace_kvm_hcall_exit(vcpu, r);
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h
new file mode 100644
index 0000000..f647ce0
--- /dev/null
+++ b/arch/powerpc/kvm/trace_book3s.h
@@ -0,0 +1,32 @@
+#if !defined(_TRACE_KVM_BOOK3S_H)
+#define _TRACE_KVM_BOOK3S_H
+
+/*
+ * Common defines used by the trace macros in trace_pr.h and trace_hv.h
+ */
+
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+#endif
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
new file mode 100644
index 0000000..33d9daf
--- /dev/null
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -0,0 +1,477 @@
+#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_HV_H
+
+#include <linux/tracepoint.h>
+#include "trace_book3s.h"
+#include <asm/hvcall.h>
+#include <asm/kvm_asm.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_hv
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
+#define kvm_trace_symbol_hcall \
+ {H_REMOVE, "H_REMOVE"}, \
+ {H_ENTER, "H_ENTER"}, \
+ {H_READ, "H_READ"}, \
+ {H_CLEAR_MOD, "H_CLEAR_MOD"}, \
+ {H_CLEAR_REF, "H_CLEAR_REF"}, \
+ {H_PROTECT, "H_PROTECT"}, \
+ {H_GET_TCE, "H_GET_TCE"}, \
+ {H_PUT_TCE, "H_PUT_TCE"}, \
+ {H_SET_SPRG0, "H_SET_SPRG0"}, \
+ {H_SET_DABR, "H_SET_DABR"}, \
+ {H_PAGE_INIT, "H_PAGE_INIT"}, \
+ {H_SET_ASR, "H_SET_ASR"}, \
+ {H_ASR_ON, "H_ASR_ON"}, \
+ {H_ASR_OFF, "H_ASR_OFF"}, \
+ {H_LOGICAL_CI_LOAD, "H_LOGICAL_CI_LOAD"}, \
+ {H_LOGICAL_CI_STORE, "H_LOGICAL_CI_STORE"}, \
+ {H_LOGICAL_CACHE_LOAD, "H_LOGICAL_CACHE_LOAD"}, \
+ {H_LOGICAL_CACHE_STORE, "H_LOGICAL_CACHE_STORE"}, \
+ {H_LOGICAL_ICBI, "H_LOGICAL_ICBI"}, \
+ {H_LOGICAL_DCBF, "H_LOGICAL_DCBF"}, \
+ {H_GET_TERM_CHAR, "H_GET_TERM_CHAR"}, \
+ {H_PUT_TERM_CHAR, "H_PUT_TERM_CHAR"}, \
+ {H_REAL_TO_LOGICAL, "H_REAL_TO_LOGICAL"}, \
+ {H_HYPERVISOR_DATA, "H_HYPERVISOR_DATA"}, \
+ {H_EOI, "H_EOI"}, \
+ {H_CPPR, "H_CPPR"}, \
+ {H_IPI, "H_IPI"}, \
+ {H_IPOLL, "H_IPOLL"}, \
+ {H_XIRR, "H_XIRR"}, \
+ {H_PERFMON, "H_PERFMON"}, \
+ {H_MIGRATE_DMA, "H_MIGRATE_DMA"}, \
+ {H_REGISTER_VPA, "H_REGISTER_VPA"}, \
+ {H_CEDE, "H_CEDE"}, \
+ {H_CONFER, "H_CONFER"}, \
+ {H_PROD, "H_PROD"}, \
+ {H_GET_PPP, "H_GET_PPP"}, \
+ {H_SET_PPP, "H_SET_PPP"}, \
+ {H_PURR, "H_PURR"}, \
+ {H_PIC, "H_PIC"}, \
+ {H_REG_CRQ, "H_REG_CRQ"}, \
+ {H_FREE_CRQ, "H_FREE_CRQ"}, \
+ {H_VIO_SIGNAL, "H_VIO_SIGNAL"}, \
+ {H_SEND_CRQ, "H_SEND_CRQ"}, \
+ {H_COPY_RDMA, "H_COPY_RDMA"}, \
+ {H_REGISTER_LOGICAL_LAN, "H_REGISTER_LOGICAL_LAN"}, \
+ {H_FREE_LOGICAL_LAN, "H_FREE_LOGICAL_LAN"}, \
+ {H_ADD_LOGICAL_LAN_BUFFER, "H_ADD_LOGICAL_LAN_BUFFER"}, \
+ {H_SEND_LOGICAL_LAN, "H_SEND_LOGICAL_LAN"}, \
+ {H_BULK_REMOVE, "H_BULK_REMOVE"}, \
+ {H_MULTICAST_CTRL, "H_MULTICAST_CTRL"}, \
+ {H_SET_XDABR, "H_SET_XDABR"}, \
+ {H_STUFF_TCE, "H_STUFF_TCE"}, \
+ {H_PUT_TCE_INDIRECT, "H_PUT_TCE_INDIRECT"}, \
+ {H_CHANGE_LOGICAL_LAN_MAC, "H_CHANGE_LOGICAL_LAN_MAC"}, \
+ {H_VTERM_PARTNER_INFO, "H_VTERM_PARTNER_INFO"}, \
+ {H_REGISTER_VTERM, "H_REGISTER_VTERM"}, \
+ {H_FREE_VTERM, "H_FREE_VTERM"}, \
+ {H_RESET_EVENTS, "H_RESET_EVENTS"}, \
+ {H_ALLOC_RESOURCE, "H_ALLOC_RESOURCE"}, \
+ {H_FREE_RESOURCE, "H_FREE_RESOURCE"}, \
+ {H_MODIFY_QP, "H_MODIFY_QP"}, \
+ {H_QUERY_QP, "H_QUERY_QP"}, \
+ {H_REREGISTER_PMR, "H_REREGISTER_PMR"}, \
+ {H_REGISTER_SMR, "H_REGISTER_SMR"}, \
+ {H_QUERY_MR, "H_QUERY_MR"}, \
+ {H_QUERY_MW, "H_QUERY_MW"}, \
+ {H_QUERY_HCA, "H_QUERY_HCA"}, \
+ {H_QUERY_PORT, "H_QUERY_PORT"}, \
+ {H_MODIFY_PORT, "H_MODIFY_PORT"}, \
+ {H_DEFINE_AQP1, "H_DEFINE_AQP1"}, \
+ {H_GET_TRACE_BUFFER, "H_GET_TRACE_BUFFER"}, \
+ {H_DEFINE_AQP0, "H_DEFINE_AQP0"}, \
+ {H_RESIZE_MR, "H_RESIZE_MR"}, \
+ {H_ATTACH_MCQP, "H_ATTACH_MCQP"}, \
+ {H_DETACH_MCQP, "H_DETACH_MCQP"}, \
+ {H_CREATE_RPT, "H_CREATE_RPT"}, \
+ {H_REMOVE_RPT, "H_REMOVE_RPT"}, \
+ {H_REGISTER_RPAGES, "H_REGISTER_RPAGES"}, \
+ {H_DISABLE_AND_GETC, "H_DISABLE_AND_GETC"}, \
+ {H_ERROR_DATA, "H_ERROR_DATA"}, \
+ {H_GET_HCA_INFO, "H_GET_HCA_INFO"}, \
+ {H_GET_PERF_COUNT, "H_GET_PERF_COUNT"}, \
+ {H_MANAGE_TRACE, "H_MANAGE_TRACE"}, \
+ {H_FREE_LOGICAL_LAN_BUFFER, "H_FREE_LOGICAL_LAN_BUFFER"}, \
+ {H_QUERY_INT_STATE, "H_QUERY_INT_STATE"}, \
+ {H_POLL_PENDING, "H_POLL_PENDING"}, \
+ {H_ILLAN_ATTRIBUTES, "H_ILLAN_ATTRIBUTES"}, \
+ {H_MODIFY_HEA_QP, "H_MODIFY_HEA_QP"}, \
+ {H_QUERY_HEA_QP, "H_QUERY_HEA_QP"}, \
+ {H_QUERY_HEA, "H_QUERY_HEA"}, \
+ {H_QUERY_HEA_PORT, "H_QUERY_HEA_PORT"}, \
+ {H_MODIFY_HEA_PORT, "H_MODIFY_HEA_PORT"}, \
+ {H_REG_BCMC, "H_REG_BCMC"}, \
+ {H_DEREG_BCMC, "H_DEREG_BCMC"}, \
+ {H_REGISTER_HEA_RPAGES, "H_REGISTER_HEA_RPAGES"}, \
+ {H_DISABLE_AND_GET_HEA, "H_DISABLE_AND_GET_HEA"}, \
+ {H_GET_HEA_INFO, "H_GET_HEA_INFO"}, \
+ {H_ALLOC_HEA_RESOURCE, "H_ALLOC_HEA_RESOURCE"}, \
+ {H_ADD_CONN, "H_ADD_CONN"}, \
+ {H_DEL_CONN, "H_DEL_CONN"}, \
+ {H_JOIN, "H_JOIN"}, \
+ {H_VASI_STATE, "H_VASI_STATE"}, \
+ {H_ENABLE_CRQ, "H_ENABLE_CRQ"}, \
+ {H_GET_EM_PARMS, "H_GET_EM_PARMS"}, \
+ {H_SET_MPP, "H_SET_MPP"}, \
+ {H_GET_MPP, "H_GET_MPP"}, \
+ {H_HOME_NODE_ASSOCIATIVITY, "H_HOME_NODE_ASSOCIATIVITY"}, \
+ {H_BEST_ENERGY, "H_BEST_ENERGY"}, \
+ {H_XIRR_X, "H_XIRR_X"}, \
+ {H_RANDOM, "H_RANDOM"}, \
+ {H_COP, "H_COP"}, \
+ {H_GET_MPP_X, "H_GET_MPP_X"}, \
+ {H_SET_MODE, "H_SET_MODE"}, \
+ {H_RTAS, "H_RTAS"}
+
+#define kvm_trace_symbol_kvmret \
+ {RESUME_GUEST, "RESUME_GUEST"}, \
+ {RESUME_GUEST_NV, "RESUME_GUEST_NV"}, \
+ {RESUME_HOST, "RESUME_HOST"}, \
+ {RESUME_HOST_NV, "RESUME_HOST_NV"}
+
+#define kvm_trace_symbol_hcall_rc \
+ {H_SUCCESS, "H_SUCCESS"}, \
+ {H_BUSY, "H_BUSY"}, \
+ {H_CLOSED, "H_CLOSED"}, \
+ {H_NOT_AVAILABLE, "H_NOT_AVAILABLE"}, \
+ {H_CONSTRAINED, "H_CONSTRAINED"}, \
+ {H_PARTIAL, "H_PARTIAL"}, \
+ {H_IN_PROGRESS, "H_IN_PROGRESS"}, \
+ {H_PAGE_REGISTERED, "H_PAGE_REGISTERED"}, \
+ {H_PARTIAL_STORE, "H_PARTIAL_STORE"}, \
+ {H_PENDING, "H_PENDING"}, \
+ {H_CONTINUE, "H_CONTINUE"}, \
+ {H_LONG_BUSY_START_RANGE, "H_LONG_BUSY_START_RANGE"}, \
+ {H_LONG_BUSY_ORDER_1_MSEC, "H_LONG_BUSY_ORDER_1_MSEC"}, \
+ {H_LONG_BUSY_ORDER_10_MSEC, "H_LONG_BUSY_ORDER_10_MSEC"}, \
+ {H_LONG_BUSY_ORDER_100_MSEC, "H_LONG_BUSY_ORDER_100_MSEC"}, \
+ {H_LONG_BUSY_ORDER_1_SEC, "H_LONG_BUSY_ORDER_1_SEC"}, \
+ {H_LONG_BUSY_ORDER_10_SEC, "H_LONG_BUSY_ORDER_10_SEC"}, \
+ {H_LONG_BUSY_ORDER_100_SEC, "H_LONG_BUSY_ORDER_100_SEC"}, \
+ {H_LONG_BUSY_END_RANGE, "H_LONG_BUSY_END_RANGE"}, \
+ {H_TOO_HARD, "H_TOO_HARD"}, \
+ {H_HARDWARE, "H_HARDWARE"}, \
+ {H_FUNCTION, "H_FUNCTION"}, \
+ {H_PRIVILEGE, "H_PRIVILEGE"}, \
+ {H_PARAMETER, "H_PARAMETER"}, \
+ {H_BAD_MODE, "H_BAD_MODE"}, \
+ {H_PTEG_FULL, "H_PTEG_FULL"}, \
+ {H_NOT_FOUND, "H_NOT_FOUND"}, \
+ {H_RESERVED_DABR, "H_RESERVED_DABR"}, \
+ {H_NO_MEM, "H_NO_MEM"}, \
+ {H_AUTHORITY, "H_AUTHORITY"}, \
+ {H_PERMISSION, "H_PERMISSION"}, \
+ {H_DROPPED, "H_DROPPED"}, \
+ {H_SOURCE_PARM, "H_SOURCE_PARM"}, \
+ {H_DEST_PARM, "H_DEST_PARM"}, \
+ {H_REMOTE_PARM, "H_REMOTE_PARM"}, \
+ {H_RESOURCE, "H_RESOURCE"}, \
+ {H_ADAPTER_PARM, "H_ADAPTER_PARM"}, \
+ {H_RH_PARM, "H_RH_PARM"}, \
+ {H_RCQ_PARM, "H_RCQ_PARM"}, \
+ {H_SCQ_PARM, "H_SCQ_PARM"}, \
+ {H_EQ_PARM, "H_EQ_PARM"}, \
+ {H_RT_PARM, "H_RT_PARM"}, \
+ {H_ST_PARM, "H_ST_PARM"}, \
+ {H_SIGT_PARM, "H_SIGT_PARM"}, \
+ {H_TOKEN_PARM, "H_TOKEN_PARM"}, \
+ {H_MLENGTH_PARM, "H_MLENGTH_PARM"}, \
+ {H_MEM_PARM, "H_MEM_PARM"}, \
+ {H_MEM_ACCESS_PARM, "H_MEM_ACCESS_PARM"}, \
+ {H_ATTR_PARM, "H_ATTR_PARM"}, \
+ {H_PORT_PARM, "H_PORT_PARM"}, \
+ {H_MCG_PARM, "H_MCG_PARM"}, \
+ {H_VL_PARM, "H_VL_PARM"}, \
+ {H_TSIZE_PARM, "H_TSIZE_PARM"}, \
+ {H_TRACE_PARM, "H_TRACE_PARM"}, \
+ {H_MASK_PARM, "H_MASK_PARM"}, \
+ {H_MCG_FULL, "H_MCG_FULL"}, \
+ {H_ALIAS_EXIST, "H_ALIAS_EXIST"}, \
+ {H_P_COUNTER, "H_P_COUNTER"}, \
+ {H_TABLE_FULL, "H_TABLE_FULL"}, \
+ {H_ALT_TABLE, "H_ALT_TABLE"}, \
+ {H_MR_CONDITION, "H_MR_CONDITION"}, \
+ {H_NOT_ENOUGH_RESOURCES, "H_NOT_ENOUGH_RESOURCES"}, \
+ {H_R_STATE, "H_R_STATE"}, \
+ {H_RESCINDED, "H_RESCINDED"}, \
+ {H_P2, "H_P2"}, \
+ {H_P3, "H_P3"}, \
+ {H_P4, "H_P4"}, \
+ {H_P5, "H_P5"}, \
+ {H_P6, "H_P6"}, \
+ {H_P7, "H_P7"}, \
+ {H_P8, "H_P8"}, \
+ {H_P9, "H_P9"}, \
+ {H_TOO_BIG, "H_TOO_BIG"}, \
+ {H_OVERLAP, "H_OVERLAP"}, \
+ {H_INTERRUPT, "H_INTERRUPT"}, \
+ {H_BAD_DATA, "H_BAD_DATA"}, \
+ {H_NOT_ACTIVE, "H_NOT_ACTIVE"}, \
+ {H_SG_LIST, "H_SG_LIST"}, \
+ {H_OP_MODE, "H_OP_MODE"}, \
+ {H_COP_HW, "H_COP_HW"}, \
+ {H_UNSUPPORTED_FLAG_START, "H_UNSUPPORTED_FLAG_START"}, \
+ {H_UNSUPPORTED_FLAG_END, "H_UNSUPPORTED_FLAG_END"}, \
+ {H_MULTI_THREADS_ACTIVE, "H_MULTI_THREADS_ACTIVE"}, \
+ {H_OUTSTANDING_COP_OPS, "H_OUTSTANDING_COP_OPS"}
+
+TRACE_EVENT(kvm_guest_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, pc)
+ __field(unsigned long, pending_exceptions)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pending_exceptions = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("VCPU %d: pc=0x%lx pexcp=0x%lx ceded=%d",
+ __entry->vcpu_id,
+ __entry->pc,
+ __entry->pending_exceptions, __entry->ceded)
+);
+
+TRACE_EVENT(kvm_guest_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, trap)
+ __field(unsigned long, pc)
+ __field(unsigned long, msr)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->trap = vcpu->arch.trap;
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->msr = vcpu->arch.shregs.msr;
+ ),
+
+ TP_printk("VCPU %d: trap=%s pc=0x%lx msr=0x%lx, ceded=%d",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->trap, kvm_trace_symbol_exit),
+ __entry->pc, __entry->msr, __entry->ceded
+ )
+);
+
+TRACE_EVENT(kvm_page_fault_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
+ struct kvm_memory_slot *memslot, unsigned long ea,
+ unsigned long dsisr),
+
+ TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(unsigned long, gpte_r)
+ __field(unsigned long, ea)
+ __field(u64, base_gfn)
+ __field(u32, slot_flags)
+ __field(u32, dsisr)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->gpte_r = hptep[2];
+ __entry->ea = ea;
+ __entry->dsisr = dsisr;
+ __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
+ __entry->slot_flags = memslot ? memslot->flags : 0;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx guest=0x%lx ea=0x%lx,%x slot=0x%llx,0x%x",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->gpte_r,
+ __entry->ea, __entry->dsisr,
+ __entry->base_gfn, __entry->slot_flags)
+);
+
+TRACE_EVENT(kvm_page_fault_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
+
+ TP_ARGS(vcpu, hptep, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(long, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->ret = ret;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx ret=0x%lx",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->ret)
+);
+
+TRACE_EVENT(kvm_hcall_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, req)
+ __field(unsigned long, gpr4)
+ __field(unsigned long, gpr5)
+ __field(unsigned long, gpr6)
+ __field(unsigned long, gpr7)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->req = kvmppc_get_gpr(vcpu, 3);
+ __entry->gpr4 = kvmppc_get_gpr(vcpu, 4);
+ __entry->gpr5 = kvmppc_get_gpr(vcpu, 5);
+ __entry->gpr6 = kvmppc_get_gpr(vcpu, 6);
+ __entry->gpr7 = kvmppc_get_gpr(vcpu, 7);
+ ),
+
+ TP_printk("VCPU %d: hcall=%s GPR4-7=0x%lx,0x%lx,0x%lx,0x%lx",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
+ __entry->gpr4, __entry->gpr5, __entry->gpr6, __entry->gpr7)
+);
+
+TRACE_EVENT(kvm_hcall_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, int ret),
+
+ TP_ARGS(vcpu, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, ret)
+ __field(unsigned long, hcall_rc)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->ret = ret;
+ __entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
+ ),
+
+ TP_printk("VCPU %d: ret=%s hcall_rc=%s",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->ret, kvm_trace_symbol_kvmret),
+ __print_symbolic(__entry->ret & RESUME_FLAG_HOST ?
+ H_TOO_HARD : __entry->hcall_rc,
+ kvm_trace_symbol_hcall_rc))
+);
+
+TRACE_EVENT(kvmppc_run_core,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu==%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_vcore_blocked,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu=%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+
+ TP_ARGS(vcpu, run),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, exit)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->exit = run->exit_reason;
+ __entry->ret = vcpu->arch.ret;
+ ),
+
+ TP_printk("VCPU %d: exit=%d, ret=%d",
+ __entry->vcpu_id, __entry->exit, __entry->ret)
+);
+
+#endif /* _TRACE_KVM_HV_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 8b22e47..a862b28 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -3,36 +3,13 @@
#define _TRACE_KVM_PR_H
#include <linux/tracepoint.h>
+#include "trace_book3s.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr
-#define kvm_trace_symbol_exit \
- {0x100, "SYSTEM_RESET"}, \
- {0x200, "MACHINE_CHECK"}, \
- {0x300, "DATA_STORAGE"}, \
- {0x380, "DATA_SEGMENT"}, \
- {0x400, "INST_STORAGE"}, \
- {0x480, "INST_SEGMENT"}, \
- {0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
- {0x502, "EXTERNAL_HV"}, \
- {0x600, "ALIGNMENT"}, \
- {0x700, "PROGRAM"}, \
- {0x800, "FP_UNAVAIL"}, \
- {0x900, "DECREMENTER"}, \
- {0x980, "HV_DECREMENTER"}, \
- {0xc00, "SYSCALL"}, \
- {0xd00, "TRACE"}, \
- {0xe00, "H_DATA_STORAGE"}, \
- {0xe20, "H_INST_STORAGE"}, \
- {0xe40, "H_EMUL_ASSIST"}, \
- {0xf00, "PERFMON"}, \
- {0xf20, "ALTIVEC"}, \
- {0xf40, "VSX"}
-
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
TP_ARGS(r, vcpu),
--
1.8.3.4
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
2014-12-04 0:48 [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions Suresh E. Warrier
@ 2014-12-17 12:31 ` Alexander Graf
0 siblings, 0 replies; 10+ messages in thread
From: Alexander Graf @ 2014-12-17 12:31 UTC (permalink / raw)
To: Suresh E. Warrier, kvm-ppc@vger.kernel.org, kvm@vger.kernel.org,
Paul Mackerras
On 04.12.14 01:48, Suresh E. Warrier wrote:
> This patch adds trace points in the guest entry and exit code and also
> for exceptions handled by the host in kernel mode - hypercalls and page
> faults. The new events are added to /sys/kernel/debug/tracing/events
> under a new subsystem called kvm_hv.
>
> Acked-by: Paul Mackerras <paulus@samba.org>
> Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
Thanks, applied to kvm-ppc-queue.
Alex
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2014-12-17 12:31 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-12-04 0:48 [PATCH] KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions Suresh E. Warrier
2014-12-17 12:31 ` Alexander Graf
-- strict thread matches above, loose matches on Subject: below --
2014-11-13 23:29 Suresh E. Warrier
2014-11-14 10:56 ` Alexander Graf
2014-11-19 21:54 ` Suresh E. Warrier
2014-11-20 12:08 ` Alexander Graf
2014-11-20 10:40 ` Aneesh Kumar K.V
2014-11-20 12:10 ` Alexander Graf
2014-11-20 14:01 ` Steven Rostedt
2014-12-02 1:19 ` Suresh E. Warrier
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).