From: "Paluri, PavanKumar" <papaluri@amd.com>
To: Nikunj A Dadhania <nikunj@amd.com>,
seanjc@google.com, pbonzini@redhat.com, kvm@vger.kernel.org
Cc: thomas.lendacky@amd.com, santosh.shukla@amd.com,
Pavan Kumar Paluri <papaluri@amd.com>
Subject: Re: [PATCH] KVM: SEV: Use to_kvm_sev_info() for fetching kvm_sev_info struct
Date: Fri, 24 Jan 2025 07:38:32 -0600 [thread overview]
Message-ID: <97712ebe-fd9f-4549-ab95-e638bc9f3741@amd.com> (raw)
In-Reply-To: <20250123055140.144378-1-nikunj@amd.com>
On 1/22/2025 11:51 PM, Nikunj A Dadhania wrote:
> Simplify code by replacing &to_kvm_svm(kvm)->sev_info with
> to_kvm_sev_info() helper function. Wherever possible, drop the local
> variable declaration and directly use the helper instead.
>
Just thinking out loud...
I still see the local variable retained in a couple of functions
(sev_es_guest(), sev_snp_guest(), ...). I understand the return statement
would become unnecessarily long if it were replaced with to_kvm_sev_info()
inline, so keeping the local makes sense.
> No functional changes.
>
> Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Looks good to me.
Reviewed-by: Pavan Kumar Paluri <papaluri@amd.com>
> ---
> arch/x86/kvm/svm/sev.c | 124 +++++++++++++++++------------------------
> arch/x86/kvm/svm/svm.h | 8 +--
> 2 files changed, 54 insertions(+), 78 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 0f04f365885c..e6fd60aac30c 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -140,7 +140,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
> static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
> {
> struct kvm_vcpu *vcpu = &svm->vcpu;
> - struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
>
> return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
> }
> @@ -226,9 +226,7 @@ static int sev_asid_new(struct kvm_sev_info *sev)
>
> static unsigned int sev_get_asid(struct kvm *kvm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> -
> - return sev->asid;
> + return to_kvm_sev_info(kvm)->asid;
> }
>
> static void sev_asid_free(struct kvm_sev_info *sev)
> @@ -403,7 +401,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
> struct kvm_sev_init *data,
> unsigned long vm_type)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_platform_init_args init_args = {0};
> bool es_active = vm_type != KVM_X86_SEV_VM;
> u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
> @@ -500,10 +498,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int sev_guest_init2(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct kvm_sev_init data;
>
> - if (!sev->need_init)
> + if (!to_kvm_sev_info(kvm)->need_init)
> return -EINVAL;
>
> if (kvm->arch.vm_type != KVM_X86_SEV_VM &&
> @@ -543,14 +540,14 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error)
>
> static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
>
> return __sev_issue_cmd(sev->fd, id, data, error);
> }
>
> static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_data_launch_start start;
> struct kvm_sev_launch_start params;
> void *dh_blob, *session_blob;
> @@ -624,7 +621,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
> unsigned long ulen, unsigned long *n,
> int write)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> unsigned long npages, size;
> int npinned;
> unsigned long locked, lock_limit;
> @@ -686,11 +683,9 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
> static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
> unsigned long npages)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> -
> unpin_user_pages(pages, npages);
> kvfree(pages);
> - sev->pages_locked -= npages;
> + to_kvm_sev_info(kvm)->pages_locked -= npages;
> }
>
> static void sev_clflush_pages(struct page *pages[], unsigned long npages)
> @@ -734,7 +729,6 @@ static unsigned long get_num_contig_pages(unsigned long idx,
> static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct kvm_sev_launch_update_data params;
> struct sev_data_launch_update_data data;
> struct page **inpages;
> @@ -762,7 +756,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> sev_clflush_pages(inpages, npages);
>
> data.reserved = 0;
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
>
> for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
> int offset, len;
> @@ -802,7 +796,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> static int sev_es_sync_vmsa(struct vcpu_svm *svm)
> {
> struct kvm_vcpu *vcpu = &svm->vcpu;
> - struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
> struct sev_es_save_area *save = svm->sev_es.vmsa;
> struct xregs_state *xsave;
> const u8 *s;
> @@ -972,7 +966,6 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
> static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> void __user *measure = u64_to_user_ptr(argp->data);
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_launch_measure data;
> struct kvm_sev_launch_measure params;
> void __user *p = NULL;
> @@ -1005,7 +998,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
> }
>
> cmd:
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
>
> /*
> @@ -1033,19 +1026,17 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_launch_finish data;
>
> if (!sev_guest(kvm))
> return -ENOTTY;
>
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
> }
>
> static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct kvm_sev_guest_status params;
> struct sev_data_guest_status data;
> int ret;
> @@ -1055,7 +1046,7 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> memset(&data, 0, sizeof(data));
>
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
> if (ret)
> return ret;
> @@ -1074,11 +1065,10 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
> unsigned long dst, int size,
> int *error, bool enc)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_dbg data;
>
> data.reserved = 0;
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> data.dst_addr = dst;
> data.src_addr = src;
> data.len = size;
> @@ -1302,7 +1292,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
>
> static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_launch_secret data;
> struct kvm_sev_launch_secret params;
> struct page **pages;
> @@ -1358,7 +1347,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
> data.hdr_address = __psp_pa(hdr);
> data.hdr_len = params.hdr_len;
>
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
>
> kfree(hdr);
> @@ -1378,7 +1367,6 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
> static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> void __user *report = u64_to_user_ptr(argp->data);
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_attestation_report data;
> struct kvm_sev_attestation_report params;
> void __user *p;
> @@ -1411,7 +1399,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
> memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
> }
> cmd:
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
> /*
> * If we query the session length, FW responded with expected data.
> @@ -1441,12 +1429,11 @@ static int
> __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
> struct kvm_sev_send_start *params)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_send_start data;
> int ret;
>
> memset(&data, 0, sizeof(data));
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
>
> params->session_len = data.session_len;
> @@ -1459,7 +1446,6 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
>
> static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_send_start data;
> struct kvm_sev_send_start params;
> void *amd_certs, *session_data;
> @@ -1520,7 +1506,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> data.amd_certs_len = params.amd_certs_len;
> data.session_address = __psp_pa(session_data);
> data.session_len = params.session_len;
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
>
> ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
>
> @@ -1552,12 +1538,11 @@ static int
> __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
> struct kvm_sev_send_update_data *params)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_send_update_data data;
> int ret;
>
> memset(&data, 0, sizeof(data));
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
>
> params->hdr_len = data.hdr_len;
> @@ -1572,7 +1557,6 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
>
> static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_send_update_data data;
> struct kvm_sev_send_update_data params;
> void *hdr, *trans_data;
> @@ -1626,7 +1610,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
> data.guest_address |= sev_me_mask;
> data.guest_len = params.guest_len;
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
>
> ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
>
> @@ -1657,31 +1641,29 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_send_finish data;
>
> if (!sev_guest(kvm))
> return -ENOTTY;
>
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
> }
>
> static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_send_cancel data;
>
> if (!sev_guest(kvm))
> return -ENOTTY;
>
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
> }
>
> static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_data_receive_start start;
> struct kvm_sev_receive_start params;
> int *error = &argp->error;
> @@ -1755,7 +1737,6 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct kvm_sev_receive_update_data params;
> struct sev_data_receive_update_data data;
> void *hdr = NULL, *trans = NULL;
> @@ -1815,7 +1796,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
> data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
> data.guest_address |= sev_me_mask;
> data.guest_len = params.guest_len;
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
>
> ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
> &argp->error);
> @@ -1832,13 +1813,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> struct sev_data_receive_finish data;
>
> if (!sev_guest(kvm))
> return -ENOTTY;
>
> - data.handle = sev->handle;
> + data.handle = to_kvm_sev_info(kvm)->handle;
> return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
> }
>
> @@ -1858,8 +1838,8 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
>
> static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
> {
> - struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
> - struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
> + struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
> + struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
> int r = -EBUSY;
>
> if (dst_kvm == src_kvm)
> @@ -1893,8 +1873,8 @@ static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
>
> static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
> {
> - struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
> - struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
> + struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
> + struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
>
> mutex_unlock(&dst_kvm->lock);
> mutex_unlock(&src_kvm->lock);
> @@ -1968,8 +1948,8 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
>
> static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
> {
> - struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
> - struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
> + struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
> + struct kvm_sev_info *src = to_kvm_sev_info(src_kvm);
> struct kvm_vcpu *dst_vcpu, *src_vcpu;
> struct vcpu_svm *dst_svm, *src_svm;
> struct kvm_sev_info *mirror;
> @@ -2009,8 +1989,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
> * and add the new mirror to the list.
> */
> if (is_mirroring_enc_context(dst_kvm)) {
> - struct kvm_sev_info *owner_sev_info =
> - &to_kvm_svm(dst->enc_context_owner)->sev_info;
> + struct kvm_sev_info *owner_sev_info = to_kvm_sev_info(dst->enc_context_owner);
>
> list_del(&src->mirror_entry);
> list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
> @@ -2069,7 +2048,7 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
>
> int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> {
> - struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *dst_sev = to_kvm_sev_info(kvm);
> struct kvm_sev_info *src_sev, *cg_cleanup_sev;
> CLASS(fd, f)(source_fd);
> struct kvm *source_kvm;
> @@ -2093,7 +2072,7 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> goto out_unlock;
> }
>
> - src_sev = &to_kvm_svm(source_kvm)->sev_info;
> + src_sev = to_kvm_sev_info(source_kvm);
>
> dst_sev->misc_cg = get_current_misc_cg();
> cg_cleanup_sev = dst_sev;
> @@ -2181,7 +2160,7 @@ static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int snp_bind_asid(struct kvm *kvm, int *error)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_data_snp_activate data = {0};
>
> data.gctx_paddr = __psp_pa(sev->snp_context);
> @@ -2191,7 +2170,7 @@ static int snp_bind_asid(struct kvm *kvm, int *error)
>
> static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_data_snp_launch_start start = {0};
> struct kvm_sev_snp_launch_start params;
> int rc;
> @@ -2260,7 +2239,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
> void __user *src, int order, void *opaque)
> {
> struct sev_gmem_populate_args *sev_populate_args = opaque;
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> int n_private = 0, ret, i;
> int npages = (1 << order);
> gfn_t gfn;
> @@ -2350,7 +2329,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
>
> static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_gmem_populate_args sev_populate_args = {0};
> struct kvm_sev_snp_launch_update params;
> struct kvm_memory_slot *memslot;
> @@ -2434,7 +2413,7 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_data_snp_launch_update data = {};
> struct kvm_vcpu *vcpu;
> unsigned long i;
> @@ -2482,7 +2461,7 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
>
> static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct kvm_sev_snp_launch_finish params;
> struct sev_data_snp_launch_finish *data;
> void *id_block = NULL, *id_auth = NULL;
> @@ -2677,7 +2656,7 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
> int sev_mem_enc_register_region(struct kvm *kvm,
> struct kvm_enc_region *range)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct enc_region *region;
> int ret = 0;
>
> @@ -2729,7 +2708,7 @@ int sev_mem_enc_register_region(struct kvm *kvm,
> static struct enc_region *
> find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct list_head *head = &sev->regions_list;
> struct enc_region *i;
>
> @@ -2824,9 +2803,9 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> * The mirror kvm holds an enc_context_owner ref so its asid can't
> * disappear until we're done with it
> */
> - source_sev = &to_kvm_svm(source_kvm)->sev_info;
> + source_sev = to_kvm_sev_info(source_kvm);
> kvm_get_kvm(source_kvm);
> - mirror_sev = &to_kvm_svm(kvm)->sev_info;
> + mirror_sev = to_kvm_sev_info(kvm);
> list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
>
> /* Set enc_context_owner and copy its encryption context over */
> @@ -2854,7 +2833,7 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
>
> static int snp_decommission_context(struct kvm *kvm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct sev_data_snp_addr data = {};
> int ret;
>
> @@ -2879,7 +2858,7 @@ static int snp_decommission_context(struct kvm *kvm)
>
> void sev_vm_destroy(struct kvm *kvm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> struct list_head *head = &sev->regions_list;
> struct list_head *pos, *q;
>
> @@ -3933,7 +3912,6 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
>
> static int sev_snp_ap_creation(struct vcpu_svm *svm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
> struct kvm_vcpu *vcpu = &svm->vcpu;
> struct kvm_vcpu *target_vcpu;
> struct vcpu_svm *target_svm;
> @@ -3974,7 +3952,7 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
> u64 sev_features;
>
> sev_features = vcpu->arch.regs[VCPU_REGS_RAX];
> - sev_features ^= sev->vmsa_features;
> + sev_features ^= to_kvm_sev_info(svm->vcpu.kvm)->vmsa_features;
>
> if (sev_features & SVM_SEV_FEAT_INT_INJ_MODES) {
> vcpu_unimpl(vcpu, "vmgexit: invalid AP injection mode [%#lx] from guest\n",
> @@ -4134,7 +4112,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
> {
> struct vmcb_control_area *control = &svm->vmcb->control;
> struct kvm_vcpu *vcpu = &svm->vcpu;
> - struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
> u64 ghcb_info;
> int ret = 1;
>
> @@ -4354,7 +4332,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
> ret = kvm_emulate_ap_reset_hold(vcpu);
> break;
> case SVM_VMGEXIT_AP_JUMP_TABLE: {
> - struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
>
> switch (control->exit_info_1) {
> case 0:
> @@ -4565,7 +4543,7 @@ void sev_init_vmcb(struct vcpu_svm *svm)
> void sev_es_vcpu_reset(struct vcpu_svm *svm)
> {
> struct kvm_vcpu *vcpu = &svm->vcpu;
> - struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
>
> /*
> * Set the GHCB MSR value as per the GHCB specification when emulating
> @@ -4833,7 +4811,7 @@ static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
>
> int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
> kvm_pfn_t pfn_aligned;
> gfn_t gfn_aligned;
> int level, rc;
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 9d7cdb8fbf87..5b159f017055 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -361,20 +361,18 @@ static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
> #ifdef CONFIG_KVM_AMD_SEV
> static __always_inline bool sev_guest(struct kvm *kvm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> -
> - return sev->active;
> + return to_kvm_sev_info(kvm)->active;
> }
> static __always_inline bool sev_es_guest(struct kvm *kvm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
>
> return sev->es_active && !WARN_ON_ONCE(!sev->active);
> }
>
> static __always_inline bool sev_snp_guest(struct kvm *kvm)
> {
> - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
>
> return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
> !WARN_ON_ONCE(!sev_es_guest(kvm));
>
> base-commit: 86eb1aef7279ec68fe9b7a44685efc09aa56a8f0
next prev parent reply other threads:[~2025-01-24 13:38 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-23 5:51 [PATCH] KVM: SEV: Use to_kvm_sev_info() for fetching kvm_sev_info struct Nikunj A Dadhania
2025-01-23 7:50 ` Gupta, Pankaj
2025-01-24 13:38 ` Paluri, PavanKumar [this message]
2025-02-15 0:50 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=97712ebe-fd9f-4549-ab95-e638bc9f3741@amd.com \
--to=papaluri@amd.com \
--cc=kvm@vger.kernel.org \
--cc=nikunj@amd.com \
--cc=pbonzini@redhat.com \
--cc=santosh.shukla@amd.com \
--cc=seanjc@google.com \
--cc=thomas.lendacky@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox