* [kvm:queue 5/24] arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*'
@ 2026-05-14 3:44 kernel test robot
0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2026-05-14 3:44 UTC (permalink / raw)
To: Chang S. Bae; +Cc: oe-kbuild-all, kvm, Farrah Chen, Paolo Bonzini
Hi Chang,
FYI, the error/warning was bisected to this commit, please ignore it if it's irrelevant.
tree: https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
head: 2b5e4245e1d31fd0858bb7abbd82af85a6457c33
commit: 6dec918c1fc7766e505e4ac5cdbbc28a0cc73819 [5/24] KVM: SVM: Macrofy GPR swapping in __svm_vcpu_run()
config: i386-randconfig-r062-20260514 (https://download.01.org/0day-ci/archive/20260514/202605141116.deKGCTHy-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260514/202605141116.deKGCTHy-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202605141116.deKGCTHy-lkp@intel.com/
All errors (new ones prefixed by >>):
arch/x86/kvm/svm/vmenter.S: Assembler messages:
>> arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*'
>> arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*'
>> arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*'
>> arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*'
>> arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:101: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:116: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:116: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:116: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:116: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:116: Error: invalid operands (*UND* and *ABS* sections) for `*'
arch/x86/kvm/svm/vmenter.S:116: Error: invalid operands (*UND* and *ABS* sections) for `*'
Kconfig warnings: (for reference only)
WARNING: unmet direct dependencies detected for MFD_STMFX
Depends on [n]: HAS_IOMEM [=y] && I2C [=y] && OF [=n]
Selected by [y]:
- PINCTRL_STMFX [=y] && PINCTRL [=y] && I2C [=y] && HAS_IOMEM [=y]
vim +95 arch/x86/kvm/svm/vmenter.S
72
73 /* Clobbers RAX, RCX, RDX (and ESI on 32-bit), consumes RDI (@svm). */
74 RESTORE_GUEST_SPEC_CTRL
75 801:
76
77 /*
78 * Use a single vmcb (vmcb01 because it's always valid) for
79 * context switching guest state via VMLOAD/VMSAVE, that way
80 * the state doesn't need to be copied between vmcb01 and
81 * vmcb02 when switching vmcbs for nested virtualization.
82 */
83 mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
84 1: vmload %_ASM_AX
85 2:
86
87 /* Get svm->current_vmcb->pa into RAX. */
88 mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
89 mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
90
91 /*
92 * Load guest registers. Intentionally omit %_ASM_AX and %_ASM_SP as
93 * context switched by hardware
94 */
> 95 LOAD_REGS %_ASM_DI, SVM_vcpu_arch_regs, \
96 %_ASM_CX, %_ASM_DX, %_ASM_BX, %_ASM_BP, %_ASM_SI
97 #ifdef CONFIG_X86_64
98 LOAD_REGS %_ASM_DI, SVM_vcpu_arch_regs, \
99 %r8, %r9, %r10, %r11, %r12, %r13, %r14, %r15
100 #endif
101 LOAD_REGS %_ASM_DI, SVM_vcpu_arch_regs, %_ASM_DI
102
103 /* Clobbers EFLAGS.ZF */
104 SVM_CLEAR_CPU_BUFFERS
105
106 /* Enter guest mode */
107 3: vmrun %_ASM_AX
108 4:
109 /* Pop @svm to RAX while it's the only available register. */
110 pop %_ASM_AX
111
112 /*
113 * Save all guest registers. Intentionally omit %_ASM_AX and %_ASM_SP as
114 * context switched by hardware
115 */
116 STORE_REGS %_ASM_AX, SVM_vcpu_arch_regs, \
117 %_ASM_CX, %_ASM_DX, %_ASM_BX, %_ASM_BP, %_ASM_SI, %_ASM_DI
118 #ifdef CONFIG_X86_64
119 STORE_REGS %_ASM_AX, SVM_vcpu_arch_regs, \
120 %r8, %r9, %r10, %r11, %r12, %r13, %r14, %r15
121 #endif
122
123 /* @svm can stay in RDI from now on. */
124 mov %_ASM_AX, %_ASM_DI
125
126 mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
127 5: vmsave %_ASM_AX
128 6:
129
130 /* Restores GSBASE among other things, allowing access to percpu data. */
131 pop %_ASM_AX
132 7: vmload %_ASM_AX
133 8:
134
135 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
136 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
137
138 /*
139 * Clobbers RAX, RCX, RDX (and ESI, EDI on 32-bit), consumes RDI (@svm)
140 * and RSP (pointer to @spec_ctrl_intercepted).
141 */
142 RESTORE_HOST_SPEC_CTRL
143 901:
144
145 /*
146 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
147 * untrained as soon as we exit the VM and are back to the
148 * kernel. This should be done before re-enabling interrupts
149 * because interrupt handlers won't sanitize 'ret' if the return is
150 * from the kernel.
151 */
152 UNTRAIN_RET_VM
153
154 /*
155 * Clear all general purpose registers except RSP and RAX to prevent
156 * speculative use of the guest's values, even those that are reloaded
157 * via the stack. In theory, an L1 cache miss when restoring registers
158 * could lead to speculative execution with the guest's values.
159 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
160 * free. RSP and RAX are exempt as they are restored by hardware
161 * during VM-Exit.
162 */
163 CLEAR_REGS %ecx, %edx, %ebx, %ebp, %esi, %edi
164 #ifdef CONFIG_X86_64
165 CLEAR_REGS %r8d, %r9d, %r10d, %r11d, %r12d, %r13d, %r14d, %r15d
166 #endif
167
168 /* "Pop" @enter_flags. */
169 pop %_ASM_BX
170
171 pop %_ASM_BX
172
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2026-05-14 3:44 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-14 3:44 [kvm:queue 5/24] arch/x86/kvm/svm/vmenter.S:95: Error: invalid operands (*UND* and *ABS* sections) for `*' kernel test robot
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox