From mboxrd@z Thu Jan  1 00:00:00 1970
Message-ID: <53DB1AB3.3080009@cn.fujitsu.com>
Date: Fri, 1 Aug 2014 12:42:27 +0800
From: Gu Zheng
Subject: Re: [Qemu-devel] [RFC PATCH 7/7] cpus: reclaim allocated vCPU objects
To: Anshul Makkar
Cc: qemu-devel, tangchen@cn.fujitsu.com, isimatu.yasuaki@jp.fujitsu.com,
 Chen Fan, Igor Mammedov, afaerber
References: <1405072795-14342-1-git-send-email-guz.fnst@cn.fujitsu.com>
 <1405072795-14342-8-git-send-email-guz.fnst@cn.fujitsu.com>
 <53C881D8.60608@cn.fujitsu.com>

Hi Anshul,

Thanks for your testing.

On 07/30/2014 10:31 PM, Anshul Makkar wrote:
> Hi,
>
> I am testing the cpu-hotunplug patches. I observed that after the
> deletion of the cpu with id = x, if I cpu-add the same cpu again with
> id = x, then qemu exits with the error that the file descriptor
> already exists.

Could you please share the complete reproduction steps? On my test box,
we can re-add a removed cpu with the same id.

>
> On debugging I found that if I give cpu-add, then
> qemu_kvm_cpu_thread_fn->kvm_init_vcpu is called, which sends an IOCTL
> (KVM_CREATE_VCPU) to kvm to create a new fd. As the fd already exists
> in KVM (we never delete the fd from the kernel, we just park it in a
> separate list), it returns an error and QEMU exits. In the above code
> flow, nowhere is it checked whether we have the cpu with cpuid = x
> available in the parked list so that we can reuse it.
>
> Am I missing something, or is this bit yet to be implemented?

Yes, it is implemented, in the same way as you describe above; please
refer to the function kvm_get_vcpu(). (A simplified standalone sketch of
the reuse path is appended at the end of this mail.)

Thanks,
Gu

>
> Thanks
> Anshul Makkar
>
> On Fri, Jul 18, 2014 at 4:09 AM, Gu Zheng wrote:
>> Hi Anshul,
>> On 07/18/2014 12:24 AM, Anshul Makkar wrote:
>>
>>> Are we not going to introduce a new command cpu_del for deleting the cpu?
>>>
>>> I couldn't find any patch for the addition of a cpu_del command. Is this
>>> intentional, and do we intend to use device_del (and similarly device_add)
>>> for cpu hot(un)plug, or was it just skipped to be added later? I have a
>>> patch for the same which I can release, if the intent is to add this
>>> command.
>>
>> The "device_add/device_del" interface is the approved way to support
>> adding/deleting a cpu, and it is also more common and elegant than
>> "cpu_add/cpu_del", so we intend to use device_del rather than cpu_del.
>> And IMO, cpu_add will be replaced by "device_add" sooner or later.
>>
>> Thanks,
>> Gu
>>
>>>
>>> Thanks
>>> Anshul Makkar
>>>
>>> On Fri, Jul 11, 2014 at 11:59 AM, Gu Zheng wrote:
>>>> After ACPI gets a signal to eject a vCPU, the vCPU must be
>>>> removed from the CPU list before it is really removed, and then
>>>> all related vCPU objects are released.
>>>> But we do not close the KVM vcpu fd; we just record it in a list,
>>>> in order to reuse it.
>>>>
>>>> Signed-off-by: Chen Fan
>>>> Signed-off-by: Gu Zheng
>>>> ---
>>>>  cpus.c               |   37 ++++++++++++++++++++++++++++++++
>>>>  include/sysemu/kvm.h |    1 +
>>>>  kvm-all.c            |   57 ++++++++++++++++++++++++++++++++++++++++++++++-
>>>>  3 files changed, 94 insertions(+), 1 deletions(-)
>>>>
>>>> diff --git a/cpus.c b/cpus.c
>>>> index 4dfb889..9a73407 100644
>>>> --- a/cpus.c
>>>> +++ b/cpus.c
>>>> @@ -786,6 +786,24 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
>>>>      qemu_cpu_kick(cpu);
>>>>  }
>>>>
>>>> +static void qemu_kvm_destroy_vcpu(CPUState *cpu)
>>>> +{
>>>> +    CPU_REMOVE(cpu);
>>>> +
>>>> +    if (kvm_destroy_vcpu(cpu) < 0) {
>>>> +        fprintf(stderr, "kvm_destroy_vcpu failed.\n");
>>>> +        exit(1);
>>>> +    }
>>>> +
>>>> +    object_unparent(OBJECT(cpu));
>>>> +}
>>>> +
>>>> +static void qemu_tcg_destroy_vcpu(CPUState *cpu)
>>>> +{
>>>> +    CPU_REMOVE(cpu);
>>>> +    object_unparent(OBJECT(cpu));
>>>> +}
>>>> +
>>>>  static void flush_queued_work(CPUState *cpu)
>>>>  {
>>>>      struct qemu_work_item *wi;
>>>> @@ -877,6 +895,11 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>>>>              }
>>>>          }
>>>>          qemu_kvm_wait_io_event(cpu);
>>>> +        if (cpu->exit && !cpu_can_run(cpu)) {
>>>> +            qemu_kvm_destroy_vcpu(cpu);
>>>> +            qemu_mutex_unlock(&qemu_global_mutex);
>>>> +            return NULL;
>>>> +        }
>>>>      }
>>>>
>>>>      return NULL;
>>>> @@ -929,6 +952,7 @@ static void tcg_exec_all(void);
>>>>  static void *qemu_tcg_cpu_thread_fn(void *arg)
>>>>  {
>>>>      CPUState *cpu = arg;
>>>> +    CPUState *remove_cpu = NULL;
>>>>
>>>>      qemu_tcg_init_cpu_signals();
>>>>      qemu_thread_get_self(cpu->thread);
>>>> @@ -961,6 +985,16 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>>>              }
>>>>          }
>>>>          qemu_tcg_wait_io_event();
>>>> +        CPU_FOREACH(cpu) {
>>>> +            if (cpu->exit && !cpu_can_run(cpu)) {
>>>> +                remove_cpu = cpu;
>>>> +                break;
>>>> +            }
>>>> +        }
>>>> +        if (remove_cpu) {
>>>> +            qemu_tcg_destroy_vcpu(remove_cpu);
>>>> +            remove_cpu = NULL;
>>>> +        }
>>>>      }
>>>>
>>>>      return NULL;
>>>> @@ -1316,6 +1350,9 @@ static void tcg_exec_all(void)
>>>>                  break;
>>>>              }
>>>>          } else if (cpu->stop || cpu->stopped) {
>>>> +            if (cpu->exit) {
>>>> +                next_cpu = CPU_NEXT(cpu);
>>>> +            }
>>>>              break;
>>>>          }
>>>>      }
>>>> diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
>>>> index 174ea36..88e2403 100644
>>>> --- a/include/sysemu/kvm.h
>>>> +++ b/include/sysemu/kvm.h
>>>> @@ -178,6 +178,7 @@ int kvm_has_intx_set_mask(void);
>>>>
>>>>  int kvm_init_vcpu(CPUState *cpu);
>>>>  int kvm_cpu_exec(CPUState *cpu);
>>>> +int kvm_destroy_vcpu(CPUState *cpu);
>>>>
>>>>  #ifdef NEED_CPU_H
>>>>
>>>> diff --git a/kvm-all.c b/kvm-all.c
>>>> index 3ae30ee..25e2a43 100644
>>>> --- a/kvm-all.c
>>>> +++ b/kvm-all.c
>>>> @@ -74,6 +74,12 @@ typedef struct KVMSlot
>>>>
>>>>  typedef struct kvm_dirty_log KVMDirtyLog;
>>>>
>>>> +struct KVMParkedVcpu {
>>>> +    unsigned long vcpu_id;
>>>> +    int kvm_fd;
>>>> +    QLIST_ENTRY(KVMParkedVcpu) node;
>>>> +};
>>>> +
>>>>  struct KVMState
>>>>  {
>>>>      KVMSlot *slots;
>>>> @@ -108,6 +114,7 @@ struct KVMState
>>>>      QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
>>>>      bool direct_msi;
>>>>  #endif
>>>> +    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
>>>>  };
>>>>
>>>>  KVMState *kvm_state;
>>>> @@ -226,6 +233,53 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
>>>>      return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
>>>>  }
>>>>
>>>> +int kvm_destroy_vcpu(CPUState *cpu)
>>>> +{
>>>> +    KVMState *s = kvm_state;
>>>> +    long mmap_size;
>>>> +    struct KVMParkedVcpu *vcpu = NULL;
>>>> +    int ret = 0;
>>>> +
>>>> +    DPRINTF("kvm_destroy_vcpu\n");
>>>> +
>>>> +    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
>>>> +    if (mmap_size < 0) {
>>>> +        ret = mmap_size;
>>>> +        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
>>>> +        goto err;
>>>> +    }
>>>> +
>>>> +    ret = munmap(cpu->kvm_run, mmap_size);
>>>> +    if (ret < 0) {
>>>> +        goto err;
>>>> +    }
>>>> +
>>>> +    vcpu = g_malloc0(sizeof(*vcpu));
>>>> +    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
>>>> +    vcpu->kvm_fd = cpu->kvm_fd;
>>>> +    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
>>>> +err:
>>>> +    return ret;
>>>> +}
>>>> +
>>>> +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
>>>> +{
>>>> +    struct KVMParkedVcpu *cpu;
>>>> +
>>>> +    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
>>>> +        if (cpu->vcpu_id == vcpu_id) {
>>>> +            int kvm_fd;
>>>> +
>>>> +            QLIST_REMOVE(cpu, node);
>>>> +            kvm_fd = cpu->kvm_fd;
>>>> +            g_free(cpu);
>>>> +            return kvm_fd;
>>>> +        }
>>>> +    }
>>>> +
>>>> +    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
>>>> +}
>>>> +
>>>>  int kvm_init_vcpu(CPUState *cpu)
>>>>  {
>>>>      KVMState *s = kvm_state;
>>>> @@ -234,7 +288,7 @@ int kvm_init_vcpu(CPUState *cpu)
>>>>
>>>>      DPRINTF("kvm_init_vcpu\n");
>>>>
>>>> -    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
>>>> +    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
>>>>      if (ret < 0) {
>>>>          DPRINTF("kvm_create_vcpu failed\n");
>>>>          goto err;
>>>> @@ -1404,6 +1458,7 @@ int kvm_init(MachineClass *mc)
>>>>  #ifdef KVM_CAP_SET_GUEST_DEBUG
>>>>      QTAILQ_INIT(&s->kvm_sw_breakpoints);
>>>>  #endif
>>>> +    QLIST_INIT(&s->kvm_parked_vcpus);
>>>>      s->vmfd = -1;
>>>>      s->fd = qemu_open("/dev/kvm", O_RDWR);
>>>>      if (s->fd == -1) {
>>>> --
>>>> 1.7.7
>>>>
>>> .
>>>
>>
>>
> .
>
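
For completeness, here is a minimal standalone sketch of the reuse path
described above. It is only an illustration of the idea, not the QEMU code
itself: the names park_vcpu() and get_vcpu_fd() are made up for this sketch,
and the return value -1 merely stands in for a real KVM_CREATE_VCPU ioctl.
The patch's kvm_destroy_vcpu() and kvm_get_vcpu() are the authoritative
versions.

/*
 * Simplified model: a removed vCPU's fd is parked, keyed by vcpu_id,
 * and a later add of the same vcpu_id reuses the parked fd instead of
 * asking the kernel to create a new one.
 */
#include <stdio.h>
#include <stdlib.h>

struct parked_vcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    struct parked_vcpu *next;
};

static struct parked_vcpu *parked_list;

/* Park an fd instead of closing it (the role of kvm_destroy_vcpu()). */
static void park_vcpu(unsigned long vcpu_id, int kvm_fd)
{
    struct parked_vcpu *vcpu = calloc(1, sizeof(*vcpu));

    vcpu->vcpu_id = vcpu_id;
    vcpu->kvm_fd = kvm_fd;
    vcpu->next = parked_list;
    parked_list = vcpu;
}

/* Reuse a parked fd if one exists (the role of kvm_get_vcpu()). */
static int get_vcpu_fd(unsigned long vcpu_id)
{
    struct parked_vcpu **pp;

    for (pp = &parked_list; *pp; pp = &(*pp)->next) {
        if ((*pp)->vcpu_id == vcpu_id) {
            struct parked_vcpu *vcpu = *pp;
            int kvm_fd = vcpu->kvm_fd;

            *pp = vcpu->next;
            free(vcpu);
            return kvm_fd;   /* reuse: no new KVM_CREATE_VCPU needed */
        }
    }
    return -1;               /* not parked: would call KVM_CREATE_VCPU */
}

int main(void)
{
    park_vcpu(1, 42);        /* removal of vcpu 1 parks its fd 42      */
    printf("re-add vcpu 1 -> fd %d\n", get_vcpu_fd(1));   /* prints 42 */
    printf("add vcpu 2    -> fd %d\n", get_vcpu_fd(2));   /* prints -1 */
    return 0;
}

Compiled and run, this prints fd 42 for the re-added vcpu 1 (reuse) and -1
for vcpu 2 (which would trigger a fresh KVM_CREATE_VCPU in the real code).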