* [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 9:16 UTC
To: kvm; +Cc: avi, mst
Here is a quick prototype of what we discussed yesterday. This one
caches only MSI interrupts for now. The obvious problem is that not
all interrupts (namely IPIs and MSIs injected via KVM_CAP_SIGNAL_MSI) use the
irq routing table, so they cannot be cached.
Gleb Natapov (2):
Call irq_rt callback under rcu_read_lock()
Cache msi irq destination.
arch/x86/kvm/lapic.c | 2 +-
include/linux/kvm_host.h | 1 +
virt/kvm/ioapic.c | 2 +-
virt/kvm/ioapic.h | 3 ++-
virt/kvm/irq_comm.c | 37 ++++++++++++++++++++-----------------
5 files changed, 25 insertions(+), 20 deletions(-)
--
1.7.10.4

* [RFC PATCH 1/2] Call irq_rt callback under rcu_read_lock()
From: Gleb Natapov @ 2012-08-13 9:16 UTC
To: kvm; +Cc: avi, mst
Callbacks no longer sleep, so they can be invoked directly under rcu_read_lock().
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
virt/kvm/irq_comm.c | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 7118be0..aad58e7 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -143,8 +143,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
*/
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
- struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
- int ret = -1, i = 0;
+ struct kvm_kernel_irq_routing_entry *e;
+ int ret = -1;
struct kvm_irq_routing_table *irq_rt;
struct hlist_node *n;
@@ -157,19 +157,14 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
rcu_read_lock();
irq_rt = rcu_dereference(kvm->irq_routing);
if (irq < irq_rt->nr_rt_entries)
- hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
- irq_set[i++] = *e;
+ hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+ int r = e->set(e, kvm, irq_source_id, level);
+ if (r < 0)
+ continue;
+ ret = r + ((ret < 0) ? 0 : ret);
+ }
rcu_read_unlock();
- while(i--) {
- int r;
- r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
- if (r < 0)
- continue;
-
- ret = r + ((ret < 0) ? 0 : ret);
- }
-
return ret;
}
--
1.7.10.4

* [RFC PATCH 2/2] Cache msi irq destination.
From: Gleb Natapov @ 2012-08-13 9:16 UTC
To: kvm; +Cc: avi, mst
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
arch/x86/kvm/lapic.c | 2 +-
include/linux/kvm_host.h | 1 +
virt/kvm/ioapic.c | 2 +-
virt/kvm/ioapic.h | 3 ++-
virt/kvm/irq_comm.c | 16 ++++++++++++----
5 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 18d149d..367a514 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -629,7 +629,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
irq.vector);
- kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
+ kvm_irq_delivery_to_apic(NULL, apic->vcpu->kvm, apic, &irq);
}
static u32 apic_get_tmcct(struct kvm_lapic *apic)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d2b897e..bcd3dc7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -271,6 +271,7 @@ struct kvm_kernel_irq_routing_entry {
struct msi_msg msi;
};
struct hlist_node link;
+ struct kvm_vcpu *vcpu;
};
#ifdef __KVM_HAVE_IOAPIC
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index ef61d52..e6c8717 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -188,7 +188,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
}
#endif
- return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
+ return kvm_irq_delivery_to_apic(NULL, ioapic->kvm, NULL, &irqe);
}
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index a30abfe..2a715bd 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -78,7 +78,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
int level);
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+int kvm_irq_delivery_to_apic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index aad58e7..b556c2c 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -61,11 +61,12 @@ inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
#endif
}
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+int kvm_irq_delivery_to_apic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq)
{
- int i, r = -1;
- struct kvm_vcpu *vcpu, *lowest = NULL;
+ int i, r = -1, c = 0;
+ struct kvm_vcpu *vcpu, *cache = NULL, *lowest = NULL;
if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
kvm_is_dm_lowest_prio(irq))
@@ -82,6 +83,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
if (!kvm_is_dm_lowest_prio(irq)) {
if (r < 0)
r = 0;
+ c++;
+ cache = vcpu;
r += kvm_apic_set_irq(vcpu, irq);
} else if (kvm_lapic_enabled(vcpu)) {
if (!lowest)
@@ -93,6 +96,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
if (lowest)
r = kvm_apic_set_irq(lowest, irq);
+ else if (e && c == 1)
+ e->vcpu = cache; /* cache it */
return r;
}
@@ -118,7 +123,9 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
irq.shorthand = 0;
/* TODO Deal with RH bit of MSI message address */
- return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
+ if (e->vcpu)
+ return kvm_apic_set_irq(e->vcpu, &irq);
+ return kvm_irq_delivery_to_apic(e, kvm, NULL, &irq);
}
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
@@ -131,6 +138,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
route.msi.address_lo = msi->address_lo;
route.msi.address_hi = msi->address_hi;
route.msi.data = msi->data;
+ route.vcpu = NULL;
return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
}
--
1.7.10.4

* Re: [RFC PATCH 2/2] Cache msi irq destination.
From: Avi Kivity @ 2012-08-13 9:32 UTC
To: Gleb Natapov; +Cc: kvm, mst
On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
>
> -int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
> +int kvm_irq_delivery_to_apic(struct kvm_kernel_irq_routing_entry *e,
> + struct kvm *kvm, struct kvm_lapic *src,
> struct kvm_lapic_irq *irq)
Would be nicer to put e at the end, and explain that it is optional.
> {
> - int i, r = -1;
> - struct kvm_vcpu *vcpu, *lowest = NULL;
> + int i, r = -1, c = 0;
> + struct kvm_vcpu *vcpu, *cache = NULL, *lowest = NULL;
>
> if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
> kvm_is_dm_lowest_prio(irq))
> @@ -82,6 +83,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
> if (!kvm_is_dm_lowest_prio(irq)) {
> if (r < 0)
> r = 0;
> + c++;
> + cache = vcpu;
> r += kvm_apic_set_irq(vcpu, irq);
> } else if (kvm_lapic_enabled(vcpu)) {
> if (!lowest)
> @@ -93,6 +96,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
>
> if (lowest)
> r = kvm_apic_set_irq(lowest, irq);
> + else if (e && c == 1)
> + e->vcpu = cache; /* cache it */
>
> return r;
> }
> @@ -118,7 +123,9 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
> irq.shorthand = 0;
>
> /* TODO Deal with RH bit of MSI message address */
> - return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
> + if (e->vcpu)
> + return kvm_apic_set_irq(e->vcpu, &irq);
> + return kvm_irq_delivery_to_apic(e, kvm, NULL, &irq);
> }
>
> int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
> @@ -131,6 +138,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
> route.msi.address_lo = msi->address_lo;
> route.msi.address_hi = msi->address_hi;
> route.msi.data = msi->data;
> + route.vcpu = NULL;
>
> return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> }
>
Missing cache invalidate on apicid write?
Otherwise nice and simple.
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 9:34 UTC
To: Gleb Natapov; +Cc: kvm, avi
On Mon, Aug 13, 2012 at 12:16:46PM +0300, Gleb Natapov wrote:
> Here is a quick prototype of what we discussed yesterday. This one
> caches only MSI interrupts for now. The obvious problem is that not
> all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> routing table, so they cannot be cached.
I thought this means ID changes need to replace the routing table, no?
>
> Gleb Natapov (2):
> Call irq_rt callback under rcu_read_lock()
> Cache msi irq destination.
>
> arch/x86/kvm/lapic.c | 2 +-
> include/linux/kvm_host.h | 1 +
> virt/kvm/ioapic.c | 2 +-
> virt/kvm/ioapic.h | 3 ++-
> virt/kvm/irq_comm.c | 37 ++++++++++++++++++++-----------------
> 5 files changed, 25 insertions(+), 20 deletions(-)
>
> --
> 1.7.10.4

* Re: [RFC PATCH 2/2] Cache msi irq destination.
From: Gleb Natapov @ 2012-08-13 9:34 UTC
To: Avi Kivity; +Cc: kvm, mst
On Mon, Aug 13, 2012 at 12:32:44PM +0300, Avi Kivity wrote:
> On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> >
> > -int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
> > +int kvm_irq_delivery_to_apic(struct kvm_kernel_irq_routing_entry *e,
> > + struct kvm *kvm, struct kvm_lapic *src,
> > struct kvm_lapic_irq *irq)
>
> Would be nicer to put e at the end, and explain that it is optional.
>
Just a prototype to see how it goes :)
> > {
> > - int i, r = -1;
> > - struct kvm_vcpu *vcpu, *lowest = NULL;
> > + int i, r = -1, c = 0;
> > + struct kvm_vcpu *vcpu, *cache = NULL, *lowest = NULL;
> >
> > if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
> > kvm_is_dm_lowest_prio(irq))
> > @@ -82,6 +83,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
> > if (!kvm_is_dm_lowest_prio(irq)) {
> > if (r < 0)
> > r = 0;
> > + c++;
> > + cache = vcpu;
> > r += kvm_apic_set_irq(vcpu, irq);
> > } else if (kvm_lapic_enabled(vcpu)) {
> > if (!lowest)
> > @@ -93,6 +96,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
> >
> > if (lowest)
> > r = kvm_apic_set_irq(lowest, irq);
> > + else if (e && c == 1)
> > + e->vcpu = cache; /* cache it */
> >
> > return r;
> > }
> > @@ -118,7 +123,9 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
> > irq.shorthand = 0;
> >
> > /* TODO Deal with RH bit of MSI message address */
> > - return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
> > + if (e->vcpu)
> > + return kvm_apic_set_irq(e->vcpu, &irq);
> > + return kvm_irq_delivery_to_apic(e, kvm, NULL, &irq);
> > }
> >
> > int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
> > @@ -131,6 +138,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
> > route.msi.address_lo = msi->address_lo;
> > route.msi.address_hi = msi->address_hi;
> > route.msi.data = msi->data;
> > + route.vcpu = NULL;
> >
> > return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> > }
> >
>
> Missing cache invalidate on apicid write?
>
Yes. We need to call kvm_set_irq_routing() in strategic places. Same for
the ioapic.
> Otherwise nice and simple.
>
>
> --
> error compiling committee.c: too many arguments to function
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 9:36 UTC
To: Michael S. Tsirkin; +Cc: kvm, avi
On Mon, Aug 13, 2012 at 12:34:32PM +0300, Michael S. Tsirkin wrote:
> On Mon, Aug 13, 2012 at 12:16:46PM +0300, Gleb Natapov wrote:
> > Here is a quick prototype of what we discussed yesterday. This one
> > caches only MSI interrupts for now. The obvious problem is that not
> > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > routing table, so they cannot be cached.
>
> I thought this means ID changes need to replace the routing table, no?
>
Correct. This is missing from the patches, but as I said this is just a
prototype to see if the approach is feasible. What this prototype shows is
that we have problems with IPIs and MSIs from userspace.
> >
> > Gleb Natapov (2):
> > Call irq_rt callback under rcu_read_lock()
> > Cache msi irq destination.
> >
> > arch/x86/kvm/lapic.c | 2 +-
> > include/linux/kvm_host.h | 1 +
> > virt/kvm/ioapic.c | 2 +-
> > virt/kvm/ioapic.h | 3 ++-
> > virt/kvm/irq_comm.c | 37 ++++++++++++++++++++-----------------
> > 5 files changed, 25 insertions(+), 20 deletions(-)
> >
> > --
> > 1.7.10.4
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 9:36 UTC
To: Gleb Natapov; +Cc: kvm, mst, Jan Kiszka
On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> Here is a quick prototype of what we discussed yesterday. This one
> caches only MSI interrupts for now. The obvious problem is that not
> all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> routing table, so they cannot be cached.
We can have a small rcu-managed hash table to look those up.
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 9:43 UTC
To: Gleb Natapov; +Cc: kvm, mst
On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> Here is a quick prototype of what we discussed yesterday. This one
> caches only MSI interrupts for now. The obvious problem is that not
> all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> routing table, so they cannot be cached.
Missing: switch the uncached path to a work queue, so we don't have to
iterate over all vcpus in interrupt context.
That isn't trivial; for edge-triggered interrupts we need to ignore
zeros (if polarity=0) but for level-triggered interrupts we need them to
override the previous setting. But we don't know the trigger mode and
polarity at this point.
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 9:46 UTC
To: Gleb Natapov; +Cc: kvm, avi
On Mon, Aug 13, 2012 at 12:36:27PM +0300, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 12:34:32PM +0300, Michael S. Tsirkin wrote:
> > On Mon, Aug 13, 2012 at 12:16:46PM +0300, Gleb Natapov wrote:
> > > Here is a quick prototype of what we discussed yesterday. This one
> > > caches only MSI interrupts for now. The obvious problem is that not
> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > > routing table, so they cannot be cached.
> >
> > I thought this means ID changes need to replace the routing table, no?
> >
> Correct. This is missing from the patches, but as I said this is just
> prototype to see if it is feasible.
> What this prototype shows is that we
> have problems with IPIs and MSIs from userspace.
I think it's a worthwhile optimization all the same. When you feel it's
ready, I'm willing to test to see if it helps vhost.
> > >
> > > Gleb Natapov (2):
> > > Call irq_rt callback under rcu_read_lock()
> > > Cache msi irq destination.
> > >
> > > arch/x86/kvm/lapic.c | 2 +-
> > > include/linux/kvm_host.h | 1 +
> > > virt/kvm/ioapic.c | 2 +-
> > > virt/kvm/ioapic.h | 3 ++-
> > > virt/kvm/irq_comm.c | 37 ++++++++++++++++++++-----------------
> > > 5 files changed, 25 insertions(+), 20 deletions(-)
> > >
> > > --
> > > 1.7.10.4
>
> --
> Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 9:48 UTC
To: Michael S. Tsirkin; +Cc: kvm, avi
On Mon, Aug 13, 2012 at 12:46:09PM +0300, Michael S. Tsirkin wrote:
> On Mon, Aug 13, 2012 at 12:36:27PM +0300, Gleb Natapov wrote:
> > On Mon, Aug 13, 2012 at 12:34:32PM +0300, Michael S. Tsirkin wrote:
> > > On Mon, Aug 13, 2012 at 12:16:46PM +0300, Gleb Natapov wrote:
> > > > Here is a quick prototype of what we discussed yesterday. This one
> > > > caches only MSI interrupts for now. The obvious problem is that not
> > > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > > > routing table, so they cannot be cached.
> > >
> > > I thought this means ID changes need to replace the routing table, no?
> > >
> > Correct. This is missing from the patches, but as I said this is just
> > prototype to see if it is feasible.
> > What this prototype shows is that we
> > have problems with IPIs and MSIs from userspace.
>
>
> I think it's a worthwhile optimization all the same. When you feel it's
> ready, I'm willing to test to see if it helps vhost.
>
>
You can test it now. It passes my very simple testing. Guests usually do
not change apic ids after HW initialization. At worst it will fail and
you'll tell me how badly it failed :)
> > > >
> > > > Gleb Natapov (2):
> > > > Call irq_rt callback under rcu_read_lock()
> > > > Cache msi irq destination.
> > > >
> > > > arch/x86/kvm/lapic.c | 2 +-
> > > > include/linux/kvm_host.h | 1 +
> > > > virt/kvm/ioapic.c | 2 +-
> > > > virt/kvm/ioapic.h | 3 ++-
> > > > virt/kvm/irq_comm.c | 37 ++++++++++++++++++++-----------------
> > > > 5 files changed, 25 insertions(+), 20 deletions(-)
> > > >
> > > > --
> > > > 1.7.10.4
> >
> > --
> > Gleb.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 9:51 UTC
To: Avi Kivity; +Cc: Gleb Natapov, kvm
On Mon, Aug 13, 2012 at 12:43:50PM +0300, Avi Kivity wrote:
> On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> > Here is a quick prototype of what we discussed yesterday. This one
> > caches only MSI interrupts for now. The obvious problem is that not
> > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > routing table, so they cannot be cached.
>
> Missing: switch the uncached path to a work queue, so we don't have to
> iterate over all vcpus in interrupt context.
>
> That isn't trivial; for edge-triggered interrupts we need to ignore
> zeros (if polarity=0) but for level-triggered interrupts we need them to
> override the previous setting. But we don't know the trigger mode and
> polarity at this point.
Instead of doing it like this, can we simply require
callers to use a workqueue?
Add a kvm_set_msi_inatomic() that returns -EWOULDBLOCK if the cache is NULL.
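Roughly something like this, as a sketch only (it reuses the cached vcpu
field from patch 2/2 and the existing MSI decoding; this is not part of the
posted patches):

/* Sketch: deliver a cached MSI without sleeping, or ask the caller to defer. */
static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
				struct kvm *kvm)
{
	struct kvm_lapic_irq irq;

	if (!e->vcpu)
		return -EWOULDBLOCK;	/* no cached destination, use a workqueue */

	irq.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	irq.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
	irq.delivery_mode = e->msi.data & 0x700;
	irq.level = 1;
	irq.shorthand = 0;

	/* MSIs are edge triggered, so injecting directly is safe here. */
	return kvm_apic_set_irq(e->vcpu, &irq);
}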
>
>
> --
> error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 9:53 UTC
To: Michael S. Tsirkin; +Cc: Avi Kivity, kvm
On Mon, Aug 13, 2012 at 12:51:49PM +0300, Michael S. Tsirkin wrote:
> On Mon, Aug 13, 2012 at 12:43:50PM +0300, Avi Kivity wrote:
> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> > > Here is a quick prototype of what we discussed yesterday. This one
> > > caches only MSI interrupts for now. The obvious problem is that not
> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > > routing table, so they cannot be cached.
> >
> > Missing: switch the uncached path to a work queue, so we don't have to
> > iterate over all vcpus in interrupt context.
> >
> > That isn't trivial; for edge-triggered interrupts we need to ignore
> > zeros (if polarity=0) but for level-triggered interrupts we need them to
> > override the previous setting. But we don't know the trigger mode and
> > polarity at this point.
>
> Instead of doing it like this, can we simply require
> callers to use a workqueue?
> Add kvm_set_msi_inatomic that returns WOULDBLOCK if cache is NULL.
>
kvm_set_msi() is simple since it is always edge-triggered and bails out very
early if level is 0. I do not yet understand where the problem with the
ioapic is, though.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 10:12 UTC
To: Avi Kivity; +Cc: Gleb Natapov, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
> On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> > Here is a quick prototype of what we discussed yesterday. This one
> > caches only MSI interrupts for now. The obvious problem is that not
> > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > routing table, so they cannot be cached.
>
> We can have a small rcu-managed hash table to look those up.
Yes but how small? We probably need at least one entry
per vcpu, no?
> --
> error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 10:16 UTC
To: Michael S. Tsirkin; +Cc: Avi Kivity, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> > > Here is a quick prototype of what we discussed yesterday. This one
> > > caches only MSI interrupts for now. The obvious problem is that not
> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > > routing table, so they cannot be cached.
> >
> > We can have a small rcu-managed hash table to look those up.
>
> Yes but how small? We probably need at least one entry
> per vcpu, no?
>
One entry? We will spend more time managing it than injecting interrupts
:) Ideally we need an entry for each IPI sent and for each potential MSI
from userspace. What happens when the hash table is full? Do we stop caching,
or invalidate old entries? If the latter, the cache can go valid->invalid,
which may complicate the code.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 10:21 UTC
To: Gleb Natapov; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On 08/13/2012 01:16 PM, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
>> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
>> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
>> > > Here is a quick prototype of what we discussed yesterday. This one
>> > > caches only MSI interrupts for now. The obvious problem is that not
>> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
>> > > routing table, so they cannot be cached.
>> >
>> > We can have a small rcu-managed hash table to look those up.
>>
>> Yes but how small? We probably need at least one entry
>> per vcpu, no?
>>
> One entry? We will spend more time managing it than injecting interrupts
> :) ideally we need entry for each IPI sent and for each potential MSI
> from userspace. What happens when hash table is full?
Drop the entire cache.
> We stop caching or
> invalidate old entries? If later then cache can go valid->invalid which
> may complicate the code.
>
We can drop the entire cache via rcu freeing. In fact we can have a
closed hash allocated as a single blob, easy to manage.
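For example, something along these lines (purely a sketch with made-up
names; the dest_cache pointer hanging off struct kvm is hypothetical and
entries would be filled on the slow path):

struct dest_cache_entry {
	u32 key;			/* hash of dest id/mode/vector etc. */
	struct kvm_vcpu *vcpu;
};

struct dest_cache {
	struct rcu_head rcu;
	unsigned int size;		/* number of slots, power of two */
	struct dest_cache_entry entries[];
};

/* Sketch: throw away the whole cache; readers under rcu_read_lock() keep
 * the old blob until the grace period ends, so no valid->invalid
 * transition is ever visible to them. */
static void dest_cache_flush(struct kvm *kvm, unsigned int size)
{
	struct dest_cache *old, *new;

	new = kzalloc(sizeof(*new) + size * sizeof(new->entries[0]),
		      GFP_KERNEL);
	if (!new)
		return;
	new->size = size;

	mutex_lock(&kvm->irq_lock);
	old = rcu_dereference_protected(kvm->dest_cache,	/* hypothetical field */
					lockdep_is_held(&kvm->irq_lock));
	rcu_assign_pointer(kvm->dest_cache, new);
	mutex_unlock(&kvm->irq_lock);

	if (old)
		kfree_rcu(old, rcu);
}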
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 10:24 UTC
To: Avi Kivity; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 01:21:33PM +0300, Avi Kivity wrote:
> On 08/13/2012 01:16 PM, Gleb Natapov wrote:
> > On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
> >> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
> >> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> >> > > Here is a quick prototype of what we discussed yesterday. This one
> >> > > caches only MSI interrupts for now. The obvious problem is that not
> >> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> >> > > routing table, so they cannot be cached.
> >> >
> >> > We can have a small rcu-managed hash table to look those up.
> >>
> >> Yes but how small? We probably need at least one entry
> >> per vcpu, no?
> >>
> > One entry? We will spend more time managing it than injecting interrupts
> > :) ideally we need entry for each IPI sent and for each potential MSI
> > from userspace. What happens when hash table is full?
>
> Drop the entire cache.
>
OK. Then it should be big enough to not do it frequently.
> > We stop caching or
> > invalidate old entries? If later then cache can go valid->invalid which
> > may complicate the code.
> >
>
> We can drop the entire cache via rcu freeing. In fact we can have a
> closed hash allocated as a single blob, easy to manage.
>
That's what I am looking at doing, yes.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 10:31 UTC
To: Gleb Natapov; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On 08/13/2012 01:24 PM, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 01:21:33PM +0300, Avi Kivity wrote:
>> On 08/13/2012 01:16 PM, Gleb Natapov wrote:
>> > On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
>> >> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
>> >> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
>> >> > > Here is a quick prototype of what we discussed yesterday. This one
>> >> > > caches only MSI interrupts for now. The obvious problem is that not
>> >> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
>> >> > > routing table, so they cannot be cached.
>> >> >
>> >> > We can have a small rcu-managed hash table to look those up.
>> >>
>> >> Yes but how small? We probably need at least one entry
>> >> per vcpu, no?
>> >>
>> > One entry? We will spend more time managing it than injecting interrupts
>> > :) ideally we need entry for each IPI sent and for each potential MSI
>> > from userspace. What happens when hash table is full?
>>
>> Drop the entire cache.
>>
> OK. Then it should be big enough to not do it frequently.
Should be sized N * vcpus, where N is several dozen (generous amount of
non-device vectors, though multicast will break it since it's
essentially random).
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 10:33 UTC
To: Avi Kivity; +Cc: kvm, mst
On Mon, Aug 13, 2012 at 12:43:50PM +0300, Avi Kivity wrote:
> On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> > Here is a quick prototype of what we discussed yesterday. This one
> > caches only MSI interrupts for now. The obvious problem is that not
> > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> > routing table, so they cannot be cached.
>
> Missing: switch the uncached path to a work queue, so we don't have to
> iterate over all vcpus in interrupt context.
>
> That isn't trivial; for edge-triggered interrupts we need to ignore
> zeros (if polarity=0) but for level-triggered interrupts we need them to
> override the previous setting. But we don't know the trigger mode and
> polarity at this point.
>
I looked at it and I think we have enough info about trigger mode and
polarity at the point where the cache is checked, but we can't switch to
a work queue there because some callers want to get the injection status,
and that requires injection to be synchronous. Only the high level caller
knows whether a work queue is OK or not.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 10:35 UTC
To: Avi Kivity; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 01:31:36PM +0300, Avi Kivity wrote:
> On 08/13/2012 01:24 PM, Gleb Natapov wrote:
> > On Mon, Aug 13, 2012 at 01:21:33PM +0300, Avi Kivity wrote:
> >> On 08/13/2012 01:16 PM, Gleb Natapov wrote:
> >> > On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
> >> >> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
> >> >> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> >> >> > > Here is a quick prototype of what we discussed yesterday. This one
> >> >> > > caches only MSI interrupts for now. The obvious problem is that not
> >> >> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> >> >> > > routing table, so they cannot be cached.
> >> >> >
> >> >> > We can have a small rcu-managed hash table to look those up.
> >> >>
> >> >> Yes but how small? We probably need at least one entry
> >> >> per vcpu, no?
> >> >>
> >> > One entry? We will spend more time managing it than injecting interrupts
> >> > :) ideally we need entry for each IPI sent and for each potential MSI
> >> > from userspace. What happens when hash table is full?
> >>
> >> Drop the entire cache.
> >>
> > OK. Then it should be big enough to not do it frequently.
>
> Should be sized N * vcpus, where N is several dozen (generous amount of
> non-device vectors, though multicast will break it since it's
> essentially random).
>
We can even grow it at runtime if it fills up frequently.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 10:38 UTC
To: Avi Kivity; +Cc: Gleb Natapov, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 01:31:36PM +0300, Avi Kivity wrote:
> On 08/13/2012 01:24 PM, Gleb Natapov wrote:
> > On Mon, Aug 13, 2012 at 01:21:33PM +0300, Avi Kivity wrote:
> >> On 08/13/2012 01:16 PM, Gleb Natapov wrote:
> >> > On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
> >> >> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
> >> >> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> >> >> > > Here is a quick prototype of what we discussed yesterday. This one
> >> >> > > caches only MSI interrupts for now. The obvious problem is that not
> >> >> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> >> >> > > routing table, so they cannot be cached.
> >> >> >
> >> >> > We can have a small rcu-managed hash table to look those up.
> >> >>
> >> >> Yes but how small? We probably need at least one entry
> >> >> per vcpu, no?
> >> >>
> >> > One entry? We will spend more time managing it than injecting interrupts
> >> > :) ideally we need entry for each IPI sent and for each potential MSI
> >> > from userspace. What happens when hash table is full?
> >>
> >> Drop the entire cache.
> >>
> > OK. Then it should be big enough to not do it frequently.
>
> Should be sized N * vcpus, where N is several dozen (generous amount of
> non-device vectors, though multicast will break it since it's
> essentially random).
KVM_MAX_VCPUS is 256; multiply that by what, 50? That is over 10K entries
already. You cannot allocate that much in a single chunk, right?
>
>
> --
> error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 10:58 UTC
To: Michael S. Tsirkin; +Cc: Gleb Natapov, kvm, Jan Kiszka
On 08/13/2012 01:38 PM, Michael S. Tsirkin wrote:
> On Mon, Aug 13, 2012 at 01:31:36PM +0300, Avi Kivity wrote:
>> On 08/13/2012 01:24 PM, Gleb Natapov wrote:
>> > On Mon, Aug 13, 2012 at 01:21:33PM +0300, Avi Kivity wrote:
>> >> On 08/13/2012 01:16 PM, Gleb Natapov wrote:
>> >> > On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
>> >> >> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
>> >> >> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
>> >> >> > > Here is a quick prototype of what we discussed yesterday. This one
>> >> >> > > caches only MSI interrupts for now. The obvious problem is that not
>> >> >> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
>> >> >> > > routing table, so they cannot be cached.
>> >> >> >
>> >> >> > We can have a small rcu-managed hash table to look those up.
>> >> >>
>> >> >> Yes but how small? We probably need at least one entry
>> >> >> per vcpu, no?
>> >> >>
>> >> > One entry? We will spend more time managing it than injecting interrupts
>> >> > :) ideally we need entry for each IPI sent and for each potential MSI
>> >> > from userspace. What happens when hash table is full?
>> >>
>> >> Drop the entire cache.
>> >>
>> > OK. Then it should be big enough to not do it frequently.
>>
>> Should be sized N * vcpus, where N is several dozen (generous amount of
>> non-device vectors, though multicast will break it since it's
>> essentially random).
>
> KVM_MAX_VCPUS is 256 multiply by what? 50? this is 10K already.
> You can not allocate that much in a single chunk, right?
Actually this is overkill. Suppose we do an apicid->vcpu translation
cache? Then we retain O(1) behaviour, no need for a huge cache.
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 11:01 UTC
To: Avi Kivity; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 01:58:21PM +0300, Avi Kivity wrote:
> On 08/13/2012 01:38 PM, Michael S. Tsirkin wrote:
> > On Mon, Aug 13, 2012 at 01:31:36PM +0300, Avi Kivity wrote:
> >> On 08/13/2012 01:24 PM, Gleb Natapov wrote:
> >> > On Mon, Aug 13, 2012 at 01:21:33PM +0300, Avi Kivity wrote:
> >> >> On 08/13/2012 01:16 PM, Gleb Natapov wrote:
> >> >> > On Mon, Aug 13, 2012 at 01:12:46PM +0300, Michael S. Tsirkin wrote:
> >> >> >> On Mon, Aug 13, 2012 at 12:36:41PM +0300, Avi Kivity wrote:
> >> >> >> > On 08/13/2012 12:16 PM, Gleb Natapov wrote:
> >> >> >> > > Here is a quick prototype of what we discussed yesterday. This one
> >> >> >> > > caches only MSI interrupts for now. The obvious problem is that not
> >> >> >> > > all interrupts (namely IPIs and MSIs using KVM_CAP_SIGNAL_MSI) use irq
> >> >> >> > > routing table, so they cannot be cached.
> >> >> >> >
> >> >> >> > We can have a small rcu-managed hash table to look those up.
> >> >> >>
> >> >> >> Yes but how small? We probably need at least one entry
> >> >> >> per vcpu, no?
> >> >> >>
> >> >> > One entry? We will spend more time managing it than injecting interrupts
> >> >> > :) ideally we need entry for each IPI sent and for each potential MSI
> >> >> > from userspace. What happens when hash table is full?
> >> >>
> >> >> Drop the entire cache.
> >> >>
> >> > OK. Then it should be big enough to not do it frequently.
> >>
> >> Should be sized N * vcpus, where N is several dozen (generous amount of
> >> non-device vectors, though multicast will break it since it's
> >> essentially random).
> >
> > KVM_MAX_VCPUS is 256 multiply by what? 50? this is 10K already.
> > You can not allocate that much in a single chunk, right?
>
> Actually this is overkill. Suppose we do an apicid->vcpu translation
> cache? Then we retain O(1) behaviour, no need for a huge cache.
>
Not sure I follow.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 11:03 UTC
To: Gleb Natapov; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On 08/13/2012 02:01 PM, Gleb Natapov wrote:
>>
>> Actually this is overkill. Suppose we do an apicid->vcpu translation
>> cache? Then we retain O(1) behaviour, no need for a huge cache.
>>
> Not sure I follow.
Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
apic id, using a static lookup table (only changed when the guest
updates apicid or a vcpu is inserted).
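Something like this, as a rough sketch (the apic_map field and the helper
are hypothetical names, and only physical xAPIC ids are handled here):

struct kvm_apic_map {
	struct rcu_head rcu;
	struct kvm_vcpu *phys_map[256];		/* one slot per xAPIC id */
};

/* Sketch: O(1) lookup on the fast path; the map is rebuilt and swapped
 * with rcu_assign_pointer() only on APIC id writes or vcpu hotplug. */
static struct kvm_vcpu *apic_id_to_vcpu(struct kvm *kvm, u8 apic_id)
{
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu = NULL;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);	/* hypothetical field */
	if (map)
		vcpu = map->phys_map[apic_id];
	rcu_read_unlock();

	return vcpu;
}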
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 11:12 UTC
To: Avi Kivity; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> >>
> >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> >>
> > Not sure I follow.
>
> Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> apic id, using a static lookup table (only changed when the guest
> updates apicid or a vcpu is inserted).
>
To check that MSI/IPI is unicast you need to check a lot of things: delivery
mode, shorthand, dest mode, vector. In short everything but level. This
is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
is not enough, caching (delivery mode, shorthand, dest mode,
vector)->vcpu is enough and this is exactly what the patch does for irq
routing entries.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 11:19 UTC
To: Avi Kivity; +Cc: Gleb Natapov, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> >>
> >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> >>
> > Not sure I follow.
>
> Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> apic id, using a static lookup table (only changed when the guest
> updates apicid or a vcpu is inserted).
Looks like kvm_apic_id is always 8 bit, so it's just a
table with 256 entries?
>
>
> --
> error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 11:22 UTC
To: Gleb Natapov; +Cc: Avi Kivity, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:12:41PM +0300, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> > On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> > >>
> > >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> > >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> > >>
> > > Not sure I follow.
> >
> > Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> > apic id, using a static lookup table (only changed when the guest
> > updates apicid or a vcpu is inserted).
> >
> To check that MSI/IPI is unicast you need to check a lot of things: delivery
> mode, shorthand, dest mode, vector. In short everything but level. This
> is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
> is not enough, caching (delivery mode, shorthand, dest mode,
> vector)->vcpu is enough and this is exactly what the patch does for irq
> routing entries.
At least for MSI I think it is simple. Here's the relevant code from
my old patch:
+static bool kvm_msi_is_multicast(unsigned dest, int dest_mode)
+{
+ if (dest_mode == 0)
+ /* Physical mode. */
+ return dest == 0xff;
+ else
+ /* Logical mode. */
+ return dest & (dest - 1);
+}
> --
> Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 11:29 UTC
To: Michael S. Tsirkin; +Cc: Avi Kivity, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:22:14PM +0300, Michael S. Tsirkin wrote:
> On Mon, Aug 13, 2012 at 02:12:41PM +0300, Gleb Natapov wrote:
> > On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> > > On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> > > >>
> > > >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> > > >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> > > >>
> > > > Not sure I follow.
> > >
> > > Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> > > apic id, using a static lookup table (only changed when the guest
> > > updates apicid or a vcpu is inserted).
> > >
> > To check that MSI/IPI is unicast you need to check a lot of things: delivery
> > mode, shorthand, dest mode, vector. In short everything but level. This
> > is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
> > is not enough, caching (delivery mode, shorthand, dest mode,
> > vector)->vcpu is enough and this is exactly what the patch does for irq
> > routing entries.
>
> At least for MSI I think it is simple. Here's the relevant code from
> my old patch:
>
> +static bool kvm_msi_is_multicast(unsigned dest, int dest_mode)
> +{
> + if (dest_mode == 0)
> + /* Physical mode. */
> + return dest == 0xff;
> + else
> + /* Logical mode. */
> + return dest & (dest - 1);
> +}
>
MSI does not have a shorthand, so it is simpler, but the code above does not
work for APIC_DFR_CLUSTER as far as I can tell, and it does not check
lowest prio, which is not multicast but should not be cached.
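Something closer to what would be needed, still only a sketch (it stays
pessimistic about cluster mode because the DFR lives in each vcpu's lapic
and is not visible in the MSI message; the function name is made up):

/* Sketch: true only if the MSI has exactly one fixed destination that is
 * safe to cache.  Lowest priority and broadcast are never cached. */
static bool kvm_msi_cacheable(struct kvm_lapic_irq *irq)
{
	if (irq->delivery_mode == APIC_DM_LOWEST)
		return false;
	if (irq->dest_mode == 0)			/* physical */
		return irq->dest_id != 0xff;		/* not broadcast */
	/* logical: require a single destination bit; with a cluster DFR
	 * even that is not enough, so a cluster-aware check would have to
	 * consult the lapics themselves. */
	return hweight8(irq->dest_id & 0xff) == 1;
}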
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 11:30 UTC
To: Gleb Natapov; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On 08/13/2012 02:12 PM, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
>> On 08/13/2012 02:01 PM, Gleb Natapov wrote:
>> >>
>> >> Actually this is overkill. Suppose we do an apicid->vcpu translation
>> >> cache? Then we retain O(1) behaviour, no need for a huge cache.
>> >>
>> > Not sure I follow.
>>
>> Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
>> apic id, using a static lookup table (only changed when the guest
>> updates apicid or a vcpu is inserted).
>>
> To check that MSI/IPI is unicast you need to check a lot of things: delivery
> mode, shorthand, dest mode, vector. In short everything but level. This
> is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
> is not enough, caching (delivery mode, shorthand, dest mode,
> vector)->vcpu is enough and this is exactly what the patch does for irq
> routing entries.
apicid is checked in a loop, the others aren't. apicid is
unpredictable; the others are.
I think we should use the apicid lookup exclusively. It doesn't accelerate
everything, but most things, and is common to all unicast interrupts
except PIC (and we can also precompute the target vcpu for PIC, too).
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 11:41 UTC
To: Avi Kivity; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:30:49PM +0300, Avi Kivity wrote:
> On 08/13/2012 02:12 PM, Gleb Natapov wrote:
> > On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> >> On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> >> >>
> >> >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> >> >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> >> >>
> >> > Not sure I follow.
> >>
> >> Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> >> apic id, using a static lookup table (only changed when the guest
> >> updates apicid or a vcpu is inserted).
> >>
> > To check that MSI/IPI is unicast you need to check a lot of things: delivery
> > mode, shorthand, dest mode, vector. In short everything but level. This
> > is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
> > is not enough, caching (delivery mode, shorthand, dest mode,
> > vector)->vcpu is enough and this is exactly what the patch does for irq
> > routing entries.
>
>
> apicid is checked in a loop, the others aren't.
Along with dest_id.
> apicid is
> unpredictable; the others are.
What do you mean "unpredictable"?
>
> I think we should use the apicid lookup exclusively. It doesn't accelerate
> everything, but most things, and is common to all unicast interrupts
> except PIC (and we can also precompute the target vcpu for PIC, too).
>
We can change kvm_irq_delivery_to_apic() to avoid the loop if the interrupt
is physical, non-broadcast and non-lowest-prio, and do whatever it does now
otherwise. Do you think we do not need the cache in that case?
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Gleb Natapov @ 2012-08-13 11:43 UTC
To: Michael S. Tsirkin; +Cc: Avi Kivity, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:29:31PM +0300, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 02:22:14PM +0300, Michael S. Tsirkin wrote:
> > On Mon, Aug 13, 2012 at 02:12:41PM +0300, Gleb Natapov wrote:
> > > On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> > > > On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> > > > >>
> > > > >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> > > > >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> > > > >>
> > > > > Not sure I follow.
> > > >
> > > > Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> > > > apic id, using a static lookup table (only changed when the guest
> > > > updates apicid or a vcpu is inserted).
> > > >
> > > To check that MSI/IPI is unicast you need to check a lot of things: delivery
> > > mode, shorthand, dest mode, vector. In short everything but level. This
> > > is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
> > > is not enough, caching (delivery mode, shorthand, dest mode,
> > > vector)->vcpu is enough and this is exactly what the patch does for irq
> > > routing entries.
> >
> > At least for MSI I think it is simple. Here's the relevant code from
> > my old patch:
> >
> > +static bool kvm_msi_is_multicast(unsigned dest, int dest_mode)
> > +{
> > + if (dest_mode == 0)
> > + /* Physical mode. */
> > + return dest == 0xff;
> > + else
> > + /* Logical mode. */
> > + return dest & (dest - 1);
> > +}
> >
> MSI does not have a shorthand, so it is simpler, but the code above does not
> work for APIC_DFR_CLUSTER as far as I can tell, and it does not check
> lowest prio, which is not multicast but should not be cached.
>
It is also a little bit pessimistic for logical mode. Dest may have more
than one bit set but still be delivered to only one cpu.
--
Gleb.

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 12:13 UTC
To: Gleb Natapov; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On 08/13/2012 02:41 PM, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 02:30:49PM +0300, Avi Kivity wrote:
>> On 08/13/2012 02:12 PM, Gleb Natapov wrote:
>> > On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
>> >> On 08/13/2012 02:01 PM, Gleb Natapov wrote:
>> >> >>
>> >> >> Actually this is overkill. Suppose we do an apicid->vcpu translation
>> >> >> cache? Then we retain O(1) behaviour, no need for a huge cache.
>> >> >>
>> >> > Not sure I follow.
>> >>
>> >> Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
>> >> apic id, using a static lookup table (only changed when the guest
>> >> updates apicid or a vcpu is inserted).
>> >>
>> > To check that MSI/IPI is unicast you need to check a lot of things: delivery
>> > mode, shorthand, dest mode, vector. In short everything but level. This
>> > is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
>> > is not enough, caching (delivery mode, shorthand, dest mode,
>> > vector)->vcpu is enough and this is exactly what the patch does for irq
>> > routing entries.
>>
>>
>> apicid is checked in a loop, the others aren't.
> Along with dest_id.
Right, that is converted to a lookup.
>
>> apicid is
>> unpredicatable; the others are.
> What do you mean "unpredicatable"?
In terms of branch prediction. We can't tell when the loop will
terminate. On the other hand most IPIs are likely to have the same
delivery mode/shorthand/dest mode.
(not entirely true, we can expect a mix of broadcast/unicast/multicast)
>
>>
>> I think we should use apicid loopup exclusively. It doesn't accelerate
>> everything, but most things, and is common to all unicast interrupts
>> except PIC (and we can also precompute the target vcpu for PIC, too).
>>
> We can change kvm_irq_delivery_to_apic() to avoid the loop if interrupt
> is physical, non broadcast, non low prio. Do whatever it does now
> otherwise. You think we do not need cache in such case?
We can also loop in logical, since the loop is limited to 16 (in x2apic
mode); it doesn't scale with the number of vcpus. We need a lookup
table of cluster id -> array of 16 vcpus.
Broadcast obviously must loop, no cache can help.
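For logical mode the lookup could be shaped roughly like this (sketch only,
names invented; x2apic cluster ids would need a wider index than shown):

/* Sketch: logical destinations resolve through a per-cluster table, so
 * the inner loop is bounded by 16 regardless of the number of vcpus. */
struct kvm_logical_map {
	struct kvm_vcpu *cluster[16][16];	/* [cluster id][bit in dest] */
};

static int deliver_logical(struct kvm_logical_map *map, u16 cid, u16 dest,
			   struct kvm_lapic_irq *irq)
{
	unsigned long bits = dest;
	int bit, r = -1;

	for_each_set_bit(bit, &bits, 16) {
		struct kvm_vcpu *vcpu = map->cluster[cid][bit];

		if (!vcpu)
			continue;
		if (r < 0)
			r = 0;
		r += kvm_apic_set_irq(vcpu, irq);
	}
	return r;
}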
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Avi Kivity @ 2012-08-13 12:14 UTC
To: Gleb Natapov; +Cc: Michael S. Tsirkin, kvm, Jan Kiszka
On 08/13/2012 02:43 PM, Gleb Natapov wrote:
>> MSI does not have shorthand, so it is simpler but the code above does
>> work for APIC_DFR_CLUSTER as far as I can tell and it does not check
>> lowest prio, which is not multicast, but should bot be cached.
>>
> It also a little bit pessimistic for logical mode. Dest may have more
> than one bit set, but be delivered to only one cpu.
We can still loop with for_each_set_bit(). Even if all bits are set, it's
limited to 16.
--
error compiling committee.c: too many arguments to function

* Re: [RFC PATCH 0/2] irq destination caching prototype
From: Michael S. Tsirkin @ 2012-08-13 12:59 UTC
To: Gleb Natapov; +Cc: Avi Kivity, kvm, Jan Kiszka
On Mon, Aug 13, 2012 at 02:41:47PM +0300, Gleb Natapov wrote:
> On Mon, Aug 13, 2012 at 02:30:49PM +0300, Avi Kivity wrote:
> > On 08/13/2012 02:12 PM, Gleb Natapov wrote:
> > > On Mon, Aug 13, 2012 at 02:03:51PM +0300, Avi Kivity wrote:
> > >> On 08/13/2012 02:01 PM, Gleb Natapov wrote:
> > >> >>
> > >> >> Actually this is overkill. Suppose we do an apicid->vcpu translation
> > >> >> cache? Then we retain O(1) behaviour, no need for a huge cache.
> > >> >>
> > >> > Not sure I follow.
> > >>
> > >> Unicast MSIs and IPIs can be speeded up by looking up the vcpu using the
> > >> apic id, using a static lookup table (only changed when the guest
> > >> updates apicid or a vcpu is inserted).
> > >>
> > > To check that MSI/IPI is unicast you need to check a lot of things: delivery
> > > mode, shorthand, dest mode, vector. In short everything but level. This
> > > is exactly what kvm_irq_delivery_to_apic() is doing. Caching apicid->vcpu
> > > is not enough, caching (delivery mode, shorthand, dest mode,
> > > vector)->vcpu is enough and this is exactly what the patch does for irq
> > > routing entries.
> >
> >
> > apicid is checked in a loop, the others aren't.
> Along with dest_id.
>
> > apicid is
> > unpredicatable; the others are.
> What do you mean "unpredicatable"?
>
> >
> > I think we should use apicid loopup exclusively. It doesn't accelerate
> > everything, but most things, and is common to all unicast interrupts
> > except PIC (and we can also precompute the target vcpu for PIC, too).
> >
> We can change kvm_irq_delivery_to_apic() to avoid the loop if interrupt
> is physical,
Logical is also not too hard; we need two extra tables, one for
cluster and one for non-cluster mode.
> non broadcast, non low prio. Do whatever it does now
> otherwise. You think we do not need cache in such case?
>
> --
> Gleb.