From: Nicholas Piggin <npiggin@gmail.com>
To: kvm-ppc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Cc: Nicholas Piggin <npiggin@gmail.com>
Subject: [PATCH v3 49/52] KVM: PPC: Book3S HV P9: Remove most of the vcore logic
Date: Tue, 5 Oct 2021 02:00:46 +1000
Message-ID: <20211004160049.1338837-50-npiggin@gmail.com>
In-Reply-To: <20211004160049.1338837-1-npiggin@gmail.com>
The P9 path always uses one vcpu per vcore, so none of the vcore locking,
stolen time accounting, blocking logic, shared waitqueue, etc., is required.
Remove most of it.
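
For context, after this change the single P9 vcpu blocks on its own
rcuwait (vcpu->wait) rather than on the shared vcore waitqueue under the
vcore lock. Roughly (a simplified sketch of the hunk below, with the
signal-exit bookkeeping omitted; not a drop-in replacement):

	if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) {
		kvmppc_set_timer(vcpu);
		prepare_to_rcuwait(&vcpu->wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (signal_pending(current) ||
			    kvmppc_vcpu_check_block(vcpu))
				break;
			schedule();
		}
		finish_rcuwait(&vcpu->wait);
	}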
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kvm/book3s_hv.c | 147 ++++++++++++++++++++---------------
1 file changed, 85 insertions(+), 62 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6574e8a3731e..d614f83c7b3f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -276,6 +276,8 @@ static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
vc->preempt_tb = tb;
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
@@ -285,6 +287,8 @@ static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
if (vc->preempt_tb != TB_NIL) {
vc->stolen_tb += tb - vc->preempt_tb;
@@ -297,7 +301,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- u64 now = mftb();
+ u64 now;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ now = mftb();
/*
* We can test vc->runner without taking the vcore lock,
@@ -321,7 +330,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- u64 now = mftb();
+ u64 now;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ now = mftb();
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_start_stolen(vc, now);
@@ -673,6 +687,8 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
u64 p;
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
p = vc->stolen_tb;
if (vc->vcore_state != VCORE_INACTIVE &&
@@ -695,13 +711,19 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
dt = vcpu->arch.dtl_ptr;
vpa = vcpu->arch.vpa.pinned_addr;
now = tb;
- core_stolen = vcore_stolen_time(vc, now);
- stolen = core_stolen - vcpu->arch.stolen_logged;
- vcpu->arch.stolen_logged = core_stolen;
- spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
- stolen += vcpu->arch.busy_stolen;
- vcpu->arch.busy_stolen = 0;
- spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ stolen = 0;
+ } else {
+ core_stolen = vcore_stolen_time(vc, now);
+ stolen = core_stolen - vcpu->arch.stolen_logged;
+ vcpu->arch.stolen_logged = core_stolen;
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
+ stolen += vcpu->arch.busy_stolen;
+ vcpu->arch.busy_stolen = 0;
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+ }
+
if (!dt || !vpa)
return;
memset(dt, 0, sizeof(struct dtl_entry));
@@ -898,13 +920,14 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
* mode handler is not called but no other threads are in the
* source vcore.
*/
-
- spin_lock(&vcore->lock);
- if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
- vcore->vcore_state != VCORE_INACTIVE &&
- vcore->runner)
- target = vcore->runner;
- spin_unlock(&vcore->lock);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ spin_lock(&vcore->lock);
+ if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ vcore->vcore_state != VCORE_INACTIVE &&
+ vcore->runner)
+ target = vcore->runner;
+ spin_unlock(&vcore->lock);
+ }
return kvm_vcpu_yield_to(target);
}
@@ -3125,13 +3148,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
kvmppc_ipi_thread(cpu);
}
-/* Old path does this in asm */
-static void kvmppc_stop_thread(struct kvm_vcpu *vcpu)
-{
- vcpu->cpu = -1;
- vcpu->arch.thread_cpu = -1;
-}
-
static void kvmppc_wait_for_nap(int n_threads)
{
int cpu = smp_processor_id();
@@ -3220,6 +3236,8 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
vc->vcore_state = VCORE_PREEMPT;
vc->pcpu = smp_processor_id();
if (vc->num_threads < threads_per_vcore(vc->kvm)) {
@@ -3236,6 +3254,8 @@ static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
kvmppc_core_end_stolen(vc, mftb());
if (!list_empty(&vc->preempt_list)) {
lp = &per_cpu(preempted_vcores, vc->pcpu);
@@ -3964,7 +3984,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 next_timer;
int trap;
@@ -3980,9 +3999,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_subcore_enter_guest();
- vc->entry_exit_map = 1;
- vc->in_guest = 1;
-
vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
@@ -4035,9 +4051,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
- vc->entry_exit_map = 0x101;
- vc->in_guest = 0;
-
kvmppc_subcore_exit_guest();
return trap;
@@ -4103,6 +4116,13 @@ static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
return false;
}
+static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+ return true;
+ return false;
+}
+
/*
* Check to see if any of the runnable vcpus on the vcore have pending
* exceptions or are no longer ceded
@@ -4113,7 +4133,7 @@ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
int i;
for_each_runnable_thread(i, vcpu, vc) {
- if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+ if (kvmppc_vcpu_check_block(vcpu))
return 1;
}
@@ -4130,6 +4150,8 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
int do_sleep = 1;
u64 block_ns;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
/* Poll for pending exceptions and ceded state */
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
@@ -4407,11 +4429,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
- vcpu->arch.busy_preempt = TB_NIL;
vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
- vc->runnable_threads[0] = vcpu;
- vc->n_runnable = 1;
- vc->runner = vcpu;
/* See if the MMU is ready to go */
if (unlikely(!kvm->arch.mmu_ready)) {
@@ -4429,11 +4447,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_update_vpas(vcpu);
- init_vcore_to_run(vc);
-
preempt_disable();
pcpu = smp_processor_id();
- vc->pcpu = pcpu;
if (kvm_is_radix(kvm))
kvmppc_prepare_radix_vcpu(vcpu, pcpu);
@@ -4462,21 +4477,23 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
goto out;
}
- tb = mftb();
+ if (vcpu->arch.timer_running) {
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ vcpu->arch.timer_running = 0;
+ }
- vcpu->arch.stolen_logged = vcore_stolen_time(vc, tb);
- vc->preempt_tb = TB_NIL;
+ tb = mftb();
- kvmppc_clear_host_core(pcpu);
+ vcpu->cpu = pcpu;
+ vcpu->arch.thread_cpu = pcpu;
+ local_paca->kvm_hstate.kvm_vcpu = vcpu;
+ local_paca->kvm_hstate.ptid = 0;
+ local_paca->kvm_hstate.fake_suspend = 0;
- local_paca->kvm_hstate.napping = 0;
- local_paca->kvm_hstate.kvm_split_mode = NULL;
- kvmppc_start_thread(vcpu, vc);
+ vc->pcpu = pcpu; // for kvmppc_create_dtl_entry
kvmppc_create_dtl_entry(vcpu, vc, tb);
- trace_kvm_guest_enter(vcpu);
- vc->vcore_state = VCORE_RUNNING;
- trace_kvmppc_run_core(vc, 0);
+ trace_kvm_guest_enter(vcpu);
guest_enter_irqoff();
@@ -4498,11 +4515,10 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
set_irq_happened(trap);
- kvmppc_set_host_core(pcpu);
-
guest_exit_irqoff();
- kvmppc_stop_thread(vcpu);
+ vcpu->cpu = -1;
+ vcpu->arch.thread_cpu = -1;
powerpc_local_irq_pmu_restore(flags);
@@ -4529,28 +4545,31 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
}
vcpu->arch.ret = r;
- if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
- !kvmppc_vcpu_woken(vcpu)) {
+ if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) {
kvmppc_set_timer(vcpu);
- while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
+
+ prepare_to_rcuwait(&vcpu->wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
vcpu->stat.signal_exits++;
run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
break;
}
- spin_lock(&vc->lock);
- kvmppc_vcore_blocked(vc);
- spin_unlock(&vc->lock);
+
+ if (kvmppc_vcpu_check_block(vcpu))
+ break;
+
+ trace_kvmppc_vcore_blocked(vc, 0);
+ schedule();
+ trace_kvmppc_vcore_blocked(vc, 1);
}
+ finish_rcuwait(&vcpu->wait);
}
vcpu->arch.ceded = 0;
- vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_run_core(vc, 1);
-
done:
- kvmppc_remove_runnable(vc, vcpu, tb);
trace_kvmppc_run_vcpu_exit(vcpu);
return vcpu->arch.ret;
@@ -4632,7 +4651,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
kvmppc_save_current_sprs();
- vcpu->arch.waitp = &vcpu->arch.vcore->wait;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ vcpu->arch.waitp = &vcpu->arch.vcore->wait;
vcpu->arch.pgdir = kvm->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -5094,6 +5114,9 @@ void kvmppc_alloc_host_rm_ops(void)
int cpu, core;
int size;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
/* Not the first time here ? */
if (kvmppc_host_rm_ops_hv != NULL)
return;
--
2.23.0