From: Vincent Donnefort <vdonnefort@google.com>
To: rostedt@goodmis.org, mhiramat@kernel.org,
mathieu.desnoyers@efficios.com,
linux-trace-kernel@vger.kernel.org, maz@kernel.org,
oliver.upton@linux.dev, joey.gouly@arm.com,
suzuki.poulose@arm.com, yuzenghui@huawei.com
Cc: kvmarm@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
jstultz@google.com, qperret@google.com, will@kernel.org,
aneesh.kumar@kernel.org, kernel-team@android.com,
linux-kernel@vger.kernel.org,
Vincent Donnefort <vdonnefort@google.com>
Subject: [PATCH v11 23/30] KVM: arm64: Add tracing capability for the nVHE/pKVM hyp
Date: Sat, 31 Jan 2026 13:28:41 +0000 [thread overview]
Message-ID: <20260131132848.254084-24-vdonnefort@google.com> (raw)
In-Reply-To: <20260131132848.254084-1-vdonnefort@google.com>
There is currently no way to inspect or log what's happening at EL2
when the nVHE or pKVM hypervisor is used. With the growing set of
features for pKVM, the need for tooling is more pressing. And tracefs,
by its reliability, versatility and support for user-space, is fit for
purpose.
Add support to write into a tracefs-compatible ring-buffer. The
hypervisor cannot log events directly into the host tracefs
ring-buffers, so instead let's use our own, where the hypervisor is the
writer and the host the reader.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ce516d8187b1..c3a7fc939f42 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -89,6 +89,10 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___tracing_load,
+ __KVM_HOST_SMCCC_FUNC___tracing_unload,
+ __KVM_HOST_SMCCC_FUNC___tracing_enable,
+ __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/include/asm/kvm_hyptrace.h b/arch/arm64/include/asm/kvm_hyptrace.h
new file mode 100644
index 000000000000..9c30a479bc36
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hyptrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ARM64_KVM_HYPTRACE_H_
+#define __ARM64_KVM_HYPTRACE_H_
+
+#include <linux/ring_buffer.h>
+
+struct hyp_trace_desc {
+ unsigned long bpages_backing_start;
+ size_t bpages_backing_size;
+ struct trace_buffer_desc trace_buffer_desc;
+
+};
+#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 6498dec00fe9..c7f50492f2cf 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -73,6 +73,11 @@ config NVHE_EL2_DEBUG
if NVHE_EL2_DEBUG
+config NVHE_EL2_TRACING
+ bool
+ depends on TRACING
+ default y
+
config PKVM_DISABLE_STAGE2_ON_PANIC
bool "Disable the host stage-2 on panic"
default n
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
new file mode 100644
index 000000000000..7da8788ce527
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ARM64_KVM_HYP_NVHE_TRACE_H
+#define __ARM64_KVM_HYP_NVHE_TRACE_H
+#include <asm/kvm_hyptrace.h>
+
+#ifdef CONFIG_NVHE_EL2_TRACING
+void *tracing_reserve_entry(unsigned long length);
+void tracing_commit_entry(void);
+
+int __tracing_load(unsigned long desc_va, size_t desc_size);
+void __tracing_unload(void);
+int __tracing_enable(bool enable);
+int __tracing_swap_reader(unsigned int cpu);
+#else
+static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
+static inline void tracing_commit_entry(void) { }
+
+static inline int __tracing_load(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
+static inline void __tracing_unload(void) { }
+static inline int __tracing_enable(bool enable) { return -ENODEV; }
+static inline int __tracing_swap_reader(unsigned int cpu) { return -ENODEV; }
+#endif
+#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 8dc95257c291..f1840628d2d6 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -29,9 +29,12 @@ hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-y += ../../../kernel/smccc-call.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
-hyp-obj-$(CONFIG_NVHE_EL2_TRACING) += clock.o
+hyp-obj-$(CONFIG_NVHE_EL2_TRACING) += clock.o trace.o
hyp-obj-y += $(lib-objs)
+# Path to simple_ring_buffer.c (a source file: resolve against the source tree)
+CFLAGS_trace.nvhe.o += -I$(srctree)/kernel/trace/
+
##
## Build rules for compiling nVHE hyp code
## Output of this folder is `kvm_nvhe.o`, a partially linked object
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 5559b622ea57..eddbf5df5d13 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -18,6 +18,7 @@
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
+#include <nvhe/trace.h>
#include <nvhe/trap_handler.h>
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
@@ -587,6 +588,33 @@ static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}
+static void handle___tracing_load(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned long, desc_hva, host_ctxt, 1);
+ DECLARE_REG(size_t, desc_size, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = __tracing_load(desc_hva, desc_size);
+}
+
+static void handle___tracing_unload(struct kvm_cpu_context *host_ctxt)
+{
+ __tracing_unload();
+}
+
+static void handle___tracing_enable(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(bool, enable, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __tracing_enable(enable);
+}
+
+static void handle___tracing_swap_reader(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __tracing_swap_reader(cpu);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -628,6 +656,10 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_vcpu_load),
HANDLE_FUNC(__pkvm_vcpu_put),
HANDLE_FUNC(__pkvm_tlb_flush_vmid),
+ HANDLE_FUNC(__tracing_load),
+ HANDLE_FUNC(__tracing_unload),
+ HANDLE_FUNC(__tracing_enable),
+ HANDLE_FUNC(__tracing_swap_reader),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c
new file mode 100644
index 000000000000..282cba70ce9b
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/trace.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <nvhe/clock.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
+#include <nvhe/trace.h>
+
+#include <asm/percpu.h>
+#include <asm/kvm_mmu.h>
+#include <asm/local.h>
+
+#include "simple_ring_buffer.c"
+
+static DEFINE_PER_CPU(struct simple_rb_per_cpu, __simple_rbs);
+
+static struct hyp_trace_buffer {
+ struct simple_rb_per_cpu __percpu *simple_rbs;
+ void *bpages_backing_start;
+ size_t bpages_backing_size;
+ hyp_spinlock_t lock;
+} trace_buffer = {
+ .simple_rbs = &__simple_rbs,
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+};
+
+static bool hyp_trace_buffer_loaded(struct hyp_trace_buffer *trace_buffer)
+{
+ return trace_buffer->bpages_backing_size > 0;
+}
+
+void *tracing_reserve_entry(unsigned long length)
+{
+ return simple_ring_buffer_reserve(this_cpu_ptr(trace_buffer.simple_rbs), length,
+ trace_clock());
+}
+
+void tracing_commit_entry(void)
+{
+ simple_ring_buffer_commit(this_cpu_ptr(trace_buffer.simple_rbs));
+}
+
+static int __admit_host_mem(void *start, u64 size)
+{
+ if (!PAGE_ALIGNED(start) || !PAGE_ALIGNED(size) || !size)
+ return -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ return __pkvm_host_donate_hyp(hyp_virt_to_pfn(start), size >> PAGE_SHIFT);
+}
+
+static void __release_host_mem(void *start, u64 size)
+{
+ if (!is_protected_kvm_enabled())
+ return;
+
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(start), size >> PAGE_SHIFT));
+}
+
+static int hyp_trace_buffer_load_bpage_backing(struct hyp_trace_buffer *trace_buffer,
+ struct hyp_trace_desc *desc)
+{
+ void *start = (void *)kern_hyp_va(desc->bpages_backing_start);
+ size_t size = desc->bpages_backing_size;
+ int ret;
+
+ ret = __admit_host_mem(start, size);
+ if (ret)
+ return ret;
+
+ memset(start, 0, size);
+
+ trace_buffer->bpages_backing_start = start;
+ trace_buffer->bpages_backing_size = size;
+
+ return 0;
+}
+
+static void hyp_trace_buffer_unload_bpage_backing(struct hyp_trace_buffer *trace_buffer)
+{
+ void *start = trace_buffer->bpages_backing_start;
+ size_t size = trace_buffer->bpages_backing_size;
+
+ if (!size)
+ return;
+
+ memset(start, 0, size);
+
+ __release_host_mem(start, size);
+
+ trace_buffer->bpages_backing_start = 0;
+ trace_buffer->bpages_backing_size = 0;
+}
+
+static void *__pin_shared_page(unsigned long kern_va)
+{
+ void *va = kern_hyp_va((void *)kern_va);
+
+ if (!is_protected_kvm_enabled())
+ return va;
+
+ return hyp_pin_shared_mem(va, va + PAGE_SIZE) ? NULL : va;
+}
+
+static void __unpin_shared_page(void *va)
+{
+ if (!is_protected_kvm_enabled())
+ return;
+
+ hyp_unpin_shared_mem(va, va + PAGE_SIZE);
+}
+
+static void hyp_trace_buffer_unload(struct hyp_trace_buffer *trace_buffer)
+{
+ int cpu;
+
+ hyp_assert_lock_held(&trace_buffer->lock);
+
+ if (!hyp_trace_buffer_loaded(trace_buffer))
+ return;
+
+ for (cpu = 0; cpu < hyp_nr_cpus; cpu++)
+ simple_ring_buffer_unload_mm(per_cpu_ptr(trace_buffer->simple_rbs, cpu),
+ __unpin_shared_page);
+
+ hyp_trace_buffer_unload_bpage_backing(trace_buffer);
+}
+
+static int hyp_trace_buffer_load(struct hyp_trace_buffer *trace_buffer,
+ struct hyp_trace_desc *desc)
+{
+ struct simple_buffer_page *bpages;
+ struct ring_buffer_desc *rb_desc;
+ int ret, cpu;
+
+ hyp_assert_lock_held(&trace_buffer->lock);
+
+ if (hyp_trace_buffer_loaded(trace_buffer))
+ return -EINVAL;
+
+ ret = hyp_trace_buffer_load_bpage_backing(trace_buffer, desc);
+ if (ret)
+ return ret;
+
+ bpages = trace_buffer->bpages_backing_start;
+ for_each_ring_buffer_desc(rb_desc, cpu, &desc->trace_buffer_desc) {
+ ret = simple_ring_buffer_init_mm(per_cpu_ptr(trace_buffer->simple_rbs, cpu),
+ bpages, rb_desc, __pin_shared_page,
+ __unpin_shared_page);
+ if (ret)
+ break;
+
+ bpages += rb_desc->nr_page_va;
+ }
+
+ if (ret)
+ hyp_trace_buffer_unload(trace_buffer);
+
+ return ret;
+}
+
+static bool hyp_trace_desc_validate(struct hyp_trace_desc *desc, size_t desc_size)
+{
+ struct ring_buffer_desc *rb_desc;
+ unsigned int cpu;
+ size_t nr_bpages;
+ void *desc_end;
+
+ /*
+ * Both desc_size and bpages_backing_size are untrusted host-provided
+ * values. We rely on __pkvm_host_donate_hyp() to enforce their validity.
+ */
+ desc_end = (void *)desc + desc_size;
+ nr_bpages = desc->bpages_backing_size / sizeof(struct simple_buffer_page);
+
+ for_each_ring_buffer_desc(rb_desc, cpu, &desc->trace_buffer_desc) {
+ /* Can we read nr_page_va? */
+ if ((void *)rb_desc + struct_size(rb_desc, page_va, 0) > desc_end)
+ return false;
+
+ /* Overflow desc? */
+ if ((void *)rb_desc + struct_size(rb_desc, page_va, rb_desc->nr_page_va) > desc_end)
+ return false;
+
+ /* Overflow bpages backing memory? */
+ if (nr_bpages < rb_desc->nr_page_va)
+ return false;
+
+ if (cpu >= hyp_nr_cpus)
+ return false;
+
+ if (cpu != rb_desc->cpu)
+ return false;
+
+ nr_bpages -= rb_desc->nr_page_va;
+ }
+
+ return true;
+}
+
+int __tracing_load(unsigned long desc_hva, size_t desc_size)
+{
+	struct hyp_trace_desc *desc = (struct hyp_trace_desc *)kern_hyp_va(desc_hva);
+	int ret;
+
+	ret = __admit_host_mem(desc, desc_size);
+	if (ret)
+		return ret;
+
+	/* Report the rejection of a malformed descriptor to the host. */
+	ret = -EINVAL;
+	if (!hyp_trace_desc_validate(desc, desc_size))
+		goto err_release_desc;
+
+	hyp_spin_lock(&trace_buffer.lock);
+	ret = hyp_trace_buffer_load(&trace_buffer, desc);
+	hyp_spin_unlock(&trace_buffer.lock);
+
+err_release_desc:
+	__release_host_mem(desc, desc_size);
+	return ret;
+}
+
+void __tracing_unload(void)
+{
+ hyp_spin_lock(&trace_buffer.lock);
+ hyp_trace_buffer_unload(&trace_buffer);
+ hyp_spin_unlock(&trace_buffer.lock);
+}
+
+int __tracing_enable(bool enable)
+{
+ int cpu, ret = enable ? -EINVAL : 0;
+
+ hyp_spin_lock(&trace_buffer.lock);
+
+ if (!hyp_trace_buffer_loaded(&trace_buffer))
+ goto unlock;
+
+ for (cpu = 0; cpu < hyp_nr_cpus; cpu++)
+ simple_ring_buffer_enable_tracing(per_cpu_ptr(trace_buffer.simple_rbs, cpu),
+ enable);
+
+ ret = 0;
+
+unlock:
+ hyp_spin_unlock(&trace_buffer.lock);
+
+ return ret;
+}
+
+int __tracing_swap_reader(unsigned int cpu)
+{
+ int ret = -ENODEV;
+
+ if (cpu >= hyp_nr_cpus)
+ return -EINVAL;
+
+ hyp_spin_lock(&trace_buffer.lock);
+
+ if (hyp_trace_buffer_loaded(&trace_buffer))
+ ret = simple_ring_buffer_swap_reader_page(
+ per_cpu_ptr(trace_buffer.simple_rbs, cpu));
+
+ hyp_spin_unlock(&trace_buffer.lock);
+
+ return ret;
+}
--
2.53.0.rc1.225.gd81095ad13-goog
next prev parent reply other threads:[~2026-01-31 13:29 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-31 13:28 [PATCH v11 00/30] Tracefs support for pKVM Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 01/30] ring-buffer: Add page statistics to the meta-page Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 02/30] ring-buffer: Store bpage pointers into subbuf_ids Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 03/30] ring-buffer: Introduce ring-buffer remotes Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 04/30] ring-buffer: Add non-consuming read for " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 05/30] tracing: Introduce trace remotes Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 06/30] tracing: Add reset to " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 07/30] tracing: Add non-consuming read " Vincent Donnefort
2026-02-04 23:52 ` Steven Rostedt
2026-02-10 15:32 ` Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 08/30] tracing: Add init callback " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 09/30] tracing: Add events " Vincent Donnefort
2026-02-05 0:40 ` Steven Rostedt
2026-01-31 13:28 ` [PATCH v11 10/30] tracing: Add events/ root files " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 11/30] tracing: Add helpers to create trace remote events Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 12/30] ring-buffer: Export buffer_data_page and macros Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 13/30] tracing: Introduce simple_ring_buffer Vincent Donnefort
2026-02-05 1:06 ` Steven Rostedt
2026-01-31 13:28 ` [PATCH v11 14/30] tracing: Add a trace remote module for testing Vincent Donnefort
2026-02-05 1:32 ` Steven Rostedt
2026-01-31 13:28 ` [PATCH v11 15/30] tracing: selftests: Add trace remote tests Vincent Donnefort
2026-02-05 17:42 ` Steven Rostedt
2026-02-10 15:54 ` Vincent Donnefort
2026-02-19 14:36 ` Steven Rostedt
2026-01-31 13:28 ` [PATCH v11 16/30] Documentation: tracing: Add tracing remotes Vincent Donnefort
2026-02-05 17:45 ` Steven Rostedt
2026-01-31 13:28 ` [PATCH v11 17/30] tracing: load/unload page callbacks for simple_ring_buffer Vincent Donnefort
2026-02-05 17:47 ` Steven Rostedt
2026-01-31 13:28 ` [PATCH v11 18/30] tracing: Check for undefined symbols in simple_ring_buffer Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 19/30] KVM: arm64: Add PKVM_DISABLE_STAGE2_ON_PANIC Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 20/30] KVM: arm64: Add clock support to nVHE/pKVM hyp Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 21/30] KVM: arm64: Initialise hyp_nr_cpus for nVHE hyp Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 22/30] KVM: arm64: Support unaligned fixmap in the pKVM hyp Vincent Donnefort
2026-01-31 13:28 ` Vincent Donnefort [this message]
2026-01-31 13:28 ` [PATCH v11 24/30] KVM: arm64: Add trace remote for the nVHE/pKVM hyp Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 25/30] KVM: arm64: Sync boot clock with " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 26/30] KVM: arm64: Add trace reset to " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 27/30] KVM: arm64: Add event support to the nVHE/pKVM hyp and trace remote Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 28/30] KVM: arm64: Add hyp_enter/hyp_exit events to nVHE/pKVM hyp Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 29/30] KVM: arm64: Add selftest event support " Vincent Donnefort
2026-01-31 13:28 ` [PATCH v11 30/30] tracing: selftests: Add hypervisor trace remote tests Vincent Donnefort
2026-02-04 22:45 ` [PATCH v11 00/30] Tracefs support for pKVM Steven Rostedt
2026-02-05 17:51 ` Steven Rostedt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260131132848.254084-24-vdonnefort@google.com \
--to=vdonnefort@google.com \
--cc=aneesh.kumar@kernel.org \
--cc=joey.gouly@arm.com \
--cc=jstultz@google.com \
--cc=kernel-team@android.com \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=mathieu.desnoyers@efficios.com \
--cc=maz@kernel.org \
--cc=mhiramat@kernel.org \
--cc=oliver.upton@linux.dev \
--cc=qperret@google.com \
--cc=rostedt@goodmis.org \
--cc=suzuki.poulose@arm.com \
--cc=will@kernel.org \
--cc=yuzenghui@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox