From: Steven Price <steven.price@arm.com>
To: kvm@vger.kernel.org, kvmarm@lists.linux.dev
Cc: Steven Price <steven.price@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <maz@kernel.org>, Will Deacon <will@kernel.org>,
	James Morse <james.morse@arm.com>,
	Oliver Upton <oliver.upton@linux.dev>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Zenghui Yu <yuzenghui@huawei.com>,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, Joey Gouly <joey.gouly@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Fuad Tabba <tabba@google.com>,
	linux-coco@lists.linux.dev,
	Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>,
	Gavin Shan <gshan@redhat.com>,
	Shanker Donthineni <sdonthineni@nvidia.com>,
	Alper Gun <alpergun@google.com>,
	"Aneesh Kumar K . V" <aneesh.kumar@kernel.org>,
	Emi Kisanuki <fj0570is@fujitsu.com>,
	Vishal Annapurve <vannapurve@google.com>,
	WeiLin.Chang@arm.com, Lorenzo.Pieralisi2@arm.com
Subject: [PATCH v14 10/44] arm64: RMI: Add support for SRO
Date: Wed, 13 May 2026 14:17:18 +0100
Message-ID: <20260513131757.116630-11-steven.price@arm.com>
In-Reply-To: <20260513131757.116630-1-steven.price@arm.com>

RMM v2.0 introduces the concept of "Stateful RMI Operations" (SRO).
This means that an SMC can return with an operation still in progress.
The host is expected to continue the operation until it reaches a
conclusion (either success or failure). During this process the RMM
can request additional memory ('donate') or hand memory back to the
host ('reclaim'). The host can request that an in-progress operation
be cancelled, but must still continue the operation until it has
completed (otherwise the incomplete operation may cause future RMM
operations to fail).

An SRO is tracked using a struct rmi_sro_state object, which keeps
track of any memory that has been allocated but not yet consumed by
the RMM, or that has been reclaimed from the RMM. This allows the
memory to be reused by a future request within the same operation. It
also permits an operation to be attempted in a context where memory
allocation may be difficult (e.g. atomic context), with the option to
abort the operation and retry the allocation outside of that context.
The memory stashed in the struct rmi_sro_state object is then reused
on the subsequent attempt.
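
As an illustration (not part of this patch; the initial command and its
arguments are hypothetical placeholders), a caller is expected to drive
an SRO roughly as follows. Note that the cancel path is not yet fully
implemented (see the FIXME in rmi_sro_execute()):

	struct rmi_sro_state sro = {};
	unsigned long ret;

	/* Set up the initial SMC for the operation. */
	sro.regs = (struct arm_smccc_1_2_regs){
		SMC_RMI_EXAMPLE_OP, arg0, arg1	/* hypothetical */
	};

	/* First attempt, perhaps from atomic context. */
	ret = rmi_sro_execute(&sro, GFP_ATOMIC);
	if (ret == (unsigned long)-ENOMEM) {
		/*
		 * Retry from a context that can sleep; memory already
		 * stashed in 'sro' by the first attempt is reused.
		 */
		ret = rmi_sro_execute(&sro, GFP_KERNEL);
	}

	/* Release anything still stashed once the operation is over. */
	rmi_sro_free(&sro);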

Signed-off-by: Steven Price <steven.price@arm.com>
---
v14:
 * SRO support has improved although it is still not fully complete.
   The infrastructure has been moved out of KVM.
---
 arch/arm64/include/asm/rmi_cmds.h |   1 +
 arch/arm64/kernel/rmi.c           | 390 ++++++++++++++++++++++++++++++
 2 files changed, 391 insertions(+)

diff --git a/arch/arm64/include/asm/rmi_cmds.h b/arch/arm64/include/asm/rmi_cmds.h
index eb213c8e6f26..1a7b0c8f1e38 100644
--- a/arch/arm64/include/asm/rmi_cmds.h
+++ b/arch/arm64/include/asm/rmi_cmds.h
@@ -35,6 +35,7 @@ struct rmi_sro_state {
 
 int rmi_delegate_range(phys_addr_t phys, unsigned long size);
 int rmi_undelegate_range(phys_addr_t phys, unsigned long size);
+int free_delegated_page(phys_addr_t phys);
 
 static inline int rmi_delegate_page(phys_addr_t phys)
 {
diff --git a/arch/arm64/kernel/rmi.c b/arch/arm64/kernel/rmi.c
index 08cef54acadb..a8107ca9bb6d 100644
--- a/arch/arm64/kernel/rmi.c
+++ b/arch/arm64/kernel/rmi.c
@@ -48,6 +48,396 @@ int rmi_undelegate_range(phys_addr_t phys, unsigned long size)
 	return ret;
 }
 
+/* Convert a decoded RMI size field (page/block level) to a size in bytes */
+static unsigned long rmi_unit_size_to_bytes(unsigned long unit_size)
+{
+	switch (unit_size) {
+	case 0:
+		return PAGE_SIZE;
+	case 1:
+		return PMD_SIZE;
+	case 2:
+		return PUD_SIZE;
+	case 3:
+		return P4D_SIZE;
+	}
+	unreachable();
+}
+
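+/*
+ * Issue an RMI SMC, retrying while the RMM reports a transient
+ * RMI_BUSY or RMI_BLOCKED status.
+ */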
+static void rmi_smccc_invoke(struct arm_smccc_1_2_regs *regs_in,
+			     struct arm_smccc_1_2_regs *regs_out)
+{
+	struct arm_smccc_1_2_regs regs = *regs_in;
+	unsigned long status;
+
+	do {
+		arm_smccc_1_2_invoke(&regs, regs_out);
+		status = RMI_RETURN_STATUS(regs_out->a0);
+	} while (status == RMI_BUSY || status == RMI_BLOCKED);
+}
+
+int free_delegated_page(phys_addr_t phys)
+{
+	if (WARN_ON(rmi_undelegate_page(phys))) {
+		/* Undelegate failed: leak the page */
+		return -EBUSY;
+	}
+
+	free_page((unsigned long)phys_to_virt(phys));
+
+	return 0;
+}
+
+static int rmi_sro_ensure_capacity(struct rmi_sro_state *sro,
+				   unsigned long count)
+{
+	if (WARN_ON_ONCE(sro->addr_count > RMI_MAX_ADDR_LIST))
+		return -EOVERFLOW;
+
+	if (count > RMI_MAX_ADDR_LIST - sro->addr_count)
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int rmi_sro_donate_contig(struct rmi_sro_state *sro,
+				 unsigned long sro_handle,
+				 unsigned long donatereq,
+				 struct arm_smccc_1_2_regs *out_regs,
+				 gfp_t gfp)
+{
+	unsigned long unit_size = RMI_DONATE_SIZE(donatereq);
+	unsigned long unit_size_bytes = rmi_unit_size_to_bytes(unit_size);
+	unsigned long count = RMI_DONATE_COUNT(donatereq);
+	unsigned long state = RMI_DONATE_STATE(donatereq);
+	unsigned long size = unit_size_bytes * count;
+	unsigned long addr_range;
+	int ret;
+	void *virt;
+	phys_addr_t phys;
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_OP_MEM_DONATE,
+		sro_handle
+	};
+
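+	/*
+	 * First try to satisfy the request from a stashed range (one the
+	 * RMM handed back or did not consume earlier) matching the
+	 * requested unit size, count and state.
+	 */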
+	for (int i = 0; i < sro->addr_count; i++) {
+		unsigned long entry = sro->addr_list[i];
+
+		if (RMI_ADDR_RANGE_SIZE(entry) == unit_size &&
+		    RMI_ADDR_RANGE_COUNT(entry) == count &&
+		    RMI_ADDR_RANGE_STATE(entry) == state) {
+			sro->addr_count--;
+			swap(sro->addr_list[sro->addr_count],
+			     sro->addr_list[i]);
+
+			goto out;
+		}
+	}
+
+	ret = rmi_sro_ensure_capacity(sro, 1);
+	if (ret)
+		return ret;
+
+	virt = alloc_pages_exact(size, gfp);
+	if (!virt)
+		return -ENOMEM;
+	phys = virt_to_phys(virt);
+
+	if (state == RMI_OP_MEM_DELEGATED) {
+		if (rmi_delegate_range(phys, size)) {
+			free_pages_exact(virt, size);
+			return -ENXIO;
+		}
+	}
+
+	addr_range = phys & RMI_ADDR_RANGE_ADDR_MASK;
+	FIELD_MODIFY(RMI_ADDR_RANGE_SIZE_MASK, &addr_range, unit_size);
+	FIELD_MODIFY(RMI_ADDR_RANGE_COUNT_MASK, &addr_range, count);
+	FIELD_MODIFY(RMI_ADDR_RANGE_STATE_MASK, &addr_range, state);
+
+	sro->addr_list[sro->addr_count] = addr_range;
+
+out:
+	regs.a2 = virt_to_phys(&sro->addr_list[sro->addr_count]);
+	regs.a3 = 1;
+	rmi_smccc_invoke(&regs, out_regs);
+
+	unsigned long donated_granules = out_regs->a1;
+	unsigned long donated_size = donated_granules << PAGE_SHIFT;
+
+	if (donated_granules == 0) {
+		/* No pages used by the RMM */
+		sro->addr_count++;
+	} else if (donated_size < size) {
+		phys = sro->addr_list[sro->addr_count] & RMI_ADDR_RANGE_ADDR_MASK;
+
+		/* Not all granules used by the RMM, free the remaining pages */
+		for (long i = donated_size; i < size; i += PAGE_SIZE) {
+			if (state == RMI_OP_MEM_DELEGATED)
+				free_delegated_page(phys + i);
+			else
+				__free_page(phys_to_page(phys + i));
+		}
+	}
+
+	return 0;
+}
+
+static int rmi_sro_donate_noncontig(struct rmi_sro_state *sro,
+				    unsigned long sro_handle,
+				    unsigned long donatereq,
+				    struct arm_smccc_1_2_regs *out_regs,
+				    gfp_t gfp)
+{
+	unsigned long unit_size = RMI_DONATE_SIZE(donatereq);
+	unsigned long unit_size_bytes = rmi_unit_size_to_bytes(unit_size);
+	unsigned long count = RMI_DONATE_COUNT(donatereq);
+	unsigned long state = RMI_DONATE_STATE(donatereq);
+	unsigned long found = 0;
+	unsigned long addr_list_start = sro->addr_count;
+	int ret;
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_OP_MEM_DONATE,
+		sro_handle
+	};
+
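+	/*
+	 * Reuse any stashed single-block ranges of matching unit size and
+	 * state before allocating fresh memory.
+	 */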
+	for (int i = 0; i < addr_list_start && found < count; i++) {
+		unsigned long entry = sro->addr_list[i];
+
+		if (RMI_ADDR_RANGE_SIZE(entry) == unit_size &&
+		    RMI_ADDR_RANGE_COUNT(entry) == 1 &&
+		    RMI_ADDR_RANGE_STATE(entry) == state) {
+			addr_list_start--;
+			swap(sro->addr_list[addr_list_start],
+			     sro->addr_list[i]);
+			found++;
+			i--;
+		}
+	}
+
+	ret = rmi_sro_ensure_capacity(sro, count - found);
+	if (ret)
+		return ret;
+
+	while (found < count) {
+		unsigned long addr_range;
+		void *virt = alloc_pages_exact(unit_size_bytes, gfp);
+		phys_addr_t phys;
+
+		if (!virt)
+			return -ENOMEM;
+
+		phys = virt_to_phys(virt);
+
+		if (state == RMI_OP_MEM_DELEGATED) {
+			if (rmi_delegate_range(phys, unit_size_bytes)) {
+				free_pages_exact(virt, unit_size_bytes);
+				return -ENXIO;
+			}
+		}
+
+		addr_range = phys & RMI_ADDR_RANGE_ADDR_MASK;
+		FIELD_MODIFY(RMI_ADDR_RANGE_SIZE_MASK, &addr_range, unit_size);
+		FIELD_MODIFY(RMI_ADDR_RANGE_COUNT_MASK, &addr_range, 1);
+		FIELD_MODIFY(RMI_ADDR_RANGE_STATE_MASK, &addr_range, state);
+
+		sro->addr_list[sro->addr_count++] = addr_range;
+		found++;
+	}
+
+	regs.a2 = virt_to_phys(&sro->addr_list[addr_list_start]);
+	regs.a3 = found;
+	rmi_smccc_invoke(&regs, out_regs);
+
+	unsigned long donated_granules = out_regs->a1;
+
+	if (WARN_ON(donated_granules & ((unit_size_bytes >> PAGE_SHIFT) - 1))) {
+		/*
+		 * FIXME: RMM has only consumed part of a huge page, this leaks
+		 * the rest of the huge page
+		 */
+		donated_granules = ALIGN(donated_granules,
+					 (unit_size_bytes >> PAGE_SHIFT));
+	}
+	unsigned long donated_blocks = donated_granules / (unit_size_bytes >> PAGE_SHIFT);
+
+	if (WARN_ON(donated_blocks > found))
+		donated_blocks = found;
+
+	unsigned long undonated_blocks = found - donated_blocks;
+
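+	/*
+	 * Remove the entries the RMM consumed from the list, keeping the
+	 * undonated entries stashed for reuse.
+	 */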
+	while (donated_blocks && undonated_blocks) {
+		sro->addr_count--;
+		swap(sro->addr_list[addr_list_start],
+		     sro->addr_list[sro->addr_count]);
+		addr_list_start++;
+
+		donated_blocks--;
+		undonated_blocks--;
+	}
+	sro->addr_count -= donated_blocks;
+
+	return 0;
+}
+
+static int rmi_sro_donate(struct rmi_sro_state *sro,
+			  unsigned long sro_handle,
+			  unsigned long donatereq,
+			  struct arm_smccc_1_2_regs *regs,
+			  gfp_t gfp)
+{
+	unsigned long count = RMI_DONATE_COUNT(donatereq);
+
+	if (WARN_ON(!count))
+		return 0;
+
+	if (RMI_DONATE_CONTIG(donatereq)) {
+		return rmi_sro_donate_contig(sro, sro_handle, donatereq,
+					     regs, gfp);
+	} else {
+		return rmi_sro_donate_noncontig(sro, sro_handle, donatereq,
+						regs, gfp);
+	}
+}
+
+static int rmi_sro_reclaim(struct rmi_sro_state *sro,
+			   unsigned long sro_handle,
+			   struct arm_smccc_1_2_regs *out_regs)
+{
+	unsigned long capacity;
+	struct arm_smccc_1_2_regs regs;
+	int ret;
+
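+	/*
+	 * If the address list is already full, free everything stashed so
+	 * far to make room for the reclaimed ranges.
+	 */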
+	ret = rmi_sro_ensure_capacity(sro, 1);
+	if (ret)
+		rmi_sro_free(sro);
+
+	capacity = RMI_MAX_ADDR_LIST - sro->addr_count;
+
+	regs = (struct arm_smccc_1_2_regs){
+		SMC_RMI_OP_MEM_RECLAIM,
+		sro_handle,
+		virt_to_phys(&sro->addr_list[sro->addr_count]),
+		capacity
+	};
+	rmi_smccc_invoke(&regs, out_regs);
+
+	if (WARN_ON_ONCE(out_regs->a1 > capacity))
+		out_regs->a1 = capacity;
+
+	sro->addr_count += out_regs->a1;
+
+	return 0;
+}
+
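+/*
+ * Release all memory stashed in the SRO state, undelegating any
+ * delegated ranges before returning the pages to the allocator.
+ */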
+void rmi_sro_free(struct rmi_sro_state *sro)
+{
+	for (int i = 0; i < sro->addr_count; i++) {
+		unsigned long entry = sro->addr_list[i];
+		unsigned long addr = RMI_ADDR_RANGE_ADDR(entry);
+		unsigned long unit_size = RMI_ADDR_RANGE_SIZE(entry);
+		unsigned long count = RMI_ADDR_RANGE_COUNT(entry);
+		unsigned long state = RMI_ADDR_RANGE_STATE(entry);
+		unsigned long size = rmi_unit_size_to_bytes(unit_size) * count;
+
+		if (state == RMI_OP_MEM_DELEGATED) {
+			if (WARN_ON(rmi_undelegate_range(addr, size))) {
+				/* Leak the pages */
+				continue;
+			}
+		}
+		free_pages_exact(phys_to_virt(addr), size);
+	}
+
+	sro->addr_count = 0;
+}
+
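+/*
+ * Drive the stateful operation described by sro->regs to completion,
+ * servicing the RMM's donate and reclaim requests along the way.
+ * Returns the final RMI return code, or a negative error code if a
+ * memory request could not be serviced.
+ */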
+unsigned long rmi_sro_execute(struct rmi_sro_state *sro, gfp_t gfp)
+{
+	unsigned long sro_handle;
+	struct arm_smccc_1_2_regs regs;
+	struct arm_smccc_1_2_regs *regs_in = &sro->regs;
+
+	rmi_smccc_invoke(regs_in, &regs);
+
+	sro_handle = regs.a1;
+
+	while (RMI_RETURN_STATUS(regs.a0) == RMI_INCOMPLETE) {
+		bool can_cancel = RMI_RETURN_CAN_CANCEL(regs.a0);
+		int ret = 0;
+
+		switch (RMI_RETURN_MEMREQ(regs.a0)) {
+		case RMI_OP_MEM_REQ_NONE:
+			regs = (struct arm_smccc_1_2_regs){
+				SMC_RMI_OP_CONTINUE, sro_handle, 0
+			};
+			rmi_smccc_invoke(&regs, &regs);
+			break;
+		case RMI_OP_MEM_REQ_DONATE:
+			ret = rmi_sro_donate(sro, sro_handle, regs.a2, &regs,
+					     gfp);
+			break;
+		case RMI_OP_MEM_REQ_RECLAIM:
+			ret = rmi_sro_reclaim(sro, sro_handle, &regs);
+			break;
+		default:
+			WARN_ON(1);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret) {
+			if (can_cancel) {
+				/*
+				 * FIXME: Handle cancelling properly!
+				 *
+				 * If the operation has failed due to memory
+				 * allocation failure then the information on
+				 * the memory allocation should be saved, so
+				 * that the allocation can be repeated outside
+				 * of any context which prevented the
+				 * allocation.
+				 */
+			}
+			if (WARN_ON(ret))
+				return ret;
+		}
+	}
+
+	return regs.a0;
+}
+
 static int rmi_check_version(void)
 {
 	struct arm_smccc_res res;
-- 
2.43.0



