From: Baoquan He <bhe@redhat.com>
To: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	x86@kernel.org, "Rafael J. Wysocki" <rafael@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Kuppuswamy Sathyanarayanan
	<sathyanarayanan.kuppuswamy@linux.intel.com>,
	Elena Reshetova <elena.reshetova@intel.com>,
	Jun Nakajima <jun.nakajima@intel.com>,
	Rick Edgecombe <rick.p.edgecombe@intel.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	kexec@lists.infradead.org, linux-coco@lists.linux.dev,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH 10/13] x86/tdx: Convert shared memory back to private on kexec
Date: Sun, 8 Oct 2023 16:35:27 +0800
Message-ID: <ZSJpzw2e7KgtJAZy@MiWiFi-R3L-srv>
In-Reply-To: <20231005131402.14611-11-kirill.shutemov@linux.intel.com>

On 10/05/23 at 04:13pm, Kirill A. Shutemov wrote:
> TDX guests allocate shared buffers to perform I/O. It is done by
> allocating pages normally from the buddy allocator and converting them
> to shared with set_memory_decrypted().
> 
> The target kernel has no idea what memory is converted this way. It only
      ~~~~~~~~~~~~~
> sees E820_TYPE_RAM.

I finally realized this means the 2nd kernel after a kexec reboot. Maybe we
can always call it the 2nd kernel; that term works for both kexec and kdump
jumping.
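
For readers less familiar with the flow described above: this is roughly how
such a shared buffer comes to exist on the driver side. A minimal sketch, not
taken from this series; alloc_shared_buffer() is a made-up helper and error
handling is simplified:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

static void *alloc_shared_buffer(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	unsigned long vaddr;

	if (!page)
		return NULL;

	vaddr = (unsigned long)page_address(page);

	/*
	 * Private->shared conversion. After this the host can access the
	 * buffer, and the direct-mapping PTEs carry the shared bit that
	 * unshare_all_memory() in this patch later finds when it walks
	 * the direct mapping.
	 */
	if (set_memory_decrypted(vaddr, 1 << order)) {
		/* Page state is uncertain on failure; leak rather than free. */
		return NULL;
	}

	return (void *)vaddr;
}

Since nothing records these conversions centrally, the only way to find them
again at kexec time is the direct-mapping walk this patch adds.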

> 
> Accessing shared memory via private mapping is fatal. It leads to
> unrecoverable TD exit.
> 
> On TD shutdown (also covers kexec), walk direct mapping and convert all
> shared memory back to private. It makes all RAM private again and target
> kernel may use it normally.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  arch/x86/Kconfig          |   1 +
>  arch/x86/coco/tdx/kexec.c |   0
>  arch/x86/coco/tdx/tdx.c   | 137 +++++++++++++++++++++++++++++++++++++-
>  3 files changed, 136 insertions(+), 2 deletions(-)
>  create mode 100644 arch/x86/coco/tdx/kexec.c
> 
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 7368d254d01f..b5acf9fb4c70 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -884,6 +884,7 @@ config INTEL_TDX_GUEST
>  	select X86_MEM_ENCRYPT
>  	select X86_MCE
>  	select UNACCEPTED_MEMORY
> +	select EMERGENCY_VIRT_CALLBACK
>  	help
>  	  Support running as a guest under Intel TDX.  Without this support,
>  	  the guest kernel can not boot or run under TDX.
> diff --git a/arch/x86/coco/tdx/kexec.c b/arch/x86/coco/tdx/kexec.c
> new file mode 100644
> index 000000000000..e69de29bb2d1
> diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
> index 56e152126f20..ac0745303983 100644
> --- a/arch/x86/coco/tdx/tdx.c
> +++ b/arch/x86/coco/tdx/tdx.c
> @@ -6,6 +6,7 @@
>  
>  #include <linux/cpufeature.h>
>  #include <linux/debugfs.h>
> +#include <linux/delay.h>
>  #include <linux/export.h>
>  #include <linux/io.h>
>  #include <asm/coco.h>
> @@ -14,6 +15,8 @@
>  #include <asm/insn.h>
>  #include <asm/insn-eval.h>
>  #include <asm/pgtable.h>
> +#include <asm/reboot.h>
> +#include <asm/set_memory.h>
>  
>  /* MMIO direction */
>  #define EPT_READ	0
> @@ -40,6 +43,9 @@
>  
>  static atomic_long_t nr_shared;
>  
> +static atomic_t conversions_in_progress;
> +static bool conversion_allowed = true;
> +
>  static inline bool pte_decrypted(pte_t pte)
>  {
>  	return cc_mkdec(pte_val(pte)) == pte_val(pte);
> @@ -704,6 +710,14 @@ static bool tdx_tlb_flush_required(bool private)
>  
>  static bool tdx_cache_flush_required(void)
>  {
> +	/*
> +	 * Avoid issuing CLFLUSH on set_memory_decrypted() if conversions
> +	 * stopped. Otherwise it can race with unshare_all_memory() and trigger
> +	 * implicit conversion to shared.
> +	 */
> +	if (!conversion_allowed)
> +		return false;
> +
>  	/*
>  	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
>  	 * TDX doesn't have such capability.
> @@ -787,12 +801,25 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
>  static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
>  					 bool enc)
>  {
> +	atomic_inc(&conversions_in_progress);
> +
> +	/*
> +	 * Check after bumping conversions_in_progress to serialize
> +	 * against tdx_shutdown().
> +	 */
> +	if (!conversion_allowed) {
> +		atomic_dec(&conversions_in_progress);
> +		return -EBUSY;
> +	}
> +
>  	/*
>  	 * Only handle shared->private conversion here.
>  	 * See the comment in tdx_early_init().
>  	 */
> -	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
> +	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc)) {
> +		atomic_dec(&conversions_in_progress);
>  		return -EIO;
> +	}
>  
>  	return 0;
>  }
> @@ -804,17 +831,115 @@ static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
>  	 * Only handle private->shared conversion here.
>  	 * See the comment in tdx_early_init().
>  	 */
> -	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
> +	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc)) {
> +		atomic_dec(&conversions_in_progress);
>  		return -EIO;
> +	}
>  
>  	if (enc)
>  		atomic_long_sub(numpages, &nr_shared);
>  	else
>  		atomic_long_add(numpages, &nr_shared);
>  
> +	atomic_dec(&conversions_in_progress);
> +
>  	return 0;
>  }
>  
> +static void unshare_all_memory(bool unmap)
> +{
> +	unsigned long addr, end;
> +	long found = 0, shared;
> +
> +	/*
> +	 * Walk direct mapping and convert all shared memory back to private,
> +	 */
> +
> +	addr = PAGE_OFFSET;
> +	end  = PAGE_OFFSET + get_max_mapped();
> +
> +	while (addr < end) {
> +		unsigned long size;
> +		unsigned int level;
> +		pte_t *pte;
> +
> +		pte = lookup_address(addr, &level);
> +		size = page_level_size(level);
> +
> +		if (pte && pte_decrypted(*pte)) {
> +			int pages = size / PAGE_SIZE;
> +
> +			/*
> +			 * Touching memory with shared bit set triggers implicit
> +			 * conversion to shared.
> +			 *
> +			 * Make sure nobody touches the shared range from
> +			 * now on.
> +			 *
> +			 * Bypass unmapping for crash scenario. Unmapping
> +			 * requires sleepable context, but in crash case kernel
> +			 * hits the code path with interrupts disabled.
> +			 * It shouldn't be a problem as all secondary CPUs are
> +			 * down and kernel runs with interrupts disabled, so
> +			 * there is no room for race.
> +			 */
> +			if (unmap)
> +				set_memory_np(addr, pages);
> +
> +			if (!tdx_enc_status_changed(addr, pages, true)) {
> +				pr_err("Failed to unshare range %#lx-%#lx\n",
> +				       addr, addr + size);
> +			}
> +
> +			found += pages;
> +		}
> +
> +		addr += size;
> +	}
> +
> +	shared = atomic_long_read(&nr_shared);
> +	if (shared != found) {
> +		pr_err("shared page accounting is off\n");
> +		pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
> +	}
> +}
> +
> +static void tdx_shutdown(void)
> +{
> +	unsigned long timeout;
> +
> +	/*
> +	 * Stop new private<->shared conversions and wait for in-flight
> +	 * conversions to complete.
> +	 *
> +	 * Do not wait more than 30 seconds.
> +	 */
> +	timeout = 30 * USEC_PER_SEC;
> +	conversion_allowed = false;
> +	while (atomic_read(&conversions_in_progress) && timeout--)
> +		udelay(1);
> +
> +	if (!timeout)
> +		pr_warn("Failed to finish shared<->private conversions\n");
> +
> +	unshare_all_memory(true);
> +
> +	native_machine_shutdown();
> +}
> +
> +static void tdx_crash_shutdown(void)
> +{
> +	/*
> +	 * Crash can race with private<->shared conversion.
> +	 *
> +	 * There's no clean way out: report and proceed.
> +	 */
> +	if (atomic_read(&conversions_in_progress))
> +		pr_warn("Failed to finish shared<->private conversions\n");
> +
> +	unshare_all_memory(false);
> +}
> +
>  void __init tdx_early_init(void)
>  {
>  	struct tdx_module_args args = {
> @@ -882,6 +1007,14 @@ void __init tdx_early_init(void)
>  	 */
>  	x86_cpuinit.parallel_bringup = false;
>  
> +	machine_ops.shutdown = tdx_shutdown;
> +
> +	/*
> +	 * KVM overrides machine_ops.crash_shutdown, use emergency
> +	 * virt callback instead.
> +	 */
> +	cpu_emergency_register_virt_callback(tdx_crash_shutdown);
> +
>  	pr_info("Guest detected\n");
>  }
>  
> -- 
> 2.41.0
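
A caller-visible side effect of the conversion_allowed gate worth noting: with
patch 06/13 making the enc_status_change_*() hooks return errno, a conversion
attempted after tdx_shutdown() has cleared the flag is refused with -EBUSY in
tdx_enc_status_change_prepare(). Roughly what a driver would observe -- a
hypothetical sketch (make_buffer_shared() is made up, and it assumes the errno
propagates up through set_memory_decrypted()):

#include <linux/printk.h>
#include <linux/set_memory.h>

static int make_buffer_shared(void *buf, int nr_pages)
{
	int ret = set_memory_decrypted((unsigned long)buf, nr_pages);

	if (ret) {
		/* -EBUSY: conversions already stopped for kexec/shutdown. */
		pr_warn("private->shared conversion failed: %d\n", ret);
		return ret;
	}

	return 0;
}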

