Each iteration of the SEAMCALL retry loop currently disables preemption and
marks the CPU's cache state as incoherent, even though marking it once before
the first SEAMCALL is enough: with preemption disabled across the whole loop,
the task cannot migrate, so the same per-CPU flag stays set for every retry.

Pull the marking out of __seamcall_dirty_cache() into a dedicated
tdx_mark_caches_dirty() helper and call it once, under a single
preempt_disable()/preempt_enable() pair that now spans the retry loop. The
marking still happens before any SEAMCALL is made, so there is no window
where a cache flush is needed but not yet marked.
---

 b/arch/x86/include/asm/tdx.h |   31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff -puN arch/x86/include/asm/tdx.h~tdx-func arch/x86/include/asm/tdx.h
--- a/arch/x86/include/asm/tdx.h~tdx-func	2025-09-08 08:43:58.391194504 -0700
+++ b/arch/x86/include/asm/tdx.h	2025-09-08 09:03:46.039834165 -0700
@@ -108,23 +108,19 @@ void tdx_init(void);
 
 typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
 
-static __always_inline u64 __seamcall_dirty_cache(sc_func_t func, u64 fn,
-						  struct tdx_module_args *args)
+/*
+ * SEAMCALLs can generate dirty cachelines of TDX private
+ * memory.  Mark the CPU's cache state as incoherent so that
+ * the cache can be flushed during kexec.
+ *
+ * Do this before actually making a SEAMCALL, so there is no
+ * window where a flush is needed but not yet marked.
+ */
+static __always_inline void tdx_mark_caches_dirty(void)
 {
 	lockdep_assert_preemption_disabled();
 
-	/*
-	 * SEAMCALLs are made to the TDX module and can generate dirty
-	 * cachelines of TDX private memory.  Mark cache state incoherent
-	 * so that the cache can be flushed during kexec.
-	 *
-	 * This needs to be done before actually making the SEAMCALL,
-	 * because kexec-ing CPU could send NMI to stop remote CPUs,
-	 * in which case even disabling IRQ won't help here.
-	 */
 	this_cpu_write(cache_state_incoherent, true);
-
-	return func(fn, args);
 }
 
 static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
@@ -133,12 +129,15 @@ static __always_inline u64 sc_retry(sc_f
 	int retry = RDRAND_RETRY_LOOPS;
 	u64 ret;
 
+	preempt_disable();
+	tdx_mark_caches_dirty();
+
 	do {
-		preempt_disable();
-		ret = __seamcall_dirty_cache(func, fn, args);
-		preempt_enable();
+		ret = func(fn, args);
 	} while (ret == TDX_RND_NO_ENTROPY && --retry);
 
+	preempt_enable();
+
 	return ret;
 }
 
_
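
For reference, here is how the two functions read with this patch applied,
reconstructed from the hunks above (the helper's comment block is elided;
surrounding declarations such as sc_func_t and RDRAND_RETRY_LOOPS are
unchanged):

static __always_inline void tdx_mark_caches_dirty(void)
{
	lockdep_assert_preemption_disabled();

	this_cpu_write(cache_state_incoherent, true);
}

static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
				    struct tdx_module_args *args)
{
	int retry = RDRAND_RETRY_LOOPS;
	u64 ret;

	preempt_disable();
	tdx_mark_caches_dirty();

	do {
		ret = func(fn, args);
	} while (ret == TDX_RND_NO_ENTROPY && --retry);

	preempt_enable();

	return ret;
}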
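
The consumer of cache_state_incoherent is outside this patch. As a rough
sketch of the intended pairing (the helper name below is made up for
illustration; only the per-CPU flag name comes from this patch), the
kexec/CPU-stop path would check the flag and flush:

/*
 * Hypothetical sketch, not part of this patch: on the kexec or
 * CPU-stop path, flush this CPU's caches if a SEAMCALL may have
 * left dirty TDX private cachelines behind.
 */
static inline void flush_caches_if_incoherent(void)	/* name is illustrative */
{
	if (this_cpu_read(cache_state_incoherent))
		native_wbinvd();	/* write back and invalidate this CPU's caches */
}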
