diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 678afe5c8e..26c5f86a1f 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -301,6 +301,8 @@ void hvf_arm_init_debug(void)
 #define TMR_CTL_IMASK   (1 << 1)
 #define TMR_CTL_ISTATUS (1 << 2)
 
+static void hvf_wfi(CPUState *cpu); /* NOTE(review): defined below before its only visible caller — confirm this forward decl is needed */
+
 static uint32_t chosen_ipa_bit_size;
 
 typedef struct HVFVTimer {
@@ -2027,17 +2029,96 @@ static uint64_t hvf_vtimer_val_raw(void)
     return mach_absolute_time() - hvf_state->vtimer_offset;
 }
 
-static int hvf_wfi(CPUState *cpu)
+static uint64_t hvf_vtimer_val(void)
 {
+    if (!runstate_is_running()) {
+        /* VM is paused; the saved vtimer value is in vtimer.vtimer_val. */
+        return vtimer.vtimer_val;
+    }
+
+    return hvf_vtimer_val_raw();
+}
+
+static void hvf_wfi(CPUState *cpu)
+{
+    ARMCPU *arm_cpu = ARM_CPU(cpu);
+    struct timespec ts;
+    hv_return_t r;
+    uint64_t ctl;
+    uint64_t cval;
+    int64_t ticks_to_sleep;
+    uint64_t seconds;
+    uint64_t nanos;
+    uint32_t cntfrq;
+    sigset_t mask, oldmask, pselect_mask;
+
     if (cpu_has_work(cpu)) {
-        /*
-         * Don't bother to go into our "low power state" if
-         * we would just wake up immediately.
+        return;
+    }
+
+    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+    assert_hvf_ok(r);
+
+    if (!(ctl & 1) || (ctl & 2)) {
+        /*
+         * Timer disabled or masked.
+         * Limit the sleep to 5 seconds so a lost wakeup cannot hang us.
          */
-        return 0;
+        ts = (struct timespec) { 5, 0 };
+    } else {
+        /* Timer enabled. Calculate exact sleep time. */
+        r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
+        assert_hvf_ok(r);
+
+        ticks_to_sleep = cval - hvf_vtimer_val();
+        if (ticks_to_sleep < 0) {
+            return;
+        }
+
+        cntfrq = gt_cntfrq_period_ns(arm_cpu); /* counter period in ns */
+        seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
+        ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
+        nanos = ticks_to_sleep * cntfrq;
+
+        if (!seconds && nanos < 2000000) { /* < 2ms: not worth sleeping */
+            return;
+        }
+
+        /* Cap timed waits at 5s; we simply re-enter WFI on early wakeup. */
+        if (seconds >= 5) {
+            seconds = 5;
+            nanos = 0;
+        }
+
+        ts = (struct timespec) { seconds, nanos };
     }
 
-    return EXCP_HLT;
+    /*
+     * Block SIG_IPI before checking thread_kicked: a kick arriving after
+     * the check is left pending while blocked, so pselect() (which
+     * atomically unblocks SIG_IPI) returns immediately; no wakeup is lost.
+     */
+    sigemptyset(&mask);
+    sigaddset(&mask, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, &mask, &oldmask);
+
+    if (qatomic_read(&cpu->thread_kicked)) {
+        pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+        return;
+    }
+
+    qatomic_set(&cpu->thread_kicked, false);
+
+    /* Prepare pselect mask: same as oldmask but with SIG_IPI unblocked */
+    pselect_mask = oldmask;
+    sigdelset(&pselect_mask, SIG_IPI);
+
+    /* Sleep with BQL unlocked to allow other VCPUs to progress. */
+    bql_unlock();
+    pselect(0, NULL, NULL, NULL, &ts, &pselect_mask);
+    bql_lock();
+
+    pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 }
 
 /* Must be called by the owning thread */
@@ -2227,7 +2308,7 @@ static int hvf_handle_exception(CPUState *cpu, hv_vcpu_exit_exception_t *excp)
     case EC_WFX_TRAP:
         advance_pc = true;
         if (!(syndrome & WFX_IS_WFE)) {
-            ret = hvf_wfi(cpu);
+            hvf_wfi(cpu); /* now sleeps in place instead of returning EXCP_HLT */
         }
         break;
     case EC_AA64_HVC:
