Sync pc before memory hooks

This ensures memory hooks observe the correct PC on any architecture.
mio
2025-04-12 22:08:29 +08:00
parent 3a7bde03b8
commit 4a13bc7cb8
2 changed files with 35 additions and 19 deletions
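
In essence, the commit applies a lazy PC-sync pattern: before the first hook that actually fires, the guest PC is restored from the JIT return address via cpu_restore_state(), and a `synced` flag prevents repeating the restore for later hooks on the same access. Below is a minimal, self-contained sketch of that control flow; sync_pc(), hook_fires(), and run_hook() are hypothetical stand-ins for the Unicorn internals, not real APIs.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins; only the control flow mirrors the commit. */
    static void sync_pc(uintptr_t retaddr) { (void)retaddr; } /* cpu_restore_state() */
    static bool hook_fires(int i) { return i % 2 == 0; }      /* delete/bound checks */
    static void run_hook(int i) { (void)i; }                  /* user callback */

    static void dispatch_hooks(int nhooks, uintptr_t retaddr)
    {
        bool synced = false;            /* sync at most once per memory access */
        for (int i = 0; i < nhooks; i++) {
            if (!hook_fires(i))
                continue;
            /* retaddr == 0 means the helper was not entered from JIT-generated
             * code, so there is no translated state to restore from. */
            if (!synced && retaddr) {
                sync_pc(retaddr);       /* costly, hence the lazy flag */
                synced = true;
            }
            run_hook(i);
        }
    }

If no hook fires (the common case), an access pays nothing beyond initializing the flag, which is why the restore is not simply hoisted above the loop.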


@@ -1484,6 +1484,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
HOOK_FOREACH_VAR_DECLARE;
struct uc_struct *uc = env->uc;
MemoryRegion *mr;
bool synced = false;
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@@ -1513,13 +1514,17 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
// if there is already an unhandled error, skip callbacks.
if (uc->invalid_error == UC_ERR_OK) {
if (code_read) {
// code fetching
error_code = UC_ERR_FETCH_UNMAPPED;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
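// lazily sync the guest PC: cpu_restore_state() is costly, so it runs
// at most once per access and only when a hook will actually fire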
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD_VAR(handled,
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, paddr, size, 0, hook->user_data));
if (handled)
@@ -1537,6 +1542,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD_VAR(handled,
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, paddr, size, 0, hook->user_data));
if (handled)
@@ -1601,6 +1610,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, paddr, size, 0, hook->user_data));
// the last callback may have already asked to stop emulation
if (uc->stop_request)
@@ -1629,6 +1642,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD_VAR(handled,
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, paddr, size, 0, hook->user_data));
if (handled)
@@ -1675,6 +1692,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD_VAR(handled,
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, paddr, size, 0, hook->user_data));
if (handled)
@@ -2098,6 +2119,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
struct hook *hook;
bool handled;
MemoryRegion *mr;
bool synced = false;
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@@ -2128,6 +2150,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
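// same lazy PC sync as in load_helper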
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, paddr, size, val, hook->user_data));
// the last callback may have already asked to stop emulation
if (uc->stop_request)
@@ -2143,6 +2169,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD_VAR(handled,
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, paddr, size, val, hook->user_data));
if (handled)
@@ -2192,6 +2222,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if (!synced && retaddr) {
cpu_restore_state(uc->cpu, retaddr, false);
synced = true;
}
JIT_CALLBACK_GUARD_VAR(handled,
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, paddr, size, val, hook->user_data));
if (handled)