Sync pc before memory hooks

This ensures the PC is correct for any architecture when memory hooks run.
Author: mio
Date:   2025-04-12 22:08:29 +08:00
Parent: 3a7bde03b8
Commit: 4a13bc7cb8

2 changed files with 35 additions and 19 deletions
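
For API users, the effect is that a memory hook can read the program counter of the instruction performing the access and get the right value on every architecture, not just those whose front ends emitted an explicit sync. A minimal sketch of that behaviour, assuming an x86-64 target (the addresses, code bytes, and hook body below are illustrative, not taken from this commit):

#include <inttypes.h>
#include <stdio.h>
#include <unicorn/unicorn.h>

// With the PC synced before memory hooks, uc_reg_read() here returns the
// address of the instruction that triggered the access.
static void hook_mem_read(uc_engine *uc, uc_mem_type type, uint64_t address,
                          int size, int64_t value, void *user_data)
{
    uint64_t rip = 0;
    uc_reg_read(uc, UC_X86_REG_RIP, &rip);
    printf("read of 0x%" PRIx64 " (%d bytes) at pc=0x%" PRIx64 "\n",
           address, size, rip);
}

int main(void)
{
    // mov rax, [0x2000]
    const uint8_t code[] = {0x48, 0x8b, 0x04, 0x25, 0x00, 0x20, 0x00, 0x00};
    uc_engine *uc;
    uc_hook hh;

    uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
    uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL);
    uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL);
    uc_mem_write(uc, 0x1000, code, sizeof(code));
    // begin > end registers the hook for the entire address space
    uc_hook_add(uc, &hh, UC_HOOK_MEM_READ, hook_mem_read, NULL, 1, 0);
    uc_emu_start(uc, 0x1000, 0x1000 + sizeof(code), 0, 0);
    uc_close(uc);
    return 0;
}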

qemu/accel/tcg/cputlb.c

@@ -1484,6 +1484,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     HOOK_FOREACH_VAR_DECLARE;
     struct uc_struct *uc = env->uc;
     MemoryRegion *mr;
+    bool synced = false;
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
@@ -1513,13 +1514,17 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         // if there is already an unhandled error, skip callbacks.
         if (uc->invalid_error == UC_ERR_OK) {
             if (code_read) {
-            // code fetching
+                // code fetching
                 error_code = UC_ERR_FETCH_UNMAPPED;
                 HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
                     if (hook->to_delete)
                         continue;
                     if (!HOOK_BOUND_CHECK(hook, paddr))
                         continue;
+                    if (!synced && retaddr) {
+                        cpu_restore_state(uc->cpu, retaddr, false);
+                        synced = true;
+                    }
                     JIT_CALLBACK_GUARD_VAR(handled,
                         ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, paddr, size, 0, hook->user_data));
                     if (handled)
@@ -1537,6 +1542,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                         continue;
                     if (!HOOK_BOUND_CHECK(hook, paddr))
                         continue;
+                    if (!synced && retaddr) {
+                        cpu_restore_state(uc->cpu, retaddr, false);
+                        synced = true;
+                    }
                     JIT_CALLBACK_GUARD_VAR(handled,
                         ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, paddr, size, 0, hook->user_data));
                     if (handled)
@@ -1601,6 +1610,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             continue;
         if (!HOOK_BOUND_CHECK(hook, paddr))
             continue;
+        if (!synced && retaddr) {
+            cpu_restore_state(uc->cpu, retaddr, false);
+            synced = true;
+        }
         JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, paddr, size, 0, hook->user_data));
         // the last callback may have already asked to stop emulation
         if (uc->stop_request)
@@ -1629,6 +1642,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                     continue;
                 if (!HOOK_BOUND_CHECK(hook, paddr))
                     continue;
+                if (!synced && retaddr) {
+                    cpu_restore_state(uc->cpu, retaddr, false);
+                    synced = true;
+                }
                 JIT_CALLBACK_GUARD_VAR(handled,
                     ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, paddr, size, 0, hook->user_data));
                 if (handled)
@@ -1675,6 +1692,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                     continue;
                 if (!HOOK_BOUND_CHECK(hook, paddr))
                     continue;
+                if (!synced && retaddr) {
+                    cpu_restore_state(uc->cpu, retaddr, false);
+                    synced = true;
+                }
                 JIT_CALLBACK_GUARD_VAR(handled,
                     ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, paddr, size, 0, hook->user_data));
                 if (handled)
@@ -2098,6 +2119,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     struct hook *hook;
     bool handled;
     MemoryRegion *mr;
+    bool synced = false;
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
@@ -2128,6 +2150,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             continue;
         if (!HOOK_BOUND_CHECK(hook, paddr))
             continue;
+        if (!synced && retaddr) {
+            cpu_restore_state(uc->cpu, retaddr, false);
+            synced = true;
+        }
         JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, paddr, size, val, hook->user_data));
         // the last callback may have already asked to stop emulation
         if (uc->stop_request)
@@ -2143,6 +2169,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                     continue;
                 if (!HOOK_BOUND_CHECK(hook, paddr))
                     continue;
+                if (!synced && retaddr) {
+                    cpu_restore_state(uc->cpu, retaddr, false);
+                    synced = true;
+                }
                 JIT_CALLBACK_GUARD_VAR(handled,
                     ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, paddr, size, val, hook->user_data));
                 if (handled)
@@ -2192,6 +2222,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                     continue;
                 if (!HOOK_BOUND_CHECK(hook, paddr))
                     continue;
+                if (!synced && retaddr) {
+                    cpu_restore_state(uc->cpu, retaddr, false);
+                    synced = true;
+                }
                 JIT_CALLBACK_GUARD_VAR(handled,
                     ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, paddr, size, val, hook->user_data));
                 if (handled)

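With the generic load/store helpers above now syncing the PC lazily, right before the first matching hook fires, the i386 front end no longer has to emit a PC sync ahead of every guarded memory access, so the per-site gen_sync_pc calls below are removed. This is also cheaper: cpu_restore_state() runs only when a hook is actually invoked, instead of a sync executing on every hooked memory op.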
qemu/target/i386/translate.c

@@ -545,9 +545,6 @@ static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
 
-    if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ))
-        gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP
-
     tcg_gen_qemu_ld_tl(tcg_ctx, t0, a0, s->mem_index, idx | MO_LE);
 }
@@ -555,9 +552,6 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
 
-    if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE))
-        gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP
-
     tcg_gen_qemu_st_tl(tcg_ctx, t0, a0, s->mem_index, idx | MO_LE);
 }
@@ -2914,9 +2908,6 @@ static inline void gen_ldq_env_A0(DisasContext *s, int offset)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
 
-    if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ))
-        gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP
-
     tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
     tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset);
 }
@@ -2925,9 +2916,6 @@ static inline void gen_stq_env_A0(DisasContext *s, int offset)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
 
-    if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE))
-        gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP
-
     tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset);
     tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
 }
@@ -2937,9 +2925,6 @@ static inline void gen_ldo_env_A0(DisasContext *s, int offset)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     int mem_index = s->mem_index;
 
-    if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ))
-        gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP
-
     tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, mem_index, MO_LEQ);
     tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
     tcg_gen_addi_tl(tcg_ctx, s->tmp0, s->A0, 8);
@@ -2952,9 +2937,6 @@ static inline void gen_sto_env_A0(DisasContext *s, int offset)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     int mem_index = s->mem_index;
 
-    if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE))
-        gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP
-
     tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
     tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, mem_index, MO_LEQ);
     tcg_gen_addi_tl(tcg_ctx, s->tmp0, s->A0, 8);