Merge rhelmot's fix
@@ -595,6 +595,9 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
             }

             tb = tb_find(cpu, last_tb, tb_exit, cflags);
+            if (unlikely(cpu->exit_request)) {
+                continue;
+            }
             cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
             /* Try to align the host and virtual clocks
                if the guest is in advance */
@@ -1451,7 +1451,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                 continue;
             if (!HOOK_BOUND_CHECK(hook, addr))
                 continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, size, 0, hook->user_data)))
                 break;

             // the last callback may already asked to stop emulation
@@ -1466,7 +1466,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                 continue;
             if (!HOOK_BOUND_CHECK(hook, addr))
                 continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, size, 0, hook->user_data)))
                 break;

             // the last callback may already asked to stop emulation
@@ -1518,7 +1518,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                 continue;
             if (!HOOK_BOUND_CHECK(hook, addr))
                 continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, size, 0, hook->user_data)))
                 break;

             // the last callback may already asked to stop emulation
@@ -1546,7 +1546,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                 continue;
             if (!HOOK_BOUND_CHECK(hook, addr))
                 continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, size, 0, hook->user_data)))
                 break;

             // the last callback may already asked to stop emulation
@@ -1635,11 +1635,15 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         target_ulong addr1, addr2;
         uint64_t r1, r2;
         unsigned shift;
+        int old_size;
     do_unaligned_access:
         addr1 = addr & ~((target_ulong)size - 1);
         addr2 = addr1 + size;
+        old_size = uc->size_recur_mem;
+        uc->size_recur_mem = size;
         r1 = full_load(env, addr1, oi, retaddr);
         r2 = full_load(env, addr2, oi, retaddr);
+        uc->size_recur_mem = old_size;
         shift = (addr & (size - 1)) * 8;

         if (memop_big_endian(op)) {
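The context lines in this hunk show how load_helper services an unaligned access: the address is rounded down to addr1, the following aligned word is addr2, both halves are fetched with full_load, and the result is recombined using a shift derived from the misalignment. Below is a rough standalone sketch of that recombination for the little-endian 4-byte case only, written as plain C against a flat buffer; it is not Unicorn's actual helper code, and the big-endian branch visible above mirrors it with the shift direction flipped.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Aligned 4-byte little-endian load from a flat backing buffer. */
static uint32_t load4_aligned(const uint8_t *mem, uint32_t addr)
{
    uint32_t v;
    memcpy(&v, mem + addr, 4);   /* host assumed little-endian for brevity */
    return v;
}

/* Unaligned 4-byte load rebuilt from two aligned loads, mirroring the
 * addr1/addr2/shift recombination in the hunk above (little-endian case). */
static uint32_t load4_unaligned(const uint8_t *mem, uint32_t addr)
{
    uint32_t addr1 = addr & ~3u;             /* round down to alignment */
    uint32_t addr2 = addr1 + 4;              /* the following aligned word */
    uint32_t r1 = load4_aligned(mem, addr1);
    uint32_t r2 = load4_aligned(mem, addr2);
    unsigned shift = (addr & 3u) * 8;        /* misalignment in bits */
    if (shift == 0) {
        return r1;                           /* access was aligned after all */
    }
    return (r1 >> shift) | (r2 << (32 - shift));
}

int main(void)
{
    /* 0x41424344 stored little-endian at offset 1, as in the test below. */
    uint8_t mem[8] = { 0x00, 0x44, 0x43, 0x42, 0x41, 0x00, 0x00, 0x00 };
    printf("0x%08x\n", load4_unaligned(mem, 1));   /* prints 0x41424344 */
    return 0;
}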
@@ -2139,6 +2143,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         CPUTLBEntry *entry2;
         target_ulong page2, tlb_addr2;
         size_t size2;
+        int old_size;

     do_unaligned_access:
         /*
@@ -2181,6 +2186,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         * This loop must go in the forward direction to avoid issues
         * with self-modifying code in Windows 64-bit.
         */
+        old_size = uc->size_recur_mem;
+        uc->size_recur_mem = size;
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (memop_big_endian(op)) {
@@ -2192,6 +2199,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
            }
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
+       uc->size_recur_mem = old_size;
        return;
    }

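The load_helper and store_helper hunks bracket the split fallback with a save/restore of uc->size_recur_mem, so that while the sub-accesses run through full_load or helper_ret_stb_mmu the engine still knows the size of the original access, and nested splits unwind correctly. The following minimal sketch shows only that bracketing pattern; the dispatch policy of suppressing per-byte notifications while size_recur_mem is set is an assumption made for the sketch, not something visible in this diff.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the engine state; size_recur_mem plays the role of
 * uc->size_recur_mem: non-zero only while a split access is in flight. */
struct engine {
    int size_recur_mem;
};

/* Hypothetical hook dispatch: in the sketch we just print the event. */
static void fire_mem_hook(struct engine *eng, uint64_t addr, int size)
{
    (void)eng;
    printf("write at 0x%" PRIx64 ", size %d\n", addr, size);
}

static void store1(struct engine *eng, uint64_t addr, uint8_t val)
{
    /* Assumed policy: per-byte notifications are suppressed while a
     * split access is being serviced. */
    if (eng->size_recur_mem == 0) {
        fire_mem_hook(eng, addr, 1);
    }
    (void)val;                        /* ... perform the 1-byte store ... */
}

/* Unaligned store split into byte stores, bracketed like the diff:
 * save the previous value, publish the full size, restore afterwards. */
static void store_unaligned(struct engine *eng, uint64_t addr,
                            uint64_t val, int size)
{
    int old_size = eng->size_recur_mem;

    fire_mem_hook(eng, addr, size);   /* one notification, full size */
    eng->size_recur_mem = size;       /* mark: sub-accesses are internal */
    for (int i = 0; i < size; i++) {
        store1(eng, addr + i, (uint8_t)(val >> (i * 8)));
    }
    eng->size_recur_mem = old_size;   /* restore for the caller */
}

int main(void)
{
    struct engine eng = { 0 };
    store_unaligned(&eng, 0x200001, 0x41424344, 4);   /* prints one event */
    return 0;
}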
@@ -1584,9 +1584,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     phys_pc = get_page_addr_code(env, pc);

     if (phys_pc == -1) {
-        /* Generate a temporary TB with 1 insn in it */
-        cflags &= ~CF_COUNT_MASK;
-        cflags |= CF_NOCACHE | 1;
+        /* Generate a temporary TB; do not cache */
+        cflags |= CF_NOCACHE;
     }

     cflags &= ~CF_CLUSTER_MASK;
@@ -1106,6 +1106,7 @@ static void test_x86_correct_address_in_long_jump_hook(void)
     OK(uc_close(uc));
 }

+
 static void test_x86_invalid_vex_l(void)
 {
     uc_engine *uc;
@@ -1122,7 +1123,98 @@ static void test_x86_invalid_vex_l(void)

     uc_assert_err(UC_ERR_INSN_INVALID,
                   uc_emu_start(uc, 0, sizeof(code) / sizeof(code[0]), 0, 0));
+    OK(uc_close(uc));
+}
+
+struct writelog_t {
+    uint32_t addr, size;
+};
+
+static void test_x86_unaligned_access_callback(uc_engine *uc, uc_mem_type type,
+        uint64_t address, int size, int64_t value, void *user_data)
+{
+    TEST_CHECK(size != 0);
+    struct writelog_t *write_log = (struct writelog_t *)user_data;
+
+    for (int i = 0; i < 10; i++) {
+        if (write_log[i].size == 0) {
+            write_log[i].addr = (uint32_t) address;
+            write_log[i].size = (uint32_t) size;
+            return;
+        }
+    }
+    TEST_ASSERT(false);
+}
+
+static void test_x86_unaligned_access(void)
+{
+    uc_engine *uc;
+    uc_hook hook;
+    // mov dword ptr [0x200001], eax; mov eax, dword ptr [0x200001]
+    char code[] = "\xa3\x01\x00\x20\x00\xa1\x01\x00\x20\x00";
+    uint32_t r_eax = 0x41424344;
+    struct writelog_t write_log[10];
+    struct writelog_t read_log[10];
+    memset(write_log, 0, sizeof(write_log));
+    memset(read_log, 0, sizeof(read_log));
+
+    uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1);
+    OK(uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_ALL));
+    OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_WRITE, test_x86_unaligned_access_callback,
+                   write_log, 1, 0));
+    OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_READ, test_x86_unaligned_access_callback,
+                   read_log, 1, 0));
+
+    OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax));
+    OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0));
+
+    TEST_CHECK(write_log[0].addr == 0x200001);
+    TEST_CHECK(write_log[0].size == 4);
+    TEST_CHECK(write_log[1].size == 0);
+
+    TEST_CHECK(read_log[0].addr == 0x200001);
+    TEST_CHECK(read_log[0].size == 4);
+    TEST_CHECK(read_log[1].size == 0);
+
+    char b;
+    OK(uc_mem_read(uc, 0x200001, &b, 1));
+    TEST_CHECK(b == 0x44);
+    OK(uc_mem_read(uc, 0x200002, &b, 1));
+    TEST_CHECK(b == 0x43);
+    OK(uc_mem_read(uc, 0x200003, &b, 1));
+    TEST_CHECK(b == 0x42);
+    OK(uc_mem_read(uc, 0x200004, &b, 1));
+    TEST_CHECK(b == 0x41);
+
+    OK(uc_close(uc));
+}
+
+static void test_x86_lazy_mapping_mem_callback(uc_engine *uc, uc_mem_type type,
+        uint64_t address, int size, int64_t value, void *user_data)
+{
+    OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL));
+    OK(uc_mem_write(uc, 0x1000, "\x90\x90", 2)); // nop; nop
+}
+
+static void test_x86_lazy_mapping_block_callback(uc_engine *uc,
+        uint64_t address, uint32_t size, void *user_data)
+{
+    int *block_count = (int*)user_data;
+    (*block_count)++;
+}
+
+static void test_x86_lazy_mapping(void)
+{
+    uc_engine *uc;
+    uc_hook mem_hook, block_hook;
+    int block_count = 0;
+
+    OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc));
+    OK(uc_hook_add(uc, &mem_hook, UC_HOOK_MEM_FETCH_UNMAPPED, test_x86_lazy_mapping_mem_callback, NULL, 1, 0));
+    OK(uc_hook_add(uc, &block_hook, UC_HOOK_BLOCK, test_x86_lazy_mapping_block_callback, &block_count, 1, 0));
+
+    OK(uc_emu_start(uc, 0x1000, 0x1002, 0, 0));
+    TEST_CHECK(block_count == 1);
     OK(uc_close(uc));
 }

@@ -1164,4 +1256,6 @@ TEST_LIST = {
     {"test_x86_correct_address_in_long_jump_hook",
      test_x86_correct_address_in_long_jump_hook},
     {"test_x86_invalid_vex_l", test_x86_invalid_vex_l},
+    {"test_x86_unaligned_access", test_x86_unaligned_access},
+    {"test_x86_lazy_mapping", test_x86_lazy_mapping},
     {NULL, NULL}};
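Taken together with test_x86_unaligned_access above, the intended behaviour is that an unaligned guest access is reported to memory hooks as a single event carrying the full access size. The sketch below performs the same check against the public Unicorn C API, outside the test harness; the addresses are illustrative and error handling is trimmed, so treat it as a usage example rather than part of the change.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unicorn/unicorn.h>

/* mov dword ptr [0x200001], eax  (same encoding as in the test above) */
static const char CODE[] = "\xa3\x01\x00\x20\x00";

static void on_write(uc_engine *uc, uc_mem_type type, uint64_t address,
                     int size, int64_t value, void *user_data)
{
    (void)uc; (void)type; (void)value; (void)user_data;
    /* With the fix, this is expected to fire once: address 0x200001, size 4. */
    printf("write at 0x%" PRIx64 ", size %d\n", address, size);
}

int main(void)
{
    uc_engine *uc;
    uc_hook hook;
    uint32_t eax = 0x41424344;

    if (uc_open(UC_ARCH_X86, UC_MODE_32, &uc) != UC_ERR_OK) {
        return 1;
    }
    uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_ALL);   /* code page (arbitrary) */
    uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_ALL);   /* data page, as in the test */
    uc_mem_write(uc, 0x100000, CODE, sizeof(CODE) - 1);
    uc_reg_write(uc, UC_X86_REG_EAX, &eax);
    uc_hook_add(uc, &hook, UC_HOOK_MEM_WRITE, on_write, NULL, 1, 0);
    uc_emu_start(uc, 0x100000, 0x100000 + sizeof(CODE) - 1, 0, 0);
    uc_close(uc);
    return 0;
}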