Merge branch 'master' of github.com:unicorn-engine/unicorn into fpip_update
@@ -117,13 +117,6 @@ int cpu_exec(struct uc_struct *uc, CPUArchState *env) // qq
         (uc->arch == UC_ARCH_M68K && cpu->exception_index == 0x2f) /* M68K's EXCP_TRAP15 */
     ) {
         cpu->halted = 1;
-        //cpu->exception_index = EXCP_HLT;
-        //no_shutdown = 0;
-        //printf(">>> calling shutdown-request...\n");
-        //printf(">>> ** current EIP = %x\n", X86_CPU(cpu)->env.eip);
-        //qemu_system_shutdown_request();
-        //pause_all_vcpus();
-        //cpu_loop_exit(cpu);
         ret = EXCP_HLT;
         break;
     }

qemu/header_gen.py: Executable file → Normal file (0 lines changed)
@@ -170,6 +170,8 @@ struct MemoryRegion {
     MemoryRegionIoeventfd *ioeventfds;
     NotifierList iommu_notify;
     struct uc_struct *uc;
+    uint32_t perms; // all perms, partially redundant with readonly
+    uint64_t end;
 };

 /**
@@ -315,12 +317,14 @@ void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr,
  * @owner: the object that tracks the region's reference count
  * @name: the name of the region.
  * @size: size of the region.
+ * @perms: permissions on the region (UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC).
  * @errp: pointer to Error*, to store an error if it happens.
  */
 void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr,
                             struct Object *owner,
                             const char *name,
                             uint64_t size,
+                            uint32_t perms,
                             Error **errp);

 /**
@@ -934,7 +938,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,

 void memory_register_types(struct uc_struct *uc);

-int memory_map(struct uc_struct *uc, ram_addr_t begin, size_t size);
+MemoryRegion *memory_map(struct uc_struct *uc, ram_addr_t begin, size_t size, uint32_t perms);
 int memory_free(struct uc_struct *uc);

 #endif
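For context, the new prototype both tags the mapping with a permission mask and returns the freshly created region instead of an int status. A minimal sketch of a call site inside Unicorn (the caller and the constants passed are illustrative; the function is reached through the uc->memory_map hook installed in uc_common_init further below):

    /* Map 2 MB of read/write guest RAM at 0x100000 and keep the region handle. */
    MemoryRegion *region = uc->memory_map(uc, 0x100000, 2 * 1024 * 1024,
                                          UC_PROT_READ | UC_PROT_WRITE);
    /* region->perms and region->end are then available to later permission checks. */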
@@ -31,27 +31,28 @@

 // Unicorn engine
-int memory_map(struct uc_struct *uc, ram_addr_t begin, size_t size)
+MemoryRegion *memory_map(struct uc_struct *uc, ram_addr_t begin, size_t size, uint32_t perms)
 {
     uc->ram = g_new(MemoryRegion, 1);

-    memory_region_init_ram(uc, uc->ram, NULL, "pc.ram", size, &error_abort);
+    memory_region_init_ram(uc, uc->ram, NULL, "pc.ram", size, perms, &error_abort);

     memory_region_add_subregion(get_system_memory(uc), begin, uc->ram);

     if (uc->current_cpu)
         tlb_flush(uc->current_cpu, 1);

-    return 0;
+    return uc->ram;
 }

 int memory_free(struct uc_struct *uc)
 {
     int i;
     get_system_memory(uc)->enabled = false;
     if (uc->ram) {
         uc->ram->enabled = false;
         memory_region_del_subregion(get_system_memory(uc), uc->ram);
         g_free(uc->ram);
     }
     for (i = 0; i < uc->mapped_block_count; i++) {
         uc->mapped_blocks[i]->enabled = false;
         memory_region_del_subregion(get_system_memory(uc), uc->mapped_blocks[i]);
         g_free(uc->mapped_blocks[i]);
     }
     return 0;
 }
@@ -1151,10 +1152,15 @@ void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr,
                             Object *owner,
                             const char *name,
                             uint64_t size,
+                            uint32_t perms,
                             Error **errp)
 {
     memory_region_init(uc, mr, owner, name, size);
     mr->ram = true;
+    if (!(perms & UC_PROT_WRITE)) {
+        mr->readonly = true;
+    }
+    mr->perms = perms;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
     mr->ram_addr = qemu_ram_alloc(size, mr, errp);
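In other words, the permission mask now drives the region's readonly flag at creation time. A small illustration with hypothetical values (not taken from the commit):

    /* A region created without UC_PROT_WRITE starts out read-only. */
    memory_region_init_ram(uc, mr, NULL, "rom.region", 0x1000,
                           UC_PROT_READ | UC_PROT_EXEC, &error_abort);
    /* now: mr->readonly == true, mr->perms == (UC_PROT_READ | UC_PROT_EXEC) */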
@@ -1309,6 +1315,12 @@ void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
     if (mr->readonly != readonly) {
         memory_region_transaction_begin(mr->uc);
         mr->readonly = readonly;
+        if (readonly) {
+            mr->perms &= ~UC_PROT_WRITE;
+        }
+        else {
+            mr->perms |= UC_PROT_WRITE;
+        }
         mr->uc->memory_region_update_pending |= mr->enabled;
         memory_region_transaction_commit(mr->uc);
     }
@@ -1528,6 +1540,7 @@ static void memory_region_add_subregion_common(MemoryRegion *mr,
     assert(!subregion->container);
     subregion->container = mr;
     subregion->addr = offset;
+    subregion->end = offset + int128_get64(subregion->size);
     memory_region_update_container_subregions(subregion);
 }

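Caching the exclusive end address alongside addr lets later lookups test containment without recomputing it from the Int128 size each time. A sketch of the kind of check this enables (an illustrative helper, not code from the commit):

    /* Does guest address addr fall inside this subregion? `end` is exclusive. */
    static bool region_contains(const MemoryRegion *mr, hwaddr addr)
    {
        return addr >= mr->addr && addr < mr->end;
    }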
qemu/scripts/make_device_config.sh: Normal file → Executable file (0 lines changed)
qemu/softmmu_template.h: Executable file → Normal file (131 lines changed)
@@ -178,6 +178,9 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     uintptr_t haddr;
     DATA_TYPE res;

+    struct uc_struct *uc = env->uc;
+    MemoryRegion *mr = memory_mapping(uc, addr);
+
     // Unicorn: callback on memory read
     if (env->uc->hook_mem_read && READ_ACCESS_TYPE == MMU_DATA_LOAD) {
         struct hook_struct *trace = hook_find((uch)env->uc, UC_MEM_READ, addr);
@@ -188,7 +191,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     }

     // Unicorn: callback on invalid memory
-    if (env->uc->hook_mem_idx && !memory_mapping(env->uc, addr)) {
+    if (env->uc->hook_mem_idx && mr == NULL) {
         if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
             (uch)env->uc, UC_MEM_READ, addr, DATA_SIZE, 0,
             env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
@@ -203,6 +206,26 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         }
     }

+    // Unicorn: callback on non-readable memory
+    if (mr != NULL && !(mr->perms & UC_PROT_READ)) { // non-readable
+        bool result = false;
+        if (uc->hook_mem_idx) {
+            result = ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
+                (uch)uc, UC_MEM_READ_NR, addr, DATA_SIZE, 0,
+                uc->hook_callbacks[uc->hook_mem_idx].user_data);
+        }
+        if (result) {
+            env->invalid_error = UC_ERR_OK;
+        }
+        else {
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_MEM_READ_NR;
+            // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->current_cpu);
+            return 0;
+        }
+    }
+
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
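The event type passed here, UC_MEM_READ_NR, lets a user-registered hook decide whether a read from mapped-but-non-readable memory aborts emulation. A sketch of such a hook, assuming the uc_cb_eventmem_t signature implied by the call sites above (the enum name uc_mem_type and the function name are assumptions for illustration):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>
    /* uch, uc_mem_type and UC_MEM_READ_NR are assumed to come from Unicorn's public header. */

    static bool hook_mem_event(uch handle, uc_mem_type type, uint64_t address,
                               int size, int64_t value, void *user_data)
    {
        if (type == UC_MEM_READ_NR) {
            printf("non-readable read at 0x%" PRIx64 " (%d bytes)\n", address, size);
            return true;   /* true: continue; the helper above then sets UC_ERR_OK */
        }
        return false;      /* false: stop with the matching UC_ERR_* code */
    }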
@@ -300,6 +323,9 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     uintptr_t haddr;
     DATA_TYPE res;

+    struct uc_struct *uc = env->uc;
+    MemoryRegion *mr = memory_mapping(uc, addr);
+
     // Unicorn: callback on memory read
     if (env->uc->hook_mem_read && READ_ACCESS_TYPE == MMU_DATA_LOAD) {
         struct hook_struct *trace = hook_find((uch)env->uc, UC_MEM_READ, addr);
@@ -310,7 +336,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     }

     // Unicorn: callback on invalid memory
-    if (env->uc->hook_mem_idx && !memory_mapping(env->uc, addr)) {
+    if (env->uc->hook_mem_idx && mr == NULL) {
         if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
             (uch)env->uc, UC_MEM_READ, addr, DATA_SIZE, 0,
             env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
@@ -325,6 +351,26 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         }
     }

+    // Unicorn: callback on non-readable memory
+    if (mr != NULL && !(mr->perms & UC_PROT_READ)) { // non-readable
+        bool result = false;
+        if (uc->hook_mem_idx) {
+            result = ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
+                (uch)uc, UC_MEM_READ_NR, addr, DATA_SIZE, 0,
+                uc->hook_callbacks[uc->hook_mem_idx].user_data);
+        }
+        if (result) {
+            env->invalid_error = UC_ERR_OK;
+        }
+        else {
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_MEM_READ_NR;
+            // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->current_cpu);
+            return 0;
+        }
+    }
+
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -460,31 +506,53 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     uintptr_t haddr;

+    struct uc_struct *uc = env->uc;
+    MemoryRegion *mr = memory_mapping(uc, addr);
+
     // Unicorn: callback on memory write
-    if (env->uc->hook_mem_write) {
-        struct hook_struct *trace = hook_find((uch)env->uc, UC_MEM_WRITE, addr);
+    if (uc->hook_mem_write) {
+        struct hook_struct *trace = hook_find((uch)uc, UC_MEM_WRITE, addr);
         if (trace) {
-            ((uc_cb_hookmem_t)trace->callback)((uch)env->uc, UC_MEM_WRITE,
+            ((uc_cb_hookmem_t)trace->callback)((uch)uc, UC_MEM_WRITE,
                 (uint64_t)addr, (int)DATA_SIZE, (int64_t)val, trace->user_data);
         }
     }

     // Unicorn: callback on invalid memory
-    if (env->uc->hook_mem_idx && !memory_mapping(env->uc, addr)) {
-        if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
-            (uch)env->uc, UC_MEM_WRITE, addr, DATA_SIZE, (int64_t)val,
-            env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
+    if (uc->hook_mem_idx && mr == NULL) {
+        if (!((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
+            (uch)uc, UC_MEM_WRITE, addr, DATA_SIZE, (int64_t)val,
+            uc->hook_callbacks[uc->hook_mem_idx].user_data)) {
             // save error & quit
             env->invalid_addr = addr;
             env->invalid_error = UC_ERR_MEM_WRITE;
             // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
+            cpu_exit(uc->current_cpu);
             return;
         } else {
             env->invalid_error = UC_ERR_OK;
         }
     }

+    // Unicorn: callback on read-only memory
+    if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { // read-only memory
+        bool result = false;
+        if (uc->hook_mem_idx) {
+            result = ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
+                (uch)uc, UC_MEM_WRITE_NW, addr, DATA_SIZE, (int64_t)val,
+                uc->hook_callbacks[uc->hook_mem_idx].user_data);
+        }
+        if (result) {
+            env->invalid_error = UC_ERR_OK;
+        }
+        else {
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_MEM_WRITE_NW;
+            // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->current_cpu);
+            return;
+        }
+    }
+
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
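Taken together, the load and store helpers now gate accesses on the region's permission bits: loads from mapped-but-non-readable regions raise UC_MEM_READ_NR, stores to read-only regions raise UC_MEM_WRITE_NW. The following standalone program is a sketch of that gate logic only; the UC_PROT_* values are defined locally here and merely assumed to be distinct bit flags as in Unicorn's public header:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the permission bits used by the gates above. */
    #define UC_PROT_READ  1u
    #define UC_PROT_WRITE 2u

    /* The load path rejects mapped-but-non-readable regions... */
    static bool load_rejected(uint32_t perms)  { return !(perms & UC_PROT_READ); }
    /* ...and the store path rejects mapped-but-non-writable (read-only) regions. */
    static bool store_rejected(uint32_t perms) { return !(perms & UC_PROT_WRITE); }

    int main(void)
    {
        uint32_t ro = UC_PROT_READ;                  /* read-only region */
        uint32_t rw = UC_PROT_READ | UC_PROT_WRITE;  /* normal RW region */
        printf("RO region: load %s, store %s\n",
               load_rejected(ro) ? "rejected" : "ok",
               store_rejected(ro) ? "rejected" : "ok");
        printf("RW region: load %s, store %s\n",
               load_rejected(rw) ? "rejected" : "ok",
               store_rejected(rw) ? "rejected" : "ok");
        return 0;
    }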
@@ -546,6 +614,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
            Undo that for the recursion.  */
         glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                         mmu_idx, retaddr + GETPC_ADJ);
+        if (env->invalid_error != UC_ERR_OK)
+            break;
     }
     return;
 }
@@ -574,31 +644,54 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     uintptr_t haddr;

+    struct uc_struct *uc = env->uc;
+    MemoryRegion *mr = memory_mapping(uc, addr);
+
     // Unicorn: callback on memory write
-    if (env->uc->hook_mem_write) {
-        struct hook_struct *trace = hook_find((uch)env->uc, UC_MEM_WRITE, addr);
+    if (uc->hook_mem_write) {
+        struct hook_struct *trace = hook_find((uch)uc, UC_MEM_WRITE, addr);
         if (trace) {
-            ((uc_cb_hookmem_t)trace->callback)((uch)env->uc, UC_MEM_WRITE,
+            ((uc_cb_hookmem_t)trace->callback)((uch)uc, UC_MEM_WRITE,
                 (uint64_t)addr, (int)DATA_SIZE, (int64_t)val, trace->user_data);
         }
     }

     // Unicorn: callback on invalid memory
-    if (env->uc->hook_mem_idx && !memory_mapping(env->uc, addr)) {
-        if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
-            (uch)env->uc, UC_MEM_WRITE, addr, DATA_SIZE, (int64_t)val,
-            env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
+    if (uc->hook_mem_idx && mr == NULL) {
+        if (!((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
+            (uch)uc, UC_MEM_WRITE, addr, DATA_SIZE, (int64_t)val,
+            uc->hook_callbacks[uc->hook_mem_idx].user_data)) {
             // save error & quit
             env->invalid_addr = addr;
             env->invalid_error = UC_ERR_MEM_WRITE;
             // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
+            cpu_exit(uc->current_cpu);
             return;
         } else {
             env->invalid_error = UC_ERR_OK;
         }
     }

+    // Unicorn: callback on read-only memory
+    if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { // read-only memory
+        bool result = false;
+        if (uc->hook_mem_idx) {
+            result = ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
+                (uch)uc, UC_MEM_WRITE_NW, addr, DATA_SIZE, (int64_t)val,
+                uc->hook_callbacks[uc->hook_mem_idx].user_data);
+        }
+        if (result) {
+            env->invalid_error = UC_ERR_OK;
+        }
+        else {
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_MEM_WRITE_NW;
+            // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->current_cpu);
+            return;
+        }
+    }
+
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -659,6 +752,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
            Undo that for the recursion.  */
         glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                         mmu_idx, retaddr + GETPC_ADJ);
+        if (env->invalid_error != UC_ERR_OK)
+            break;
     }
     return;
 }

@@ -8268,7 +8268,7 @@ static inline void gen_intermediate_code_internal(uint8_t *gen_opc_cc_op,
     uint64_t flags;
     target_ulong pc_start;
     target_ulong cs_base;
-    int num_insns;
+    int num_insns = 0;
     int max_insns;
     bool block_full = false;

@@ -8353,12 +8353,18 @@ static inline void gen_intermediate_code_internal(uint8_t *gen_opc_cc_op,
     // done with initializing TCG variables
     env->uc->init_tcg = true;

+    // early check to see if the address of this block is the until address
+    if (tb->pc == env->uc->addr_end) {
+        gen_tb_start(tcg_ctx);
+        gen_interrupt(dc, 0x99, tb->pc - tb->cs_base, tb->pc - tb->cs_base);
+        goto done_generating;
+    }
+
     gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE;

     dc->is_jmp = DISAS_NEXT;
     pc_ptr = pc_start;
     lj = -1;
     num_insns = 0;
     max_insns = tb->cflags & CF_COUNT_MASK;
     if (max_insns == 0)
         max_insns = CF_COUNT_MASK;

@@ -73,6 +73,7 @@ static inline void uc_common_init(struct uc_struct* uc)
     uc->pause_all_vcpus = pause_all_vcpus;
     uc->vm_start = vm_start;
     uc->memory_map = memory_map;
+    uc->readonly_mem = memory_region_set_readonly;

     if (!uc->release)
         uc->release = release_common;
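With readonly_mem wired up alongside memory_map, core code can flip a mapped region's writability after the fact while the perms bookkeeping in memory_region_set_readonly() keeps mr->perms consistent. A sketch of the combined flow (a hypothetical call site inside Unicorn, not code from the commit):

    /* Map RW memory, then retroactively make it read-only; setting readonly
       clears UC_PROT_WRITE from mr->perms. */
    MemoryRegion *mr = uc->memory_map(uc, 0x200000, 0x10000,
                                      UC_PROT_READ | UC_PROT_WRITE);
    uc->readonly_mem(mr, true);   /* subsequent guest stores raise UC_MEM_WRITE_NW */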