This code should now build the x86_x64-softmmu part 2.

Author: xorstream
Date: 2017-01-19 22:50:28 +11:00
parent 37f9a248ea
commit 1aeaf5c40d
174 changed files with 2418 additions and 1414 deletions

qemu/translate-all.c

@@ -19,6 +19,7 @@
 /* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */
 #ifdef _WIN32
+#include <winsock2.h>
 #include <windows.h>
 #else
 #include <sys/types.h>
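
The single added line here works around a classic Win32 header-ordering pitfall: if <windows.h> is seen first, it pulls in the legacy <winsock.h>, whose socket declarations collide with <winsock2.h> anywhere later in the translation unit. A minimal self-contained illustration of the required order:

/* winsock-order.c: winsock2.h must precede windows.h, otherwise
 * windows.h drags in the legacy winsock.h and the two sets of socket
 * declarations clash at compile time. */
#ifdef _WIN32
#include <winsock2.h>   /* must come before windows.h */
#include <windows.h>
#endif

int main(void)
{
    return 0;   /* nothing to run; the point is that this compiles cleanly */
}
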
@@ -28,7 +29,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
-#include <inttypes.h>
+#include "platform.h"

 #include "config.h"
@@ -43,7 +44,6 @@
 #if __FreeBSD_version >= 700104
 #define HAVE_KINFO_GETVMMAP
 #define sigqueue sigqueue_freebsd /* avoid redefinition */
-#include <sys/time.h>
 #include <sys/proc.h>
 #include <machine/profile.h>
 #define _KERNEL
@@ -140,7 +140,7 @@ static void tb_clean_internal(struct uc_struct *uc, int i, void** lp)
     if (i == 0 || lp == 0) {
         return;
     }
-    tb_clean_internal(uc, i-1, (*lp) + ((0 >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)));
+    tb_clean_internal(uc, i-1, (void*)(((char*)*lp) + ((0 >> (i * V_L2_BITS)) & (V_L2_SIZE - 1))));
     if (lp && *lp) {
         g_free(*lp);
     }
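
The recurring (char*) casts in this commit all fix the same portability bug: pointer arithmetic on void* is a GNU extension (GCC treats sizeof(void) as 1), and MSVC rejects it outright. A minimal illustration of the portable idiom:

#include <stdio.h>

int main(void)
{
    unsigned char buf[16];
    void *base = buf;

    /* void *p = base + 4;     -- GCC extension only; an error under MSVC */
    void *p = (void*)((char*)base + 4);   /* portable: char has size 1 by definition */

    printf("offset = %d\n", (int)((char*)p - (char*)base));   /* prints 4 */
    return 0;
}
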
@@ -563,8 +563,7 @@ static inline void *split_cross_256mb(struct uc_struct *uc, void *buf1, size_t s
 #endif

 #ifdef USE_STATIC_CODE_GEN_BUFFER
-static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
-    __attribute__((aligned(CODE_GEN_ALIGN)));
+static uint8_t QEMU_ALIGN(CODE_GEN_ALIGN, static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]);

 void free_code_gen_buffer(struct uc_struct *uc)
 {
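
__attribute__((aligned(...))) is GCC/Clang-only syntax; MSVC spells alignment __declspec(align(...)) and wants it ahead of the declarator, which is why the replacement macro takes the whole declaration as its second argument. QEMU_ALIGN is presumably defined along these lines (MY_ALIGN is a stand-in name; the real definition lives in the tree's headers):

/* Hypothetical two-argument alignment macro in the spirit of
 * QEMU_ALIGN(align, decl): MSVC puts the attribute before the
 * declarator, GCC/Clang attach it after the declaration. */
#if defined(_MSC_VER)
#define MY_ALIGN(a, decl) __declspec(align(a)) decl
#else
#define MY_ALIGN(a, decl) decl __attribute__((aligned(a)))
#endif

/* Usage mirrors the new line in the diff: */
static unsigned char MY_ALIGN(64, my_buffer[4096]);
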
@@ -719,7 +718,7 @@ static inline void code_gen_alloc(struct uc_struct *uc, size_t tb_size)
        from TB's to the prologue are going to be in range. It also means
        that we don't need to mark (additional) portions of the data segment
        as executable. */
-    tcg_ctx->code_gen_prologue = tcg_ctx->code_gen_buffer +
+    tcg_ctx->code_gen_prologue = (char*)tcg_ctx->code_gen_buffer +
         tcg_ctx->code_gen_buffer_size - 1024;
     tcg_ctx->code_gen_buffer_size -= 1024;
@@ -765,7 +764,7 @@ static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc)
     TCGContext *tcg_ctx = uc->tcg_ctx;

     if (tcg_ctx->tb_ctx.nb_tbs >= tcg_ctx->code_gen_max_blocks ||
-        (tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer) >=
+        (size_t)(((char*)tcg_ctx->code_gen_ptr - (char*)tcg_ctx->code_gen_buffer)) >=
         tcg_ctx->code_gen_buffer_max_size) {
         return NULL;
     }
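
This hunk combines the two fixes: code_gen_ptr and code_gen_buffer are void* here, so the subtraction needs char* operands, and the resulting signed ptrdiff_t is then compared against an unsigned size, which MSVC reports as a signed/unsigned mismatch without the (size_t) cast. The same check in isolation (buffer_full is an illustrative stand-in):

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

/* Mirrors the tb_alloc check: char* casts make the subtraction legal on
 * both compilers, and the size_t cast avoids the signed/unsigned
 * comparison warning (the pointer difference is a signed ptrdiff_t). */
static bool buffer_full(void *cur, void *base, size_t max_size)
{
    return (size_t)((char*)cur - (char*)base) >= max_size;
}

int main(void)
{
    char buf[128];
    printf("%d\n", buffer_full(buf + 128, buf, sizeof(buf)));   /* prints 1 */
    return 0;
}
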
@@ -849,7 +848,7 @@ void tb_flush(CPUArchState *env1)
             ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)) /
             tcg_ctx->tb_ctx.nb_tbs : 0);
 #endif
-    if ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)
+    if ((unsigned long)((char*)tcg_ctx->code_gen_ptr - (char*)tcg_ctx->code_gen_buffer)
             > tcg_ctx->code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
@@ -972,7 +971,7 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    another TB */
 static inline void tb_reset_jump(TranslationBlock *tb, int n)
 {
-    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
+    tb_set_jmp_target(tb, n, (uintptr_t)((char*)tb->tc_ptr + tb->tb_next_offset[n]));
 }

 /* invalidate one TB */
@@ -1521,8 +1520,8 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
             || memory_region_is_romd(mr))) {
         return;
     }
-    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
-        + addr;
+    ram_addr = (ram_addr_t)((memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
+        + addr);
     tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1, 0);
 }
 #endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
@@ -1604,7 +1603,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     tb_phys_invalidate(cpu->uc, tb, -1);
     /* FIXME: In theory this could raise an exception. In practice
        we have already translated the block once so it's probably ok. */
-    tb_gen_code(cpu, pc, cs_base, flags, cflags);
+    tb_gen_code(cpu, pc, cs_base, (int)flags, cflags);
     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
        the first in the TB) then we end up generating a whole new TB and
        repeating the fault, which is horribly inefficient.
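
The remaining casts in this page, (ram_addr_t)(...) above and (int)flags here, follow one more MSVC convention: implicit conversions to a narrower integer type draw warnings such as C4244, so the commit spells each one out. A minimal example of the pattern (take_flags is a hypothetical stand-in for tb_gen_code's int parameter):

#include <stdint.h>

static void take_flags(int flags) { (void)flags; }

int main(void)
{
    uint64_t flags = 0x7;     /* wider than the int parameter */

    /* take_flags(flags);     -- implicit narrowing: MSVC warns (e.g. C4244) */
    take_flags((int)flags);   /* explicit cast documents the truncation */
    return 0;
}
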