diff --git a/qemu/target-i386/cpu.h b/qemu/target-i386/cpu.h
index a4f400cf..5d55410b 100644
--- a/qemu/target-i386/cpu.h
+++ b/qemu/target-i386/cpu.h
@@ -1137,6 +1137,10 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
 
+/* The binding languages cannot catch exceptions, so check the
+   arguments and return an error instead of raising one. */
+int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel);
+
 /* you can call this signal handler from your SIGBUS and SIGSEGV
    signal handlers to inform the virtual CPU of exceptions. non zero
    is returned if the signal was handled by the virtual CPU. */
diff --git a/qemu/target-i386/seg_helper.c b/qemu/target-i386/seg_helper.c
index c3e4f938..a86a89e4 100644
--- a/qemu/target-i386/seg_helper.c
+++ b/qemu/target-i386/seg_helper.c
@@ -1539,6 +1539,82 @@ void helper_ltr(CPUX86State *env, int selector)
     env->tr.selector = selector;
 }
 
+// Unicorn: validate the arguments before running cpu_x86_load_seg().
+int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel)
+{
+    int selector;
+    uint32_t e2;
+    int cpl, dpl, rpl;
+    SegmentCache *dt;
+    int index;
+    target_ulong ptr;
+
+    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+        return 0;
+    } else {
+        selector = sel & 0xffff;
+        cpl = env->hflags & HF_CPL_MASK;
+        if ((selector & 0xfffc) == 0) {
+            /* null selector case */
+            if (seg_reg == R_SS
+#ifdef TARGET_X86_64
+                && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+#endif
+                ) {
+                return UC_ERR_EXCEPTION;
+            }
+            return 0;
+        } else {
+            if (selector & 0x4) {
+                dt = &env->ldt;
+            } else {
+                dt = &env->gdt;
+            }
+            index = selector & ~7;
+            if ((index + 7) > dt->limit) {
+                return UC_ERR_EXCEPTION;
+            }
+            ptr = dt->base + index;
+            e2 = cpu_ldl_kernel(env, ptr + 4);
+
+            if (!(e2 & DESC_S_MASK)) {
+                return UC_ERR_EXCEPTION;
+            }
+            rpl = selector & 3;
+            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+            if (seg_reg == R_SS) {
+                /* must be a writable data segment */
+                if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+                    return UC_ERR_EXCEPTION;
+                }
+                if (rpl != cpl || dpl != cpl) {
+                    return UC_ERR_EXCEPTION;
+                }
+            } else {
+                /* must be a readable segment */
+                if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+                    return UC_ERR_EXCEPTION;
+                }
+
+                if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+                    /* if not conforming code, test rights */
+                    if (dpl < cpl || dpl < rpl) {
+                        return UC_ERR_EXCEPTION;
+                    }
+                }
+            }
+
+            if (!(e2 & DESC_P_MASK)) {
+                /* not present: #SS for the stack segment, #NP otherwise;
+                   Unicorn reports both as UC_ERR_EXCEPTION */
+                return UC_ERR_EXCEPTION;
+            }
+        }
+    }
+
+    return 0;
+}
+
 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
 {
diff --git a/qemu/target-i386/unicorn.c b/qemu/target-i386/unicorn.c
index d194167d..e1642aea 100644
--- a/qemu/target-i386/unicorn.c
+++ b/qemu/target-i386/unicorn.c
@@ -795,6 +795,7 @@ int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i
 {
     CPUState *mycpu = uc->cpu;
     int i;
+    int ret;
 
     for (i = 0; i < count; i++) {
         unsigned int regid = regs[i];
@@ -1015,21 +1016,45 @@ int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i
             uc_emu_stop(uc);
             break;
         case UC_X86_REG_CS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_CS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_CS, *(uint16_t *)value);
             break;
         case UC_X86_REG_DS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value);
             break;
         case UC_X86_REG_SS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value);
             break;
         case UC_X86_REG_ES:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value);
             break;
         case UC_X86_REG_FS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value);
             break;
         case UC_X86_REG_GS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value);
             break;
         case UC_X86_REG_IDTR:
@@ -1226,9 +1251,17 @@ int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i
             X86_CPU(uc, mycpu)->env.segs[R_ES].selector = *(uint16_t *)value;
             break;
         case UC_X86_REG_FS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value);
             break;
         case UC_X86_REG_GS:
+            ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value);
             break;
         case UC_X86_REG_R8:
diff --git a/uc.c b/uc.c
index 11399017..e3399657 100644
--- a/uc.c
+++ b/uc.c
@@ -371,12 +371,13 @@ uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count)
 
 UNICORN_EXPORT
 uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count)
 {
+    int ret = UC_ERR_OK;
     if (uc->reg_write)
-        uc->reg_write(uc, (unsigned int *)ids, vals, count);
+        ret = uc->reg_write(uc, (unsigned int *)ids, vals, count);
     else
-        return -1; // FIXME: need a proper uc_err
+        return UC_ERR_EXCEPTION; // FIXME: need a proper uc_err
 
-    return UC_ERR_OK;
+    return ret;
 }
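
A minimal sketch of what the patch buys a caller, not part of the diff itself. It assumes
Unicorn's default UC_MODE_32 state starts with CR0.PE set (protected mode, no VM86), in
which case writing a null selector to SS must now fail with an error code instead of
raising a CPU exception that host-language bindings cannot catch. Only public API calls
from the touched code paths (uc_open, uc_reg_write_batch, uc_strerror, uc_close) are used:

#include <stdio.h>
#include <unicorn/unicorn.h>

int main(void)
{
    uc_engine *uc;
    uc_err err;
    int ids[1] = { UC_X86_REG_SS };
    uint16_t ss = 0;                /* null selector: never legal for SS
                                       in protected mode */
    void *vals[1] = { &ss };

    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "uc_open: %s\n", uc_strerror(err));
        return 1;
    }

    /* Before this patch the bad selector reached helper code that raises
       a CPU exception (a longjmp the bindings cannot intercept). Now
       uc_check_cpu_x86_load_seg() rejects it up front and the error is
       propagated through x86_reg_write() and uc_reg_write_batch(). */
    err = uc_reg_write_batch(uc, ids, vals, 1);
    if (err != UC_ERR_OK) {
        printf("SS write rejected as expected: %s\n", uc_strerror(err));
    }

    uc_close(uc);
    return 0;
}

Because the check duplicates helper_load_seg()'s validation before anything is modified,
a rejected write leaves the segment register untouched rather than half-loaded.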