import

qemu/target-i386/Makefile.objs (new file, 5 lines)
@@ -0,0 +1,5 @@
obj-y += translate.o helper.o cpu.o
obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
obj-$(CONFIG_SOFTMMU) += arch_memory_mapping.o
obj-y += unicorn.o

qemu/target-i386/TODO (new file, 31 lines)
@@ -0,0 +1,31 @@
Correctness issues:

- some eflags manipulation incorrectly reset the bit 0x2.
- SVM: test, cpu save/restore, SMM save/restore.
- x86_64: lcall/ljmp intel/amd differences ?
- better code fetch (different exception handling + CS.limit support)
- user/kernel PUSHL/POPL in helper.c
- add missing cpuid tests
- return UD exception if LOCK prefix incorrectly used
- test ldt limit < 7 ?
- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret)
- full support of segment limit/rights
- full x87 exception support
- improve x87 bit exactness (use bochs code ?)
- DRx register support
- CR0.AC emulation
- SSE alignment checks

Optimizations/Features:

- add SVM nested paging support
- add VMX support
- add AVX support
- add SSE5 support
- fxsave/fxrstor AMD extensions
- improve monitor/mwait support
- faster EFLAGS update: consider SZAP, C, O can be updated separately
  with a bit field in CC_OP and more state variables.
- evaluate x87 stack pointer statically
- find a way to avoid translating the same TB several times if CR0.TS
  is set or not.

qemu/target-i386/arch_memory_mapping.c (new file, 279 lines)
@@ -0,0 +1,279 @@
/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"

/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, AddressSpace *as,
                     hwaddr pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint64_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = ldq_phys(as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(as, start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* 32-bit Paging */
static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pte_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint32_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = ldl_phys(as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = pte & ~0xfff;
        if (cpu_physical_memory_is_io(as, start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* PAE Paging or IA-32e Paging */
#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */

static void walk_pde(MemoryMappingList *list, AddressSpace *as,
                     hwaddr pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pde_addr, pte_start_addr, start_paddr;
    uint64_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = ldq_phys(as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ff) << 21);
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(as, start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 21);
            continue;
        }

        pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
        walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
    }
}

/* 32-bit Paging */
static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pde_start_addr, int32_t a20_mask,
                      bool pse)
{
    hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
    uint32_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = ldl_phys(as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3ff) << 22);
        if ((pde & PG_PSE_MASK) && pse) {
            /*
             * 4 MB page:
             * bits 39:32 are bits 20:13 of the PDE
             * bits 31:22 are bits 31:22 of the PDE
             */
            high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
            start_paddr = (pde & ~0x3fffff) | high_paddr;
            if (cpu_physical_memory_is_io(as, start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 22);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
    }
}

/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
                       hwaddr pdpe_start_addr, int32_t a20_mask)
{
    hwaddr pdpe_addr, pde_start_addr;
    uint64_t pdpe;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(as, pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3) << 30);
        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
    }
}

#ifdef TARGET_X86_64
/* IA-32e Paging */
static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pdpe_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pdpe_addr, pde_start_addr, start_paddr;
    uint64_t pdpe;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(as, pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
        if (pdpe & PG_PSE_MASK) {
            /* 1 GB page */
            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(as, start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 30);
            continue;
        }

        pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
    }
}

/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
                       hwaddr pml4e_start_addr, int32_t a20_mask)
{
    hwaddr pml4e_addr, pdpe_start_addr;
    uint64_t pml4e;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = ldq_phys(as, pml4e_addr);
        if (!(pml4e & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
        pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
        walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
    }
}
#endif

void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
                                Error **errp)
{
    X86CPU *cpu = X86_CPU(cs->uc, cs);
    CPUX86State *env = &cpu->env;

    if (!cpu_paging_enabled(cs)) {
        /* paging is disabled */
        return;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            hwaddr pml4e_addr;

            pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
            walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
        } else
#endif
        {
            hwaddr pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
        }
    } else {
        hwaddr pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
    }
}
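
The walkers above each peel off one level of the paging hierarchy; the only non-obvious arithmetic is the PSE-36 case in walk_pde2, where a 4 MB page's physical base is assembled from two PDE bit fields. A minimal standalone sketch of that assembly (function and variable names here are illustrative, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: assemble a 4 MB (PSE-36) page base from a 32-bit PDE,
 * mirroring walk_pde2 above. PDE bits 20:13 supply physical bits 39:32,
 * PDE bits 31:22 supply physical bits 31:22. */
static uint64_t pse36_page_base(uint32_t pde)
{
    uint64_t high = ((uint64_t)(pde & 0x1fe000)) << 19; /* bits 20:13 -> 39:32 */
    uint64_t low = pde & 0xffc00000u;                   /* bits 31:22 kept in place */
    return high | low;
}

int main(void)
{
    /* A PDE with bits 31:22 = 0x1 and bits 20:13 = 0x01 maps to 0x1_00400000. */
    printf("0x%llx\n", (unsigned long long)pse36_page_base(0x00402000u));
    return 0;
}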

qemu/target-i386/cc_helper.c (new file, 394 lines)
@@ -0,0 +1,394 @@
/*
 * x86 condition code helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

#define SHIFT 0
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "cc_helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "cc_helper_template.h"
#undef SHIFT

#endif

static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_C) | (dst * CC_C);
}

static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_O) | (src2 * CC_O);
}

static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}

target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                   target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return src1;
    case CC_OP_CLR:
        return CC_Z | CC_P;

    case CC_OP_MULB:
        return compute_all_mulb(dst, src1);
    case CC_OP_MULW:
        return compute_all_mulw(dst, src1);
    case CC_OP_MULL:
        return compute_all_mull(dst, src1);

    case CC_OP_ADDB:
        return compute_all_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_all_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_all_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_all_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_all_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_all_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_all_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_all_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_all_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_all_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_all_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_all_sbbl(dst, src1, src2);

    case CC_OP_LOGICB:
        return compute_all_logicb(dst, src1);
    case CC_OP_LOGICW:
        return compute_all_logicw(dst, src1);
    case CC_OP_LOGICL:
        return compute_all_logicl(dst, src1);

    case CC_OP_INCB:
        return compute_all_incb(dst, src1);
    case CC_OP_INCW:
        return compute_all_incw(dst, src1);
    case CC_OP_INCL:
        return compute_all_incl(dst, src1);

    case CC_OP_DECB:
        return compute_all_decb(dst, src1);
    case CC_OP_DECW:
        return compute_all_decw(dst, src1);
    case CC_OP_DECL:
        return compute_all_decl(dst, src1);

    case CC_OP_SHLB:
        return compute_all_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_all_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_all_shll(dst, src1);

    case CC_OP_SARB:
        return compute_all_sarb(dst, src1);
    case CC_OP_SARW:
        return compute_all_sarw(dst, src1);
    case CC_OP_SARL:
        return compute_all_sarl(dst, src1);

    case CC_OP_BMILGB:
        return compute_all_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_all_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_all_bmilgl(dst, src1);

    case CC_OP_ADCX:
        return compute_all_adcx(dst, src1, src2);
    case CC_OP_ADOX:
        return compute_all_adox(dst, src1, src2);
    case CC_OP_ADCOX:
        return compute_all_adcox(dst, src1, src2);

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq(dst, src1);
    case CC_OP_ADDQ:
        return compute_all_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_all_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_all_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_all_sbbq(dst, src1, src2);
    case CC_OP_LOGICQ:
        return compute_all_logicq(dst, src1);
    case CC_OP_INCQ:
        return compute_all_incq(dst, src1);
    case CC_OP_DECQ:
        return compute_all_decq(dst, src1);
    case CC_OP_SHLQ:
        return compute_all_shlq(dst, src1);
    case CC_OP_SARQ:
        return compute_all_sarq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_all_bmilgq(dst, src1);
#endif
    }
}

uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
{
    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
}

target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
                                 target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_CLR:
        return 0;

    case CC_OP_EFLAGS:
    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
    case CC_OP_ADOX:
        return src1 & 1;

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
        return src1;

    case CC_OP_MULB:
    case CC_OP_MULW:
    case CC_OP_MULL:
    case CC_OP_MULQ:
        return src1 != 0;

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return dst;

    case CC_OP_ADDB:
        return compute_c_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_c_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_c_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);

    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);

    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);

#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
#endif
    }
}

void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}

target_ulong helper_read_eflags(CPUX86State *env)
{
    uint32_t eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    eflags |= (env->df & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_reset_rf(CPUX86State *env)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(CPUX86State *env)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(CPUX86State *env)
{
    env->eflags |= IF_MASK;
}

void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(CPUX86State *env)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(env, EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(CPUX86State *env)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(CPUX86State *env)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
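
For reference, parity_table in cc_helper.c above is simply a precomputed PF lookup: entry i is CC_P (0x04 in QEMU's eflags encoding) when byte i has an even number of set bits, and 0 otherwise. A small standalone check of that property (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define CC_P 0x0004 /* PF bit, as used by the table above */

/* Compute the PF value the same way parity_table encodes it. */
static int pf_for_byte(uint8_t b)
{
    b ^= b >> 4;
    b ^= b >> 2;
    b ^= b >> 1;
    return (b & 1) ? 0 : CC_P;
}

int main(void)
{
    /* Matches the first table row: 0x00 -> CC_P, 0x01 -> 0, 0x03 -> CC_P. */
    printf("%d %d %d\n", pf_for_byte(0x00), pf_for_byte(0x01), pf_for_byte(0x03));
    return 0;
}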

qemu/target-i386/cc_helper_template.h (new file, 242 lines)
@@ -0,0 +1,242 @@
/*
 * x86 condition code helpers
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DATA_BITS (1 << (3 + SHIFT))

#if DATA_BITS == 8
#define SUFFIX b
#define DATA_TYPE uint8_t
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_TYPE uint16_t
#elif DATA_BITS == 32
#define SUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_TYPE uint64_t
#else
#error unhandled operand size
#endif

#define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1))

/* dynamic flags computation */

static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;
    DATA_TYPE src2 = dst - src1;

    cf = dst < src1;
    pf = parity_table[(uint8_t)dst];
    af = (dst ^ src1 ^ src2) & CC_A;
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    return dst < src1;
}

static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
                                         DATA_TYPE src3)
{
    int cf, pf, af, zf, sf, of;
    DATA_TYPE src2 = dst - src1 - src3;

    cf = (src3 ? dst <= src1 : dst < src1);
    pf = parity_table[(uint8_t)dst];
    af = (dst ^ src1 ^ src2) & 0x10;
    zf = (dst == 0) << 6;
    sf = lshift(dst, 8 - DATA_BITS) & 0x80;
    of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
                                       DATA_TYPE src3)
{
    return src3 ? dst <= src1 : dst < src1;
}

static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
{
    int cf, pf, af, zf, sf, of;
    DATA_TYPE src1 = dst + src2;

    cf = src1 < src2;
    pf = parity_table[(uint8_t)dst];
    af = (dst ^ src1 ^ src2) & CC_A;
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
{
    DATA_TYPE src1 = dst + src2;

    return src1 < src2;
}

static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
                                         DATA_TYPE src3)
{
    int cf, pf, af, zf, sf, of;
    DATA_TYPE src1 = dst + src2 + src3;

    cf = (src3 ? src1 <= src2 : src1 < src2);
    pf = parity_table[(uint8_t)dst];
    af = (dst ^ src1 ^ src2) & 0x10;
    zf = (dst == 0) << 6;
    sf = lshift(dst, 8 - DATA_BITS) & 0x80;
    of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
                                       DATA_TYPE src3)
{
    DATA_TYPE src1 = dst + src2 + src3;

    return (src3 ? src1 <= src2 : src1 < src2);
}

static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;

    cf = 0;
    pf = parity_table[(uint8_t)dst];
    af = 0;
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = 0;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;
    DATA_TYPE src2;

    cf = src1;
    src1 = dst - 1;
    src2 = 1;
    pf = parity_table[(uint8_t)dst];
    af = (dst ^ src1 ^ src2) & CC_A;
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = (dst == SIGN_MASK) * CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;
    DATA_TYPE src2;

    cf = src1;
    src1 = dst + 1;
    src2 = 1;
    pf = parity_table[(uint8_t)dst];
    af = (dst ^ src1 ^ src2) & CC_A;
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = (dst == SIGN_MASK - 1) * CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;

    cf = (src1 >> (DATA_BITS - 1)) & CC_C;
    pf = parity_table[(uint8_t)dst];
    af = 0; /* undefined */
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    /* of is defined iff shift count == 1 */
    of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    return (src1 >> (DATA_BITS - 1)) & CC_C;
}

static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;

    cf = src1 & 1;
    pf = parity_table[(uint8_t)dst];
    af = 0; /* undefined */
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    /* of is defined iff shift count == 1 */
    of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
    return cf | pf | af | zf | sf | of;
}

/* NOTE: we compute the flags like the P4. On older CPUs, only OF and
   CF are modified and it is slower to do that. Note as well that we
   don't truncate SRC1 for computing carry to DATA_TYPE. */
static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
{
    int cf, pf, af, zf, sf, of;

    cf = (src1 != 0);
    pf = parity_table[(uint8_t)dst];
    af = 0; /* undefined */
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = cf * CC_O;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    int cf, pf, af, zf, sf, of;

    cf = (src1 == 0);
    pf = 0; /* undefined */
    af = 0; /* undefined */
    zf = (dst == 0) * CC_Z;
    sf = lshift(dst, 8 - DATA_BITS) & CC_S;
    of = 0;
    return cf | pf | af | zf | sf | of;
}

static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
    return src1 == 0;
}

#undef DATA_BITS
#undef SIGN_MASK
#undef DATA_TYPE
#undef DATA_MASK
#undef SUFFIX
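
cc_helper_template.h is written to be included several times: cc_helper.c defines SHIFT before each inclusion, so DATA_BITS, DATA_TYPE and SUFFIX expand differently and glue() stamps out the b/w/l/q variants of every compute_* function. A stripped-down sketch of the same multi-inclusion pattern, using hypothetical file and function names:

/* widen_template.h (hypothetical): one body, instantiated once per operand size */
#define DATA_BITS (1 << (3 + SHIFT))
#if DATA_BITS == 8
#define SUFFIX b
#define DATA_TYPE uint8_t
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_TYPE uint16_t
#endif

static DATA_TYPE glue(sign_bit, SUFFIX)(DATA_TYPE x)
{
    return x & ((DATA_TYPE)1 << (DATA_BITS - 1));
}

#undef DATA_BITS
#undef DATA_TYPE
#undef SUFFIX

/* widen.c (hypothetical): include the template once per size */
#include <stdint.h>
#define glue2(a, b) a##b
#define glue(a, b) glue2(a, b)

#define SHIFT 0
#include "widen_template.h"
#undef SHIFT

#define SHIFT 1
#include "widen_template.h"
#undef SHIFT
/* sign_bitb() and sign_bitw() now both exist */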

qemu/target-i386/cpu-qom.h (new file, 158 lines)
@@ -0,0 +1,158 @@
/*
 * QEMU x86 CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */
#ifndef QEMU_I386_CPU_QOM_H
#define QEMU_I386_CPU_QOM_H

#include "qom/cpu.h"
#include "cpu.h"
#include "qapi/error.h"

#ifdef TARGET_X86_64
#define TYPE_X86_CPU "x86_64-cpu"
#else
#define TYPE_X86_CPU "i386-cpu"
#endif

#define X86_CPU_CLASS(uc, klass) \
    OBJECT_CLASS_CHECK(uc, X86CPUClass, (klass), TYPE_X86_CPU)
#define X86_CPU(uc, obj) \
    OBJECT_CHECK(uc, X86CPU, (obj), TYPE_X86_CPU)
#define X86_CPU_GET_CLASS(uc, obj) \
    OBJECT_GET_CLASS(uc, X86CPUClass, (obj), TYPE_X86_CPU)

/**
 * X86CPUDefinition:
 *
 * CPU model definition data that was not converted to QOM per-subclass
 * property defaults yet.
 */
typedef struct X86CPUDefinition X86CPUDefinition;

/**
 * X86CPUClass:
 * @cpu_def: CPU model definition
 * @kvm_required: Whether CPU model requires KVM to be enabled.
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * An x86 CPU model or family.
 */
typedef struct X86CPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/

    /* Should be eventually replaced by subclass-specific property defaults. */
    X86CPUDefinition *cpu_def;

    bool kvm_required;

    DeviceRealize parent_realize;
    void (*parent_reset)(CPUState *cpu);
} X86CPUClass;

/**
 * X86CPU:
 * @env: #CPUX86State
 * @migratable: If set, only migratable flags will be accepted when "enforce"
 * mode is used, and only migratable flags will be included in the "host"
 * CPU model.
 *
 * An x86 CPU.
 */
typedef struct X86CPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUX86State env;

    bool hyperv_vapic;
    bool hyperv_relaxed_timing;
    int hyperv_spinlock_attempts;
    bool hyperv_time;
    bool check_cpuid;
    bool enforce_cpuid;
    bool expose_kvm;
    bool migratable;
    bool host_features;

    /* if true the CPUID code directly forward host cache leaves to the guest */
    bool cache_info_passthrough;

    /* Features that were filtered out because of missing host capabilities */
    uint32_t filtered_features[FEATURE_WORDS];

    /* Enable PMU CPUID bits. This can't be enabled by default yet because
     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
     * capabilities) directly to the guest.
     */
    bool enable_pmu;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;
} X86CPU;

static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
{
    return container_of(env, X86CPU, env);
}

#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))

#define ENV_OFFSET offsetof(X86CPU, env)

#ifndef CONFIG_USER_ONLY
extern struct VMStateDescription vmstate_x86_cpu;
#endif

/**
 * x86_cpu_do_interrupt:
 * @cpu: vCPU the interrupt is to be handled by.
 */
void x86_cpu_do_interrupt(CPUState *cpu);
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);

int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, void *opaque);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, void *opaque);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 void *opaque);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 void *opaque);

void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                                Error **errp);

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags);

hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

void x86_cpu_exec_enter(CPUState *cpu);
void x86_cpu_exec_exit(CPUState *cpu);

#endif

qemu/target-i386/cpu.c (new file, 2502 lines)
File diff suppressed because it is too large

qemu/target-i386/cpu.h (new file, 1379 lines)
File diff suppressed because it is too large

qemu/target-i386/excp_helper.c (new file, 133 lines)
@@ -0,0 +1,133 @@
/*
 * x86 exception helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"

#include "uc_priv.h"

#if 0
#define raise_exception_err(env, a, b) \
    do { \
        qemu_log("raise_exception line=%d\n", __LINE__); \
        (raise_exception_err)(env, a, b); \
    } while (0)
#endif

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(env->uc);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code);
        intno = check_exception(env, intno, &error_code);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
    }

    cs->exception_index = intno; // qq
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0);
}
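
The core of check_exception above is the x86 contributory-exception rule: a contributory exception or page fault raised while another contributory exception or page fault is still being delivered escalates to a double fault (and a fault during #DF delivery becomes a triple fault, handled separately). A standalone restatement of just that decision, with plain vector numbers in place of the EXCP* constants (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the contributory-exception rule in check_exception above
 * (vector numbers: 0 = #DE, 10-13 = #TS/#NP/#SS/#GP, 14 = #PF). */
static bool contributory(int v)
{
    return v == 0 || (v >= 10 && v <= 13);
}

static bool becomes_double_fault(int old_exception, int intno)
{
    return (contributory(old_exception) && contributory(intno)) ||
           (old_exception == 14 && (contributory(intno) || intno == 14));
}

int main(void)
{
    /* #GP while delivering #PF escalates to #DF; #DE during #DB does not. */
    printf("%d %d\n", becomes_double_fault(14, 13), becomes_double_fault(1, 0));
    return 0;
}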

qemu/target-i386/fpu_helper.c (new file, 1294 lines)
File diff suppressed because it is too large

qemu/target-i386/helper.c (new file, 1144 lines)
File diff suppressed because it is too large

qemu/target-i386/helper.h (new file, 227 lines)
@@ -0,0 +1,227 @@
DEF_HELPER_5(uc_tracecode, void, i32, ptr, ptr, i64, ptr)

DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)

DEF_HELPER_1(lock, void, env)
DEF_HELPER_1(unlock, void, env)
DEF_HELPER_3(write_eflags, void, env, tl, i32)
DEF_HELPER_1(read_eflags, tl, env)
DEF_HELPER_2(divb_AL, void, env, tl)
DEF_HELPER_2(idivb_AL, void, env, tl)
DEF_HELPER_2(divw_AX, void, env, tl)
DEF_HELPER_2(idivw_AX, void, env, tl)
DEF_HELPER_2(divl_EAX, void, env, tl)
DEF_HELPER_2(idivl_EAX, void, env, tl)
#ifdef TARGET_X86_64
DEF_HELPER_2(divq_EAX, void, env, tl)
DEF_HELPER_2(idivq_EAX, void, env, tl)
#endif

DEF_HELPER_2(aam, void, env, int)
DEF_HELPER_2(aad, void, env, int)
DEF_HELPER_1(aaa, void, env)
DEF_HELPER_1(aas, void, env)
DEF_HELPER_1(daa, void, env)
DEF_HELPER_1(das, void, env)

DEF_HELPER_2(lsl, tl, env, tl)
DEF_HELPER_2(lar, tl, env, tl)
DEF_HELPER_2(verr, void, env, tl)
DEF_HELPER_2(verw, void, env, tl)
DEF_HELPER_2(lldt, void, env, int)
DEF_HELPER_2(ltr, void, env, int)
DEF_HELPER_3(load_seg, void, env, int, int)
DEF_HELPER_4(ljmp_protected, void, env, int, tl, int)
DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
DEF_HELPER_5(lcall_protected, void, env, int, tl, int, int)
DEF_HELPER_2(iret_real, void, env, int)
DEF_HELPER_3(iret_protected, void, env, int, int)
DEF_HELPER_3(lret_protected, void, env, int, int)
DEF_HELPER_2(read_crN, tl, env, int)
DEF_HELPER_3(write_crN, void, env, int, tl)
DEF_HELPER_2(lmsw, void, env, tl)
DEF_HELPER_1(clts, void, env)
DEF_HELPER_3(movl_drN_T0, void, env, int, tl)
DEF_HELPER_2(invlpg, void, env, tl)

DEF_HELPER_4(enter_level, void, env, int, int, tl)
#ifdef TARGET_X86_64
DEF_HELPER_4(enter64_level, void, env, int, int, tl)
#endif
DEF_HELPER_1(sysenter, void, env)
DEF_HELPER_2(sysexit, void, env, int)
#ifdef TARGET_X86_64
DEF_HELPER_2(syscall, void, env, int)
DEF_HELPER_2(sysret, void, env, int)
#endif
DEF_HELPER_2(hlt, void, env, int)
DEF_HELPER_2(monitor, void, env, tl)
DEF_HELPER_2(mwait, void, env, int)
DEF_HELPER_2(pause, void, env, int)
DEF_HELPER_1(debug, void, env)
DEF_HELPER_1(reset_rf, void, env)
DEF_HELPER_3(raise_interrupt, void, env, int, int)
DEF_HELPER_2(raise_exception, void, env, int)
DEF_HELPER_1(cli, void, env)
DEF_HELPER_1(sti, void, env)
DEF_HELPER_1(clac, void, env)
DEF_HELPER_1(stac, void, env)
DEF_HELPER_1(set_inhibit_irq, void, env)
DEF_HELPER_1(reset_inhibit_irq, void, env)
DEF_HELPER_3(boundw, void, env, tl, int)
DEF_HELPER_3(boundl, void, env, tl, int)
DEF_HELPER_1(rsm, void, env)
DEF_HELPER_2(into, void, env, int)
DEF_HELPER_2(cmpxchg8b, void, env, tl)
#ifdef TARGET_X86_64
DEF_HELPER_2(cmpxchg16b, void, env, tl)
#endif
DEF_HELPER_1(single_step, void, env)
DEF_HELPER_1(cpuid, void, env)
DEF_HELPER_1(rdtsc, void, env)
DEF_HELPER_1(rdtscp, void, env)
DEF_HELPER_1(rdpmc, void, env)
DEF_HELPER_1(rdmsr, void, env)
DEF_HELPER_1(wrmsr, void, env)

DEF_HELPER_2(check_iob, void, env, i32)
DEF_HELPER_2(check_iow, void, env, i32)
DEF_HELPER_2(check_iol, void, env, i32)
DEF_HELPER_3(outb, void, ptr, i32, i32)
DEF_HELPER_2(inb, tl, ptr, i32)
DEF_HELPER_3(outw, void, ptr, i32, i32)
DEF_HELPER_2(inw, tl, ptr, i32)
DEF_HELPER_3(outl, void, ptr, i32, i32)
DEF_HELPER_2(inl, tl, ptr, i32)

DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
DEF_HELPER_3(vmexit, void, env, i32, i64)
DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)
DEF_HELPER_3(vmrun, void, env, int, int)
DEF_HELPER_1(vmmcall, void, env)
DEF_HELPER_2(vmload, void, env, int)
DEF_HELPER_2(vmsave, void, env, int)
DEF_HELPER_1(stgi, void, env)
DEF_HELPER_1(clgi, void, env)
DEF_HELPER_1(skinit, void, env)
DEF_HELPER_2(invlpga, void, env, int)

/* x86 FPU */

DEF_HELPER_2(flds_FT0, void, env, i32)
DEF_HELPER_2(fldl_FT0, void, env, i64)
DEF_HELPER_2(fildl_FT0, void, env, s32)
DEF_HELPER_2(flds_ST0, void, env, i32)
DEF_HELPER_2(fldl_ST0, void, env, i64)
DEF_HELPER_2(fildl_ST0, void, env, s32)
DEF_HELPER_2(fildll_ST0, void, env, s64)
DEF_HELPER_1(fsts_ST0, i32, env)
DEF_HELPER_1(fstl_ST0, i64, env)
DEF_HELPER_1(fist_ST0, s32, env)
DEF_HELPER_1(fistl_ST0, s32, env)
DEF_HELPER_1(fistll_ST0, s64, env)
DEF_HELPER_1(fistt_ST0, s32, env)
DEF_HELPER_1(fisttl_ST0, s32, env)
DEF_HELPER_1(fisttll_ST0, s64, env)
DEF_HELPER_2(fldt_ST0, void, env, tl)
DEF_HELPER_2(fstt_ST0, void, env, tl)
DEF_HELPER_1(fpush, void, env)
DEF_HELPER_1(fpop, void, env)
DEF_HELPER_1(fdecstp, void, env)
DEF_HELPER_1(fincstp, void, env)
DEF_HELPER_2(ffree_STN, void, env, int)
DEF_HELPER_1(fmov_ST0_FT0, void, env)
DEF_HELPER_2(fmov_FT0_STN, void, env, int)
DEF_HELPER_2(fmov_ST0_STN, void, env, int)
DEF_HELPER_2(fmov_STN_ST0, void, env, int)
DEF_HELPER_2(fxchg_ST0_STN, void, env, int)
DEF_HELPER_1(fcom_ST0_FT0, void, env)
DEF_HELPER_1(fucom_ST0_FT0, void, env)
DEF_HELPER_1(fcomi_ST0_FT0, void, env)
DEF_HELPER_1(fucomi_ST0_FT0, void, env)
DEF_HELPER_1(fadd_ST0_FT0, void, env)
DEF_HELPER_1(fmul_ST0_FT0, void, env)
DEF_HELPER_1(fsub_ST0_FT0, void, env)
DEF_HELPER_1(fsubr_ST0_FT0, void, env)
DEF_HELPER_1(fdiv_ST0_FT0, void, env)
DEF_HELPER_1(fdivr_ST0_FT0, void, env)
DEF_HELPER_2(fadd_STN_ST0, void, env, int)
DEF_HELPER_2(fmul_STN_ST0, void, env, int)
DEF_HELPER_2(fsub_STN_ST0, void, env, int)
DEF_HELPER_2(fsubr_STN_ST0, void, env, int)
DEF_HELPER_2(fdiv_STN_ST0, void, env, int)
DEF_HELPER_2(fdivr_STN_ST0, void, env, int)
DEF_HELPER_1(fchs_ST0, void, env)
DEF_HELPER_1(fabs_ST0, void, env)
DEF_HELPER_1(fxam_ST0, void, env)
DEF_HELPER_1(fld1_ST0, void, env)
DEF_HELPER_1(fldl2t_ST0, void, env)
DEF_HELPER_1(fldl2e_ST0, void, env)
DEF_HELPER_1(fldpi_ST0, void, env)
DEF_HELPER_1(fldlg2_ST0, void, env)
DEF_HELPER_1(fldln2_ST0, void, env)
DEF_HELPER_1(fldz_ST0, void, env)
DEF_HELPER_1(fldz_FT0, void, env)
DEF_HELPER_1(fnstsw, i32, env)
DEF_HELPER_1(fnstcw, i32, env)
DEF_HELPER_2(fldcw, void, env, i32)
DEF_HELPER_1(fclex, void, env)
DEF_HELPER_1(fwait, void, env)
DEF_HELPER_1(fninit, void, env)
DEF_HELPER_2(fbld_ST0, void, env, tl)
DEF_HELPER_2(fbst_ST0, void, env, tl)
DEF_HELPER_1(f2xm1, void, env)
DEF_HELPER_1(fyl2x, void, env)
DEF_HELPER_1(fptan, void, env)
DEF_HELPER_1(fpatan, void, env)
DEF_HELPER_1(fxtract, void, env)
DEF_HELPER_1(fprem1, void, env)
DEF_HELPER_1(fprem, void, env)
DEF_HELPER_1(fyl2xp1, void, env)
DEF_HELPER_1(fsqrt, void, env)
DEF_HELPER_1(fsincos, void, env)
DEF_HELPER_1(frndint, void, env)
DEF_HELPER_1(fscale, void, env)
DEF_HELPER_1(fsin, void, env)
DEF_HELPER_1(fcos, void, env)
DEF_HELPER_3(fstenv, void, env, tl, int)
DEF_HELPER_3(fldenv, void, env, tl, int)
DEF_HELPER_3(fsave, void, env, tl, int)
DEF_HELPER_3(frstor, void, env, tl, int)
DEF_HELPER_3(fxsave, void, env, tl, int)
DEF_HELPER_3(fxrstor, void, env, tl, int)

DEF_HELPER_FLAGS_1(clz_x86, TCG_CALL_NO_RWG_SE, tl, tl)

#ifdef TARGET_I386
#define helper_clz helper_clz_x86
#define gen_helper_clz gen_helper_clz_x86
#endif

DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)

/* MMX/SSE */

DEF_HELPER_2(ldmxcsr, void, env, i32)
DEF_HELPER_1(enter_mmx, void, env)
DEF_HELPER_1(emms, void, env)
DEF_HELPER_3(movq, void, env, ptr, ptr)

#define SHIFT 0
#include "ops_sse_header.h"
#define SHIFT 1
#include "ops_sse_header.h"

DEF_HELPER_3(rclb, tl, env, tl, tl)
DEF_HELPER_3(rclw, tl, env, tl, tl)
DEF_HELPER_3(rcll, tl, env, tl, tl)
DEF_HELPER_3(rcrb, tl, env, tl, tl)
DEF_HELPER_3(rcrw, tl, env, tl, tl)
DEF_HELPER_3(rcrl, tl, env, tl, tl)
#ifdef TARGET_X86_64
DEF_HELPER_3(rclq, tl, env, tl, tl)
DEF_HELPER_3(rcrq, tl, env, tl, tl)
#endif
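
Each DEF_HELPER_n line in helper.h above is a declaration in QEMU's helper description language: when the header is pulled in through exec/helper-proto.h, the macros expand to ordinary C prototypes named helper_<name> (and, in other expansion contexts, to TCG call wrappers). A simplified, purely illustrative expansion using a made-up macro name:

#include <stdint.h>

typedef struct CPUX86State CPUX86State; /* stand-in for the real QEMU type */

/* Hypothetical, simplified version of what the helper-proto machinery does for
 * one arity: the real macros also map the type tokens (env, tl, i32, ...) to C
 * types and register the helper with TCG. */
#define SKETCH_DEF_HELPER_2(name, ret, t1, t2) ret helper_##name(t1, t2);

SKETCH_DEF_HELPER_2(aam, void, CPUX86State *, int)
/* expands to: void helper_aam(CPUX86State *, int);
   matching the DEF_HELPER_2(aam, void, env, int) line above and the
   helper_aam definition in int_helper.c below */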
471
qemu/target-i386/int_helper.c
Normal file
471
qemu/target-i386/int_helper.c
Normal file
@@ -0,0 +1,471 @@
|
||||
/*
|
||||
* x86 integer helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "cpu.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "exec/helper-proto.h"
|
||||
|
||||
//#define DEBUG_MULDIV
|
||||
|
||||
/* modulo 9 table */
|
||||
static const uint8_t rclb_table[32] = {
|
||||
0, 1, 2, 3, 4, 5, 6, 7,
|
||||
8, 0, 1, 2, 3, 4, 5, 6,
|
||||
7, 8, 0, 1, 2, 3, 4, 5,
|
||||
6, 7, 8, 0, 1, 2, 3, 4,
|
||||
};
|
||||
|
||||
/* modulo 17 table */
|
||||
static const uint8_t rclw_table[32] = {
|
||||
0, 1, 2, 3, 4, 5, 6, 7,
|
||||
8, 9, 10, 11, 12, 13, 14, 15,
|
||||
16, 0, 1, 2, 3, 4, 5, 6,
|
||||
7, 8, 9, 10, 11, 12, 13, 14,
|
||||
};
|
||||
|
||||
/* division, flags are undefined */
|
||||
|
||||
void helper_divb_AL(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
unsigned int num, den, q, r;
|
||||
|
||||
num = (env->regs[R_EAX] & 0xffff);
|
||||
den = (t0 & 0xff);
|
||||
if (den == 0) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q = (num / den);
|
||||
if (q > 0xff) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q &= 0xff;
|
||||
r = (num % den) & 0xff;
|
||||
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
|
||||
}
|
||||
|
||||
void helper_idivb_AL(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
int num, den, q, r;
|
||||
|
||||
num = (int16_t)env->regs[R_EAX];
|
||||
den = (int8_t)t0;
|
||||
if (den == 0) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q = (num / den);
|
||||
if (q != (int8_t)q) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q &= 0xff;
|
||||
r = (num % den) & 0xff;
|
||||
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
|
||||
}
|
||||
|
||||
void helper_divw_AX(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
unsigned int num, den, q, r;
|
||||
|
||||
num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
|
||||
den = (t0 & 0xffff);
|
||||
if (den == 0) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q = (num / den);
|
||||
if (q > 0xffff) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q &= 0xffff;
|
||||
r = (num % den) & 0xffff;
|
||||
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
|
||||
env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
|
||||
}
|
||||
|
||||
void helper_idivw_AX(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
int num, den, q, r;
|
||||
|
||||
num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
|
||||
den = (int16_t)t0;
|
||||
if (den == 0) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q = (num / den);
|
||||
if (q != (int16_t)q) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q &= 0xffff;
|
||||
r = (num % den) & 0xffff;
|
||||
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
|
||||
env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
|
||||
}
|
||||
|
||||
void helper_divl_EAX(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
unsigned int den, r;
|
||||
uint64_t num, q;
|
||||
|
||||
num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
|
||||
den = t0;
|
||||
if (den == 0) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q = (num / den);
|
||||
r = (num % den);
|
||||
if (q > 0xffffffff) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
env->regs[R_EAX] = (uint32_t)q;
|
||||
env->regs[R_EDX] = (uint32_t)r;
|
||||
}
|
||||
|
||||
void helper_idivl_EAX(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
int den, r;
|
||||
int64_t num, q;
|
||||
|
||||
num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
|
||||
den = t0;
|
||||
if (den == 0) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
q = (num / den);
|
||||
r = (num % den);
|
||||
if (q != (int32_t)q) {
|
||||
raise_exception(env, EXCP00_DIVZ);
|
||||
}
|
||||
env->regs[R_EAX] = (uint32_t)q;
|
||||
env->regs[R_EDX] = (uint32_t)r;
|
||||
}
|
||||
|
||||
/* bcd */
|
||||
|
||||
/* XXX: exception */
|
||||
void helper_aam(CPUX86State *env, int base)
|
||||
{
|
||||
int al, ah;
|
||||
|
||||
al = env->regs[R_EAX] & 0xff;
|
||||
ah = al / base;
|
||||
al = al % base;
|
||||
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
|
||||
CC_DST = al;
|
||||
}
|
||||

void helper_aad(CPUX86State *env, int base)
{
    int al, ah;

    al = env->regs[R_EAX] & 0xff;
    ah = (env->regs[R_EAX] >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(CPUX86State *env)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    af = eflags & CC_A;
    al = env->regs[R_EAX] & 0xff;
    ah = (env->regs[R_EAX] >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(CPUX86State *env)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    af = eflags & CC_A;
    al = env->regs[R_EAX] & 0xff;
    ah = (env->regs[R_EAX] >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(CPUX86State *env)
{
    int old_al, al, af, cf;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    old_al = al = env->regs[R_EAX] & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
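
/*
 * Packed-BCD example for the DAA adjustment above: after ADD AL, 0x45 with
 * AL = 0x38, the binary result is 0x7d; its low nibble (0xd) is greater
 * than 9, so 6 is added and AL becomes 0x83, the packed-BCD encoding of
 * 38 + 45 = 83.
 */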

void helper_das(CPUX86State *env)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = env->regs[R_EAX] & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf) {
            eflags |= CC_C;
        }
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a) {
        (*phigh)++;
    }
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b) {
            return 1;
        }
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
               ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
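
/*
 * div64 note: the loop above is a plain restoring division of the 128-bit
 * value phigh:plow by a 64-bit divisor, producing one quotient bit per
 * iteration.  The early "a1 >= b" check rejects any quotient that would not
 * fit in 64 bits; the callers below turn that overflow into #DE.
 */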

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa) {
        neg128(plow, phigh);
    }
    sb = (b < 0);
    if (sb) {
        b = -b;
    }
    if (div64(plow, phigh, b) != 0) {
        return 1;
    }
    if (sa ^ sb) {
        if (*plow > (1ULL << 63)) {
            return 1;
        }
        *plow = -*plow;
    } else {
        if (*plow >= (1ULL << 63)) {
            return 1;
        }
    }
    if (sa) {
        *phigh = -*phigh;
    }
    return 0;
}

void helper_divq_EAX(CPUX86State *env, target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    r0 = env->regs[R_EAX];
    r1 = env->regs[R_EDX];
    if (div64(&r0, &r1, t0)) {
        raise_exception(env, EXCP00_DIVZ);
    }
    env->regs[R_EAX] = r0;
    env->regs[R_EDX] = r1;
}

void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    r0 = env->regs[R_EAX];
    r1 = env->regs[R_EDX];
    if (idiv64(&r0, &r1, t0)) {
        raise_exception(env, EXCP00_DIVZ);
    }
    env->regs[R_EAX] = r0;
    env->regs[R_EDX] = r1;
}
#endif

#if TARGET_LONG_BITS == 32
# define ctztl ctz32
# define clztl clz32
#else
# define ctztl ctz64
# define clztl clz64
#endif

/* bit operations */
target_ulong helper_ctz(target_ulong t0)
{
    return ctztl(t0);
}

target_ulong helper_clz_x86(target_ulong t0)
{
    return clztl(t0);
}

target_ulong helper_pdep(target_ulong src, target_ulong mask)
{
    target_ulong dest = 0;
    int i, o;

    for (i = 0; mask != 0; i++) {
        o = ctztl(mask);
        mask &= mask - 1;
        dest |= ((src >> i) & 1) << o;
    }
    return dest;
}

target_ulong helper_pext(target_ulong src, target_ulong mask)
{
    target_ulong dest = 0;
    int i, o;

    for (o = 0; mask != 0; o++) {
        i = ctztl(mask);
        mask &= mask - 1;
        dest |= ((src >> i) & 1) << o;
    }
    return dest;
}
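
/*
 * helper_pdep/helper_pext implement the BMI2 bit deposit/extract operations:
 * PDEP scatters the low-order bits of src into the bit positions selected by
 * mask, and PEXT gathers the masked bits of src into the low-order bits of
 * the result.  For example, pdep(0x5, 0x1a) == 0x12 and pext(0x12, 0x1a) == 0x5.
 */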

#define SHIFT 0
#include "shift_helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "shift_helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "shift_helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64
#define SHIFT 3
#include "shift_helper_template.h"
#undef SHIFT
#endif
132
qemu/target-i386/mem_helper.c
Normal file
@@ -0,0 +1,132 @@
/*
 * x86 memory access helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

#include "uc_priv.h"

/* broken thread support */

void helper_lock(CPUX86State *env)
{
    spin_lock(&x86_env_get_cpu(env)->parent_obj.uc->x86_global_cpu_lock);
}

void helper_unlock(CPUX86State *env)
{
    spin_unlock(&x86_env_get_cpu(env)->parent_obj.uc->x86_global_cpu_lock);
}

void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    d = cpu_ldq_data(env, a0);
    if (d == (((uint64_t)env->regs[R_EDX] << 32) | (uint32_t)env->regs[R_EAX])) {
        cpu_stq_data(env, a0, ((uint64_t)env->regs[R_ECX] << 32) | (uint32_t)env->regs[R_EBX]);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d);
        env->regs[R_EDX] = (uint32_t)(d >> 32);
        env->regs[R_EAX] = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
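
/*
 * CMPXCHG8B semantics: the 64-bit operand at a0 is compared with EDX:EAX;
 * on a match ECX:EBX is stored and ZF is set, otherwise the old memory
 * value is loaded into EDX:EAX and ZF is cleared.  As the comment above
 * notes, the helper writes memory in both cases, so the store side effect
 * (and any resulting fault) happens unconditionally.
 */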

#ifdef TARGET_X86_64
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = cpu_cc_compute_all(env, CC_OP);
    d0 = cpu_ldq_data(env, a0);
    d1 = cpu_ldq_data(env, a0 + 8);
    if (d0 == env->regs[R_EAX] && d1 == env->regs[R_EDX]) {
        cpu_stq_data(env, a0, env->regs[R_EBX]);
        cpu_stq_data(env, a0 + 8, env->regs[R_ECX]);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d0);
        cpu_stq_data(env, a0 + 8, d1);
        env->regs[R_EDX] = d1;
        env->regs[R_EAX] = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data(env, a0);
    high = cpu_ldsw_data(env, a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data(env, a0);
    high = cpu_ldl_data(env, a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (ret) {
        X86CPU *cpu = X86_CPU(cs->uc, cs);
        CPUX86State *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception_err(env, cs->exception_index, env->error_code);
    }
}
#endif
604
qemu/target-i386/misc_helper.c
Normal file
@@ -0,0 +1,604 @@
|
||||
/*
|
||||
* x86 misc helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "cpu.h"
|
||||
#include "exec/ioport.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
|
||||
#include "uc_priv.h"
|
||||
|
||||
void helper_outb(void *handle, uint32_t port, uint32_t data)
|
||||
{
|
||||
cpu_outb(handle, port, data & 0xff);
|
||||
}
|
||||
|
||||
target_ulong helper_inb(void *handle, uint32_t port)
|
||||
{
|
||||
return cpu_inb(handle, port);
|
||||
}
|
||||
|
||||
void helper_outw(void *handle, uint32_t port, uint32_t data)
|
||||
{
|
||||
cpu_outw(handle, port, data & 0xffff);
|
||||
}
|
||||
|
||||
target_ulong helper_inw(void *handle, uint32_t port)
|
||||
{
|
||||
return cpu_inw(handle, port);
|
||||
}
|
||||
|
||||
void helper_outl(void *handle, uint32_t port, uint32_t data)
|
||||
{
|
||||
cpu_outl(handle, port, data);
|
||||
}
|
||||
|
||||
target_ulong helper_inl(void *handle, uint32_t port)
|
||||
{
|
||||
return cpu_inl(handle, port);
|
||||
}
|
||||
|
||||
void helper_into(CPUX86State *env, int next_eip_addend)
|
||||
{
|
||||
int eflags;
|
||||
|
||||
eflags = cpu_cc_compute_all(env, CC_OP);
|
||||
if (eflags & CC_O) {
|
||||
raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
|
||||
}
|
||||
}
|
||||
|
||||
void helper_single_step(CPUX86State *env)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
check_hw_breakpoints(env, true);
|
||||
env->dr[6] |= DR6_BS;
|
||||
#endif
|
||||
raise_exception(env, EXCP01_DB);
|
||||
}
|
||||
|
||||
void helper_cpuid(CPUX86State *env)
|
||||
{
|
||||
uint32_t eax, ebx, ecx, edx;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);
|
||||
|
||||
cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
|
||||
&eax, &ebx, &ecx, &edx);
|
||||
env->regs[R_EAX] = eax;
|
||||
env->regs[R_EBX] = ebx;
|
||||
env->regs[R_ECX] = ecx;
|
||||
env->regs[R_EDX] = edx;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
target_ulong helper_read_crN(CPUX86State *env, int reg)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
|
||||
{
|
||||
}
|
||||
|
||||
void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
|
||||
{
|
||||
}
|
||||
#else
|
||||
target_ulong helper_read_crN(CPUX86State *env, int reg)
|
||||
{
|
||||
target_ulong val;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
|
||||
switch (reg) {
|
||||
default:
|
||||
val = env->cr[reg];
|
||||
break;
|
||||
case 8:
|
||||
if (!(env->hflags2 & HF2_VINTR_MASK)) {
|
||||
val = cpu_get_apic_tpr(env->uc, x86_env_get_cpu(env)->apic_state);
|
||||
} else {
|
||||
val = env->v_tpr;
|
||||
}
|
||||
break;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
|
||||
{
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
|
||||
switch (reg) {
|
||||
case 0:
|
||||
cpu_x86_update_cr0(env, t0);
|
||||
break;
|
||||
case 3:
|
||||
cpu_x86_update_cr3(env, t0);
|
||||
break;
|
||||
case 4:
|
||||
cpu_x86_update_cr4(env, t0);
|
||||
break;
|
||||
case 8:
|
||||
if (!(env->hflags2 & HF2_VINTR_MASK)) {
|
||||
cpu_set_apic_tpr(env->uc, x86_env_get_cpu(env)->apic_state, t0);
|
||||
}
|
||||
env->v_tpr = t0 & 0x0f;
|
||||
break;
|
||||
default:
|
||||
env->cr[reg] = t0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (reg < 4) {
|
||||
hw_breakpoint_remove(env, reg);
|
||||
env->dr[reg] = t0;
|
||||
hw_breakpoint_insert(env, reg);
|
||||
} else if (reg == 7) {
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
hw_breakpoint_remove(env, i);
|
||||
}
|
||||
env->dr[7] = t0;
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
hw_breakpoint_insert(env, i);
|
||||
}
|
||||
} else {
|
||||
env->dr[reg] = t0;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void helper_lmsw(CPUX86State *env, target_ulong t0)
|
||||
{
|
||||
/* only 4 lower bits of CR0 are modified. PE cannot be set to zero
|
||||
if already set to one. */
|
||||
t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
|
||||
helper_write_crN(env, 0, t0);
|
||||
}
|
||||
|
||||
void helper_invlpg(CPUX86State *env, target_ulong addr)
|
||||
{
|
||||
X86CPU *cpu = x86_env_get_cpu(env);
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
|
||||
tlb_flush_page(CPU(cpu), addr);
|
||||
}
|
||||
|
||||
void helper_rdtsc(CPUX86State *env)
|
||||
{
|
||||
uint64_t val;
|
||||
|
||||
if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
|
||||
raise_exception(env, EXCP0D_GPF);
|
||||
}
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
|
||||
|
||||
val = cpu_get_tsc(env) + env->tsc_offset;
|
||||
env->regs[R_EAX] = (uint32_t)(val);
|
||||
env->regs[R_EDX] = (uint32_t)(val >> 32);
|
||||
}
|
||||
|
||||
void helper_rdtscp(CPUX86State *env)
|
||||
{
|
||||
helper_rdtsc(env);
|
||||
env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
|
||||
}
|
||||
|
||||
void helper_rdpmc(CPUX86State *env)
|
||||
{
|
||||
if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
|
||||
raise_exception(env, EXCP0D_GPF);
|
||||
}
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
|
||||
|
||||
/* currently unimplemented */
|
||||
qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
|
||||
raise_exception_err(env, EXCP06_ILLOP, 0);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
void helper_wrmsr(CPUX86State *env)
|
||||
{
|
||||
}
|
||||
|
||||
void helper_rdmsr(CPUX86State *env)
|
||||
{
|
||||
}
|
||||
#else
|
||||
void helper_wrmsr(CPUX86State *env)
|
||||
{
|
||||
uint64_t val;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);
|
||||
|
||||
val = ((uint32_t)env->regs[R_EAX]) |
|
||||
((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
|
||||
|
||||
switch ((uint32_t)env->regs[R_ECX]) {
|
||||
case MSR_IA32_SYSENTER_CS:
|
||||
env->sysenter_cs = val & 0xffff;
|
||||
break;
|
||||
case MSR_IA32_SYSENTER_ESP:
|
||||
env->sysenter_esp = val;
|
||||
break;
|
||||
case MSR_IA32_SYSENTER_EIP:
|
||||
env->sysenter_eip = val;
|
||||
break;
|
||||
case MSR_IA32_APICBASE:
|
||||
cpu_set_apic_base(env->uc, x86_env_get_cpu(env)->apic_state, val);
|
||||
break;
|
||||
case MSR_EFER:
|
||||
{
|
||||
uint64_t update_mask;
|
||||
|
||||
update_mask = 0;
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
|
||||
update_mask |= MSR_EFER_SCE;
|
||||
}
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
|
||||
update_mask |= MSR_EFER_LME;
|
||||
}
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
|
||||
update_mask |= MSR_EFER_FFXSR;
|
||||
}
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
|
||||
update_mask |= MSR_EFER_NXE;
|
||||
}
|
||||
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
|
||||
update_mask |= MSR_EFER_SVME;
|
||||
}
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
|
||||
update_mask |= MSR_EFER_FFXSR;
|
||||
}
|
||||
cpu_load_efer(env, (env->efer & ~update_mask) |
|
||||
(val & update_mask));
|
||||
}
|
||||
break;
|
||||
case MSR_STAR:
|
||||
env->star = val;
|
||||
break;
|
||||
case MSR_PAT:
|
||||
env->pat = val;
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
env->vm_hsave = val;
|
||||
break;
|
||||
#ifdef TARGET_X86_64
|
||||
case MSR_LSTAR:
|
||||
env->lstar = val;
|
||||
break;
|
||||
case MSR_CSTAR:
|
||||
env->cstar = val;
|
||||
break;
|
||||
case MSR_FMASK:
|
||||
env->fmask = val;
|
||||
break;
|
||||
case MSR_FSBASE:
|
||||
env->segs[R_FS].base = val;
|
||||
break;
|
||||
case MSR_GSBASE:
|
||||
env->segs[R_GS].base = val;
|
||||
break;
|
||||
case MSR_KERNELGSBASE:
|
||||
env->kernelgsbase = val;
|
||||
break;
|
||||
#endif
|
||||
case MSR_MTRRphysBase(0):
|
||||
case MSR_MTRRphysBase(1):
|
||||
case MSR_MTRRphysBase(2):
|
||||
case MSR_MTRRphysBase(3):
|
||||
case MSR_MTRRphysBase(4):
|
||||
case MSR_MTRRphysBase(5):
|
||||
case MSR_MTRRphysBase(6):
|
||||
case MSR_MTRRphysBase(7):
|
||||
env->mtrr_var[((uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRphysBase(0)) / 2].base = val;
|
||||
break;
|
||||
case MSR_MTRRphysMask(0):
|
||||
case MSR_MTRRphysMask(1):
|
||||
case MSR_MTRRphysMask(2):
|
||||
case MSR_MTRRphysMask(3):
|
||||
case MSR_MTRRphysMask(4):
|
||||
case MSR_MTRRphysMask(5):
|
||||
case MSR_MTRRphysMask(6):
|
||||
case MSR_MTRRphysMask(7):
|
||||
env->mtrr_var[((uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRphysMask(0)) / 2].mask = val;
|
||||
break;
|
||||
case MSR_MTRRfix64K_00000:
|
||||
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRfix64K_00000] = val;
|
||||
break;
|
||||
case MSR_MTRRfix16K_80000:
|
||||
case MSR_MTRRfix16K_A0000:
|
||||
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRfix16K_80000 + 1] = val;
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000:
|
||||
case MSR_MTRRfix4K_C8000:
|
||||
case MSR_MTRRfix4K_D0000:
|
||||
case MSR_MTRRfix4K_D8000:
|
||||
case MSR_MTRRfix4K_E0000:
|
||||
case MSR_MTRRfix4K_E8000:
|
||||
case MSR_MTRRfix4K_F0000:
|
||||
case MSR_MTRRfix4K_F8000:
|
||||
env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRfix4K_C0000 + 3] = val;
|
||||
break;
|
||||
case MSR_MTRRdefType:
|
||||
env->mtrr_deftype = val;
|
||||
break;
|
||||
case MSR_MCG_STATUS:
|
||||
env->mcg_status = val;
|
||||
break;
|
||||
case MSR_MCG_CTL:
|
||||
if ((env->mcg_cap & MCG_CTL_P)
|
||||
&& (val == 0 || val == ~(uint64_t)0)) {
|
||||
env->mcg_ctl = val;
|
||||
}
|
||||
break;
|
||||
case MSR_TSC_AUX:
|
||||
env->tsc_aux = val;
|
||||
break;
|
||||
case MSR_IA32_MISC_ENABLE:
|
||||
env->msr_ia32_misc_enable = val;
|
||||
break;
|
||||
default:
|
||||
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
|
||||
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
|
||||
(4 * env->mcg_cap & 0xff)) {
|
||||
uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
|
||||
if ((offset & 0x3) != 0
|
||||
|| (val == 0 || val == ~(uint64_t)0)) {
|
||||
env->mce_banks[offset] = val;
|
||||
}
|
||||
break;
|
||||
}
|
||||
/* XXX: exception? */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void helper_rdmsr(CPUX86State *env)
|
||||
{
|
||||
uint64_t val;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);
|
||||
|
||||
switch ((uint32_t)env->regs[R_ECX]) {
|
||||
case MSR_IA32_SYSENTER_CS:
|
||||
val = env->sysenter_cs;
|
||||
break;
|
||||
case MSR_IA32_SYSENTER_ESP:
|
||||
val = env->sysenter_esp;
|
||||
break;
|
||||
case MSR_IA32_SYSENTER_EIP:
|
||||
val = env->sysenter_eip;
|
||||
break;
|
||||
case MSR_IA32_APICBASE:
|
||||
val = cpu_get_apic_base(env->uc, x86_env_get_cpu(env)->apic_state);
|
||||
break;
|
||||
case MSR_EFER:
|
||||
val = env->efer;
|
||||
break;
|
||||
case MSR_STAR:
|
||||
val = env->star;
|
||||
break;
|
||||
case MSR_PAT:
|
||||
val = env->pat;
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
val = env->vm_hsave;
|
||||
break;
|
||||
case MSR_IA32_PERF_STATUS:
|
||||
/* tsc_increment_by_tick */
|
||||
val = 1000ULL;
|
||||
/* CPU multiplier */
|
||||
val |= (((uint64_t)4ULL) << 40);
|
||||
break;
|
||||
#ifdef TARGET_X86_64
|
||||
case MSR_LSTAR:
|
||||
val = env->lstar;
|
||||
break;
|
||||
case MSR_CSTAR:
|
||||
val = env->cstar;
|
||||
break;
|
||||
case MSR_FMASK:
|
||||
val = env->fmask;
|
||||
break;
|
||||
case MSR_FSBASE:
|
||||
val = env->segs[R_FS].base;
|
||||
break;
|
||||
case MSR_GSBASE:
|
||||
val = env->segs[R_GS].base;
|
||||
break;
|
||||
case MSR_KERNELGSBASE:
|
||||
val = env->kernelgsbase;
|
||||
break;
|
||||
case MSR_TSC_AUX:
|
||||
val = env->tsc_aux;
|
||||
break;
|
||||
#endif
|
||||
case MSR_MTRRphysBase(0):
|
||||
case MSR_MTRRphysBase(1):
|
||||
case MSR_MTRRphysBase(2):
|
||||
case MSR_MTRRphysBase(3):
|
||||
case MSR_MTRRphysBase(4):
|
||||
case MSR_MTRRphysBase(5):
|
||||
case MSR_MTRRphysBase(6):
|
||||
case MSR_MTRRphysBase(7):
|
||||
val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRphysBase(0)) / 2].base;
|
||||
break;
|
||||
case MSR_MTRRphysMask(0):
|
||||
case MSR_MTRRphysMask(1):
|
||||
case MSR_MTRRphysMask(2):
|
||||
case MSR_MTRRphysMask(3):
|
||||
case MSR_MTRRphysMask(4):
|
||||
case MSR_MTRRphysMask(5):
|
||||
case MSR_MTRRphysMask(6):
|
||||
case MSR_MTRRphysMask(7):
|
||||
val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRphysMask(0)) / 2].mask;
|
||||
break;
|
||||
case MSR_MTRRfix64K_00000:
|
||||
val = env->mtrr_fixed[0];
|
||||
break;
|
||||
case MSR_MTRRfix16K_80000:
|
||||
case MSR_MTRRfix16K_A0000:
|
||||
val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRfix16K_80000 + 1];
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000:
|
||||
case MSR_MTRRfix4K_C8000:
|
||||
case MSR_MTRRfix4K_D0000:
|
||||
case MSR_MTRRfix4K_D8000:
|
||||
case MSR_MTRRfix4K_E0000:
|
||||
case MSR_MTRRfix4K_E8000:
|
||||
case MSR_MTRRfix4K_F0000:
|
||||
case MSR_MTRRfix4K_F8000:
|
||||
val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
|
||||
MSR_MTRRfix4K_C0000 + 3];
|
||||
break;
|
||||
case MSR_MTRRdefType:
|
||||
val = env->mtrr_deftype;
|
||||
break;
|
||||
case MSR_MTRRcap:
|
||||
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
|
||||
val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
|
||||
MSR_MTRRcap_WC_SUPPORTED;
|
||||
} else {
|
||||
/* XXX: exception? */
|
||||
val = 0;
|
||||
}
|
||||
break;
|
||||
case MSR_MCG_CAP:
|
||||
val = env->mcg_cap;
|
||||
break;
|
||||
case MSR_MCG_CTL:
|
||||
if (env->mcg_cap & MCG_CTL_P) {
|
||||
val = env->mcg_ctl;
|
||||
} else {
|
||||
val = 0;
|
||||
}
|
||||
break;
|
||||
case MSR_MCG_STATUS:
|
||||
val = env->mcg_status;
|
||||
break;
|
||||
case MSR_IA32_MISC_ENABLE:
|
||||
val = env->msr_ia32_misc_enable;
|
||||
break;
|
||||
default:
|
||||
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
|
||||
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
|
||||
(4 * env->mcg_cap & 0xff)) {
|
||||
uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
|
||||
val = env->mce_banks[offset];
|
||||
break;
|
||||
}
|
||||
/* XXX: exception? */
|
||||
val = 0;
|
||||
break;
|
||||
}
|
||||
env->regs[R_EAX] = (uint32_t)(val);
|
||||
env->regs[R_EDX] = (uint32_t)(val >> 32);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void do_pause(X86CPU *cpu)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
|
||||
/* Just let another CPU run. */
|
||||
cs->exception_index = EXCP_INTERRUPT;
|
||||
cpu_loop_exit(cs);
|
||||
}
|
||||
|
||||
static void do_hlt(X86CPU *cpu)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
|
||||
cs->halted = 1;
|
||||
cs->exception_index = EXCP_HLT;
|
||||
cpu_loop_exit(cs);
|
||||
}
|
||||
|
||||
void helper_hlt(CPUX86State *env, int next_eip_addend)
|
||||
{
|
||||
X86CPU *cpu = x86_env_get_cpu(env);
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
|
||||
env->eip += next_eip_addend;
|
||||
|
||||
do_hlt(cpu);
|
||||
}
|
||||
|
||||
void helper_monitor(CPUX86State *env, target_ulong ptr)
|
||||
{
|
||||
if ((uint32_t)env->regs[R_ECX] != 0) {
|
||||
raise_exception(env, EXCP0D_GPF);
|
||||
}
|
||||
/* XXX: store address? */
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
|
||||
}
|
||||
|
||||
void helper_mwait(CPUX86State *env, int next_eip_addend)
|
||||
{
|
||||
CPUState *cs;
|
||||
X86CPU *cpu;
|
||||
|
||||
if ((uint32_t)env->regs[R_ECX] != 0) {
|
||||
raise_exception(env, EXCP0D_GPF);
|
||||
}
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
|
||||
env->eip += next_eip_addend;
|
||||
|
||||
cpu = x86_env_get_cpu(env);
|
||||
cs = CPU(cpu);
|
||||
/* XXX: not complete but not completely erroneous */
|
||||
if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
|
||||
do_pause(cpu);
|
||||
} else {
|
||||
do_hlt(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
void helper_pause(CPUX86State *env, int next_eip_addend)
|
||||
{
|
||||
X86CPU *cpu = x86_env_get_cpu(env);
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0);
|
||||
env->eip += next_eip_addend;
|
||||
|
||||
do_pause(cpu);
|
||||
}
|
||||
|
||||
void helper_debug(CPUX86State *env)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
|
||||
cs->exception_index = EXCP_DEBUG;
|
||||
cpu_loop_exit(cs);
|
||||
}
|
||||
2296
qemu/target-i386/ops_sse.h
Normal file
File diff suppressed because it is too large
360
qemu/target-i386/ops_sse_header.h
Normal file
@@ -0,0 +1,360 @@
|
||||
/*
|
||||
* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
|
||||
*
|
||||
* Copyright (c) 2005 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#if SHIFT == 0
|
||||
#define Reg MMXReg
|
||||
#define SUFFIX _mmx
|
||||
#else
|
||||
#define Reg XMMReg
|
||||
#define SUFFIX _xmm
|
||||
#endif
|
||||
|
||||
#define dh_alias_Reg ptr
|
||||
#define dh_alias_XMMReg ptr
|
||||
#define dh_alias_MMXReg ptr
|
||||
#define dh_ctype_Reg Reg *
|
||||
#define dh_ctype_XMMReg XMMReg *
|
||||
#define dh_ctype_MMXReg MMXReg *
|
||||
#define dh_is_signed_Reg dh_is_signed_ptr
|
||||
#define dh_is_signed_XMMReg dh_is_signed_ptr
|
||||
#define dh_is_signed_MMXReg dh_is_signed_ptr
|
||||
|
||||
DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg)
|
||||
#endif
|
||||
|
||||
#define SSE_HELPER_B(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#define SSE_HELPER_W(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#define SSE_HELPER_L(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#define SSE_HELPER_Q(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
SSE_HELPER_B(paddb, FADD)
|
||||
SSE_HELPER_W(paddw, FADD)
|
||||
SSE_HELPER_L(paddl, FADD)
|
||||
SSE_HELPER_Q(paddq, FADD)
|
||||
|
||||
SSE_HELPER_B(psubb, FSUB)
|
||||
SSE_HELPER_W(psubw, FSUB)
|
||||
SSE_HELPER_L(psubl, FSUB)
|
||||
SSE_HELPER_Q(psubq, FSUB)
|
||||
|
||||
SSE_HELPER_B(paddusb, FADDUB)
|
||||
SSE_HELPER_B(paddsb, FADDSB)
|
||||
SSE_HELPER_B(psubusb, FSUBUB)
|
||||
SSE_HELPER_B(psubsb, FSUBSB)
|
||||
|
||||
SSE_HELPER_W(paddusw, FADDUW)
|
||||
SSE_HELPER_W(paddsw, FADDSW)
|
||||
SSE_HELPER_W(psubusw, FSUBUW)
|
||||
SSE_HELPER_W(psubsw, FSUBSW)
|
||||
|
||||
SSE_HELPER_B(pminub, FMINUB)
|
||||
SSE_HELPER_B(pmaxub, FMAXUB)
|
||||
|
||||
SSE_HELPER_W(pminsw, FMINSW)
|
||||
SSE_HELPER_W(pmaxsw, FMAXSW)
|
||||
|
||||
SSE_HELPER_Q(pand, FAND)
|
||||
SSE_HELPER_Q(pandn, FANDN)
|
||||
SSE_HELPER_Q(por, FOR)
|
||||
SSE_HELPER_Q(pxor, FXOR)
|
||||
|
||||
SSE_HELPER_B(pcmpgtb, FCMPGTB)
|
||||
SSE_HELPER_W(pcmpgtw, FCMPGTW)
|
||||
SSE_HELPER_L(pcmpgtl, FCMPGTL)
|
||||
|
||||
SSE_HELPER_B(pcmpeqb, FCMPEQ)
|
||||
SSE_HELPER_W(pcmpeqw, FCMPEQ)
|
||||
SSE_HELPER_L(pcmpeql, FCMPEQ)
|
||||
|
||||
SSE_HELPER_W(pmullw, FMULLW)
|
||||
#if SHIFT == 0
|
||||
SSE_HELPER_W(pmulhrw, FMULHRW)
|
||||
#endif
|
||||
SSE_HELPER_W(pmulhuw, FMULHUW)
|
||||
SSE_HELPER_W(pmulhw, FMULHW)
|
||||
|
||||
SSE_HELPER_B(pavgb, FAVG)
|
||||
SSE_HELPER_W(pavgw, FAVG)
|
||||
|
||||
DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl)
|
||||
DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64)
|
||||
#endif
|
||||
|
||||
#if SHIFT == 0
|
||||
DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int)
|
||||
#else
|
||||
DEF_HELPER_3(shufps, void, Reg, Reg, int)
|
||||
DEF_HELPER_3(shufpd, void, Reg, Reg, int)
|
||||
DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int)
|
||||
DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int)
|
||||
DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int)
|
||||
#endif
|
||||
|
||||
#if SHIFT == 1
|
||||
/* FPU ops */
|
||||
/* XXX: not accurate */
|
||||
|
||||
#define SSE_HELPER_S(name, F) \
|
||||
DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
|
||||
|
||||
SSE_HELPER_S(add, FPU_ADD)
|
||||
SSE_HELPER_S(sub, FPU_SUB)
|
||||
SSE_HELPER_S(mul, FPU_MUL)
|
||||
SSE_HELPER_S(div, FPU_DIV)
|
||||
SSE_HELPER_S(min, FPU_MIN)
|
||||
SSE_HELPER_S(max, FPU_MAX)
|
||||
SSE_HELPER_S(sqrt, FPU_SQRT)
|
||||
|
||||
|
||||
DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtpi2ps, void, env, XMMReg, MMXReg)
|
||||
DEF_HELPER_3(cvtpi2pd, void, env, XMMReg, MMXReg)
|
||||
DEF_HELPER_3(cvtsi2ss, void, env, XMMReg, i32)
|
||||
DEF_HELPER_3(cvtsi2sd, void, env, XMMReg, i32)
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_3(cvtsq2ss, void, env, XMMReg, i64)
|
||||
DEF_HELPER_3(cvtsq2sd, void, env, XMMReg, i64)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_3(cvtps2dq, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(cvtpd2dq, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(cvtps2pi, void, env, MMXReg, XMMReg)
|
||||
DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, XMMReg)
|
||||
DEF_HELPER_2(cvtss2si, s32, env, XMMReg)
|
||||
DEF_HELPER_2(cvtsd2si, s32, env, XMMReg)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(cvtss2sq, s64, env, XMMReg)
|
||||
DEF_HELPER_2(cvtsd2sq, s64, env, XMMReg)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_3(cvttps2dq, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(cvttpd2dq, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(cvttps2pi, void, env, MMXReg, XMMReg)
|
||||
DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, XMMReg)
|
||||
DEF_HELPER_2(cvttss2si, s32, env, XMMReg)
|
||||
DEF_HELPER_2(cvttsd2si, s32, env, XMMReg)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(cvttss2sq, s64, env, XMMReg)
|
||||
DEF_HELPER_2(cvttsd2sq, s64, env, XMMReg)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_3(rsqrtps, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(rsqrtss, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(rcpps, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(rcpss, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(extrq_r, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_4(extrq_i, void, env, XMMReg, int, int)
|
||||
DEF_HELPER_3(insertq_r, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_4(insertq_i, void, env, XMMReg, int, int)
|
||||
DEF_HELPER_3(haddps, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(haddpd, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(hsubps, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(hsubpd, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(addsubps, void, env, XMMReg, XMMReg)
|
||||
DEF_HELPER_3(addsubpd, void, env, XMMReg, XMMReg)
|
||||
|
||||
#define SSE_HELPER_CMP(name, F) \
|
||||
DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
|
||||
|
||||
SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
|
||||
SSE_HELPER_CMP(cmplt, FPU_CMPLT)
|
||||
SSE_HELPER_CMP(cmple, FPU_CMPLE)
|
||||
SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
|
||||
SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
|
||||
SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
|
||||
SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
|
||||
SSE_HELPER_CMP(cmpord, FPU_CMPORD)
|
||||
|
||||
DEF_HELPER_3(ucomiss, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(comiss, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(ucomisd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(comisd, void, env, Reg, Reg)
|
||||
DEF_HELPER_2(movmskps, i32, env, Reg)
|
||||
DEF_HELPER_2(movmskpd, i32, env, Reg)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg)
|
||||
DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg)
|
||||
#define UNPCK_OP(base_name, base) \
|
||||
DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
UNPCK_OP(l, 0)
|
||||
UNPCK_OP(h, 1)
|
||||
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg)
|
||||
#endif
|
||||
|
||||
/* 3DNow! float ops */
|
||||
#if SHIFT == 0
|
||||
DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg)
|
||||
#endif
|
||||
|
||||
/* SSSE3 op helpers */
|
||||
DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32)
|
||||
|
||||
/* SSE4.1 op helpers */
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32)
|
||||
#endif
|
||||
|
||||
/* SSE4.2 op helpers */
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_3(crc32, tl, i32, tl, i32)
|
||||
DEF_HELPER_3(popcnt, tl, env, tl, i32)
|
||||
#endif
|
||||
|
||||
/* AES-NI op helpers */
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32)
|
||||
#endif
|
||||
|
||||
#undef SHIFT
|
||||
#undef Reg
|
||||
#undef SUFFIX
|
||||
|
||||
#undef SSE_HELPER_B
|
||||
#undef SSE_HELPER_W
|
||||
#undef SSE_HELPER_L
|
||||
#undef SSE_HELPER_Q
|
||||
#undef SSE_HELPER_S
|
||||
#undef SSE_HELPER_CMP
|
||||
#undef UNPCK_OP
|
||||
2590
qemu/target-i386/seg_helper.c
Normal file
File diff suppressed because it is too large
108
qemu/target-i386/shift_helper_template.h
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* x86 shift helpers
|
||||
*
|
||||
* Copyright (c) 2008 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define DATA_BITS (1 << (3 + SHIFT))
|
||||
#define SHIFT_MASK (DATA_BITS - 1)
|
||||
#if DATA_BITS <= 32
|
||||
#define SHIFT1_MASK 0x1f
|
||||
#else
|
||||
#define SHIFT1_MASK 0x3f
|
||||
#endif
|
||||
|
||||
#if DATA_BITS == 8
|
||||
#define SUFFIX b
|
||||
#define DATA_MASK 0xff
|
||||
#elif DATA_BITS == 16
|
||||
#define SUFFIX w
|
||||
#define DATA_MASK 0xffff
|
||||
#elif DATA_BITS == 32
|
||||
#define SUFFIX l
|
||||
#define DATA_MASK 0xffffffff
|
||||
#elif DATA_BITS == 64
|
||||
#define SUFFIX q
|
||||
#define DATA_MASK 0xffffffffffffffffULL
|
||||
#else
|
||||
#error unhandled operand size
|
||||
#endif
|
||||
|
||||
target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
|
||||
target_ulong t1)
|
||||
{
|
||||
int count, eflags;
|
||||
target_ulong src;
|
||||
target_long res;
|
||||
|
||||
count = t1 & SHIFT1_MASK;
|
||||
#if DATA_BITS == 16
|
||||
count = rclw_table[count];
|
||||
#elif DATA_BITS == 8
|
||||
count = rclb_table[count];
|
||||
#endif
|
||||
if (count) {
|
||||
eflags = env->cc_src;
|
||||
t0 &= DATA_MASK;
|
||||
src = t0;
|
||||
res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
|
||||
if (count > 1) {
|
||||
res |= t0 >> (DATA_BITS + 1 - count);
|
||||
}
|
||||
t0 = res;
|
||||
env->cc_src = (eflags & ~(CC_C | CC_O)) |
|
||||
(lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
|
||||
((src >> (DATA_BITS - count)) & CC_C);
|
||||
}
|
||||
return t0;
|
||||
}
|
||||
|
||||
target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
|
||||
target_ulong t1)
|
||||
{
|
||||
int count, eflags;
|
||||
target_ulong src;
|
||||
target_long res;
|
||||
|
||||
count = t1 & SHIFT1_MASK;
|
||||
#if DATA_BITS == 16
|
||||
count = rclw_table[count];
|
||||
#elif DATA_BITS == 8
|
||||
count = rclb_table[count];
|
||||
#endif
|
||||
if (count) {
|
||||
eflags = env->cc_src;
|
||||
t0 &= DATA_MASK;
|
||||
src = t0;
|
||||
res = (t0 >> count) |
|
||||
((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
|
||||
if (count > 1) {
|
||||
res |= t0 << (DATA_BITS + 1 - count);
|
||||
}
|
||||
t0 = res;
|
||||
env->cc_src = (eflags & ~(CC_C | CC_O)) |
|
||||
(lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
|
||||
((src >> (count - 1)) & CC_C);
|
||||
}
|
||||
return t0;
|
||||
}
|
||||
|
||||
#undef DATA_BITS
|
||||
#undef SHIFT_MASK
|
||||
#undef SHIFT1_MASK
|
||||
#undef DATA_TYPE
|
||||
#undef DATA_MASK
|
||||
#undef SUFFIX
|
||||
317
qemu/target-i386/smm_helper.c
Normal file
@@ -0,0 +1,317 @@
|
||||
/*
|
||||
* x86 SMM helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
|
||||
/* SMM support */
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
|
||||
void do_smm_enter(X86CPU *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
void helper_rsm(CPUX86State *env)
|
||||
{
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
#define SMM_REVISION_ID 0x00020064
|
||||
#else
|
||||
#define SMM_REVISION_ID 0x00020000
|
||||
#endif
|
||||
|
||||
void do_smm_enter(X86CPU *cpu)
|
||||
{
|
||||
CPUX86State *env = &cpu->env;
|
||||
CPUState *cs = CPU(cpu);
|
||||
target_ulong sm_state;
|
||||
SegmentCache *dt;
|
||||
int i, offset;
|
||||
|
||||
qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
|
||||
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
|
||||
|
||||
env->hflags |= HF_SMM_MASK;
|
||||
cpu_smm_update(env);
|
||||
|
||||
sm_state = env->smbase + 0x8000;
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
for (i = 0; i < 6; i++) {
|
||||
dt = &env->segs[i];
|
||||
offset = 0x7e00 + i * 16;
|
||||
stw_phys(cs->as, sm_state + offset, dt->selector);
|
||||
stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
|
||||
stl_phys(cs->as, sm_state + offset + 4, dt->limit);
|
||||
stq_phys(cs->as, sm_state + offset + 8, dt->base);
|
||||
}
|
||||
|
||||
stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base);
|
||||
stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit);
|
||||
|
||||
stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector);
|
||||
stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base);
|
||||
stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit);
|
||||
stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
|
||||
|
||||
stq_phys(cs->as, sm_state + 0x7e88, env->idt.base);
|
||||
stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit);
|
||||
|
||||
stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector);
|
||||
stq_phys(cs->as, sm_state + 0x7e98, env->tr.base);
|
||||
stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit);
|
||||
stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
|
||||
|
||||
stq_phys(cs->as, sm_state + 0x7ed0, env->efer);
|
||||
|
||||
stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]);
|
||||
stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]);
|
||||
stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]);
|
||||
stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]);
|
||||
stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]);
|
||||
stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]);
|
||||
stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]);
|
||||
stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]);
|
||||
for (i = 8; i < 16; i++) {
|
||||
stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]);
|
||||
}
|
||||
stq_phys(cs->as, sm_state + 0x7f78, env->eip);
|
||||
stl_phys(cs->as, sm_state + 0x7f70, cpu_compute_eflags(env));
|
||||
stl_phys(cs->as, sm_state + 0x7f68, env->dr[6]);
|
||||
stl_phys(cs->as, sm_state + 0x7f60, env->dr[7]);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7f48, env->cr[4]);
|
||||
stl_phys(cs->as, sm_state + 0x7f50, env->cr[3]);
|
||||
stl_phys(cs->as, sm_state + 0x7f58, env->cr[0]);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
|
||||
stl_phys(cs->as, sm_state + 0x7f00, env->smbase);
|
||||
#else
|
||||
stl_phys(cs->as, sm_state + 0x7ffc, env->cr[0]);
|
||||
stl_phys(cs->as, sm_state + 0x7ff8, env->cr[3]);
|
||||
stl_phys(cs->as, sm_state + 0x7ff4, cpu_compute_eflags(env));
|
||||
stl_phys(cs->as, sm_state + 0x7ff0, env->eip);
|
||||
stl_phys(cs->as, sm_state + 0x7fec, env->regs[R_EDI]);
|
||||
stl_phys(cs->as, sm_state + 0x7fe8, env->regs[R_ESI]);
|
||||
stl_phys(cs->as, sm_state + 0x7fe4, env->regs[R_EBP]);
|
||||
stl_phys(cs->as, sm_state + 0x7fe0, env->regs[R_ESP]);
|
||||
stl_phys(cs->as, sm_state + 0x7fdc, env->regs[R_EBX]);
|
||||
stl_phys(cs->as, sm_state + 0x7fd8, env->regs[R_EDX]);
|
||||
stl_phys(cs->as, sm_state + 0x7fd4, env->regs[R_ECX]);
|
||||
stl_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EAX]);
|
||||
stl_phys(cs->as, sm_state + 0x7fcc, env->dr[6]);
|
||||
stl_phys(cs->as, sm_state + 0x7fc8, env->dr[7]);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7fc4, env->tr.selector);
|
||||
stl_phys(cs->as, sm_state + 0x7f64, env->tr.base);
|
||||
stl_phys(cs->as, sm_state + 0x7f60, env->tr.limit);
|
||||
stl_phys(cs->as, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7fc0, env->ldt.selector);
|
||||
stl_phys(cs->as, sm_state + 0x7f80, env->ldt.base);
|
||||
stl_phys(cs->as, sm_state + 0x7f7c, env->ldt.limit);
|
||||
stl_phys(cs->as, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7f74, env->gdt.base);
|
||||
stl_phys(cs->as, sm_state + 0x7f70, env->gdt.limit);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7f58, env->idt.base);
|
||||
stl_phys(cs->as, sm_state + 0x7f54, env->idt.limit);
|
||||
|
||||
for (i = 0; i < 6; i++) {
|
||||
dt = &env->segs[i];
|
||||
if (i < 3) {
|
||||
offset = 0x7f84 + i * 12;
|
||||
} else {
|
||||
offset = 0x7f2c + (i - 3) * 12;
|
||||
}
|
||||
stl_phys(cs->as, sm_state + 0x7fa8 + i * 4, dt->selector);
|
||||
stl_phys(cs->as, sm_state + offset + 8, dt->base);
|
||||
stl_phys(cs->as, sm_state + offset + 4, dt->limit);
|
||||
stl_phys(cs->as, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
|
||||
}
|
||||
stl_phys(cs->as, sm_state + 0x7f14, env->cr[4]);
|
||||
|
||||
stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
|
||||
stl_phys(cs->as, sm_state + 0x7ef8, env->smbase);
|
||||
#endif
|
||||
/* init SMM cpu state */
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
cpu_load_efer(env, 0);
|
||||
#endif
|
||||
cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
|
||||
DF_MASK));
|
||||
env->eip = 0x00008000;
|
||||
cpu_x86_update_cr0(env,
|
||||
env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
|
||||
CR0_PG_MASK));
|
||||
cpu_x86_update_cr4(env, 0);
|
||||
env->dr[7] = 0x00000400;
|
||||
|
||||
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
}

void helper_rsm(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));

    env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70);
    env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);

    env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90);
    env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
    env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
    env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
    env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
    env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
    env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
    env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
    env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(cs->as, sm_state + offset),
                               ldq_phys(cs->as, sm_state + offset + 8),
                               ldl_phys(cs->as, sm_state + offset + 4),
                               (lduw_phys(cs->as, sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(cs->as, sm_state + 0x7ff0);
    env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec);
    env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8);
    env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4);
    env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0);
    env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc);
    env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8);
    env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4);
    env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64);
    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70);

    env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58);
    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(cs->as,
                                        sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(cs->as, sm_state + offset + 8),
                               ldl_phys(cs->as, sm_state + offset + 4),
                               (ldl_phys(cs->as,
                                         sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14));

    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
222
qemu/target-i386/svm.h
Normal file
222
qemu/target-i386/svm.h
Normal file
@@ -0,0 +1,222 @@
#ifndef __SVM_H
#define __SVM_H

#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1

#define V_TPR_MASK 0x0f

#define V_IRQ_SHIFT 8
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)

#define V_INTR_PRIO_SHIFT 16
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

#define SVM_INTERRUPT_SHADOW_MASK 1

#define SVM_IOIO_STR_SHIFT 2
#define SVM_IOIO_REP_SHIFT 3
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_ASIZE_SHIFT 7

#define SVM_IOIO_TYPE_MASK 1
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_VALID (1 << 31)
#define SVM_EVTINJ_VALID_ERR (1 << 11)

#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK

#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR

#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
#define SVM_EXIT_READ_CR8 0x008
#define SVM_EXIT_WRITE_CR0 0x010
#define SVM_EXIT_WRITE_CR3 0x013
#define SVM_EXIT_WRITE_CR4 0x014
#define SVM_EXIT_WRITE_CR8 0x018
#define SVM_EXIT_READ_DR0 0x020
#define SVM_EXIT_READ_DR1 0x021
#define SVM_EXIT_READ_DR2 0x022
#define SVM_EXIT_READ_DR3 0x023
#define SVM_EXIT_READ_DR4 0x024
#define SVM_EXIT_READ_DR5 0x025
#define SVM_EXIT_READ_DR6 0x026
#define SVM_EXIT_READ_DR7 0x027
#define SVM_EXIT_WRITE_DR0 0x030
#define SVM_EXIT_WRITE_DR1 0x031
#define SVM_EXIT_WRITE_DR2 0x032
#define SVM_EXIT_WRITE_DR3 0x033
#define SVM_EXIT_WRITE_DR4 0x034
#define SVM_EXIT_WRITE_DR5 0x035
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
#define SVM_EXIT_INIT 0x063
#define SVM_EXIT_VINTR 0x064
#define SVM_EXIT_CR0_SEL_WRITE 0x065
#define SVM_EXIT_IDTR_READ 0x066
#define SVM_EXIT_GDTR_READ 0x067
#define SVM_EXIT_LDTR_READ 0x068
#define SVM_EXIT_TR_READ 0x069
#define SVM_EXIT_IDTR_WRITE 0x06a
#define SVM_EXIT_GDTR_WRITE 0x06b
#define SVM_EXIT_LDTR_WRITE 0x06c
#define SVM_EXIT_TR_WRITE 0x06d
#define SVM_EXIT_RDTSC 0x06e
#define SVM_EXIT_RDPMC 0x06f
#define SVM_EXIT_PUSHF 0x070
#define SVM_EXIT_POPF 0x071
#define SVM_EXIT_CPUID 0x072
#define SVM_EXIT_RSM 0x073
#define SVM_EXIT_IRET 0x074
#define SVM_EXIT_SWINT 0x075
#define SVM_EXIT_INVD 0x076
#define SVM_EXIT_PAUSE 0x077
#define SVM_EXIT_HLT 0x078
#define SVM_EXIT_INVLPG 0x079
#define SVM_EXIT_INVLPGA 0x07a
#define SVM_EXIT_IOIO 0x07b
#define SVM_EXIT_MSR 0x07c
#define SVM_EXIT_TASK_SWITCH 0x07d
#define SVM_EXIT_FERR_FREEZE 0x07e
#define SVM_EXIT_SHUTDOWN 0x07f
#define SVM_EXIT_VMRUN 0x080
#define SVM_EXIT_VMMCALL 0x081
#define SVM_EXIT_VMLOAD 0x082
#define SVM_EXIT_VMSAVE 0x083
#define SVM_EXIT_STGI 0x084
#define SVM_EXIT_CLGI 0x085
#define SVM_EXIT_SKINIT 0x086
#define SVM_EXIT_RDTSCP 0x087
#define SVM_EXIT_ICEBP 0x088
#define SVM_EXIT_WBINVD 0x089
/* only included in documentation, maybe wrong */
#define SVM_EXIT_MONITOR 0x08a
#define SVM_EXIT_MWAIT 0x08b
#define SVM_EXIT_NPF 0x400

#define SVM_EXIT_ERR -1

#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */

struct QEMU_PACKED vmcb_control_area {
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint64_t intercept;
    uint8_t reserved_1[44];
    uint64_t iopm_base_pa;
    uint64_t msrpm_base_pa;
    uint64_t tsc_offset;
    uint32_t asid;
    uint8_t tlb_ctl;
    uint8_t reserved_2[3];
    uint32_t int_ctl;
    uint32_t int_vector;
    uint32_t int_state;
    uint8_t reserved_3[4];
    uint64_t exit_code;
    uint64_t exit_info_1;
    uint64_t exit_info_2;
    uint32_t exit_int_info;
    uint32_t exit_int_info_err;
    uint64_t nested_ctl;
    uint8_t reserved_4[16];
    uint32_t event_inj;
    uint32_t event_inj_err;
    uint64_t nested_cr3;
    uint64_t lbr_ctl;
    uint8_t reserved_5[832];
};

struct QEMU_PACKED vmcb_seg {
    uint16_t selector;
    uint16_t attrib;
    uint32_t limit;
    uint64_t base;
};

struct QEMU_PACKED vmcb_save_area {
    struct vmcb_seg es;
    struct vmcb_seg cs;
    struct vmcb_seg ss;
    struct vmcb_seg ds;
    struct vmcb_seg fs;
    struct vmcb_seg gs;
    struct vmcb_seg gdtr;
    struct vmcb_seg ldtr;
    struct vmcb_seg idtr;
    struct vmcb_seg tr;
    uint8_t reserved_1[43];
    uint8_t cpl;
    uint8_t reserved_2[4];
    uint64_t efer;
    uint8_t reserved_3[112];
    uint64_t cr4;
    uint64_t cr3;
    uint64_t cr0;
    uint64_t dr7;
    uint64_t dr6;
    uint64_t rflags;
    uint64_t rip;
    uint8_t reserved_4[88];
    uint64_t rsp;
    uint8_t reserved_5[24];
    uint64_t rax;
    uint64_t star;
    uint64_t lstar;
    uint64_t cstar;
    uint64_t sfmask;
    uint64_t kernel_gs_base;
    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
    uint64_t cr2;
    uint8_t reserved_6[32];
    uint64_t g_pat;
    uint64_t dbgctl;
    uint64_t br_from;
    uint64_t br_to;
    uint64_t last_excp_from;
    uint64_t last_excp_to;
};

struct QEMU_PACKED vmcb {
    struct vmcb_control_area control;
    struct vmcb_save_area save;
};

#endif
772
qemu/target-i386/svm_helper.c
Normal file
772
qemu/target-i386/svm_helper.c
Normal file
@@ -0,0 +1,772 @@
/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
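
/* Worked example (illustrative values, hypothetical segment): for a flat
 * 32-bit code segment whose QEMU SegmentCache.flags is 0x00c09b00 (access
 * byte in bits 8-15, G/D/L/AVL in bits 20-23), svm_save_seg() packs the
 * VMCB attrib field as
 *     ((0x00c09b00 >> 8) & 0xff) | ((0x00c09b00 >> 12) & 0x0f00) = 0xc9b,
 * and svm_load_seg() reverses it:
 *     ((0xc9b & 0xff) << 8) | ((0xc9b & 0x0f00) << 12) = 0x00c09b00,
 * so the round trip is lossless for the bits the VMCB actually stores. */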

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
target_ulong addr;
|
||||
uint32_t event_inj;
|
||||
uint32_t int_ctl;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);
|
||||
|
||||
if (aflag == 2) {
|
||||
addr = env->regs[R_EAX];
|
||||
} else {
|
||||
addr = (uint32_t)env->regs[R_EAX];
|
||||
}
|
||||
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
|
||||
|
||||
env->vm_vmcb = addr;
|
||||
|
||||
/* save the current CPU state in the hsave page */
|
||||
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
|
||||
env->gdt.base);
|
||||
stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
|
||||
env->gdt.limit);
|
||||
|
||||
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
|
||||
env->idt.base);
|
||||
stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
|
||||
env->idt.limit);
|
||||
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
|
||||
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.rflags),
|
||||
cpu_compute_eflags(env));
|
||||
|
||||
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
|
||||
&env->segs[R_ES]);
|
||||
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
|
||||
&env->segs[R_CS]);
|
||||
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
|
||||
&env->segs[R_SS]);
|
||||
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
|
||||
&env->segs[R_DS]);
|
||||
|
||||
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
|
||||
env->eip + next_eip_addend);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
|
||||
|
||||
/* load the interception bitmaps so we do not need to access the
|
||||
vmcb in svm mode */
|
||||
env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
control.intercept));
|
||||
env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.intercept_cr_read));
|
||||
env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.intercept_cr_write));
|
||||
env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.intercept_dr_read));
|
||||
env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.intercept_dr_write));
|
||||
env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.intercept_exceptions
|
||||
));
|
||||
|
||||
/* enable intercepts */
|
||||
env->hflags |= HF_SVMI_MASK;
|
||||
|
||||
env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb, control.tsc_offset));
|
||||
|
||||
env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.gdtr.base));
|
||||
env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.gdtr.limit));
|
||||
|
||||
env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.idtr.base));
|
||||
env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.idtr.limit));
|
||||
|
||||
/* clear exit_info_2 so we behave like the real hardware */
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
|
||||
|
||||
cpu_x86_update_cr0(env, ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.cr0)));
|
||||
cpu_x86_update_cr4(env, ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.cr4)));
|
||||
cpu_x86_update_cr3(env, ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.cr3)));
|
||||
env->cr[2] = ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.cr2));
|
||||
int_ctl = ldl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
|
||||
env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
|
||||
if (int_ctl & V_INTR_MASKING_MASK) {
|
||||
env->v_tpr = int_ctl & V_TPR_MASK;
|
||||
env->hflags2 |= HF2_VINTR_MASK;
|
||||
if (env->eflags & IF_MASK) {
|
||||
env->hflags2 |= HF2_HIF_MASK;
|
||||
}
|
||||
}
|
||||
|
||||
cpu_load_efer(env,
|
||||
ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.efer)));
|
||||
env->eflags = 0;
|
||||
cpu_load_eflags(env, ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.rflags)),
|
||||
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
|
||||
|
||||
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
|
||||
R_ES);
|
||||
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
|
||||
R_CS);
|
||||
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
|
||||
R_SS);
|
||||
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
|
||||
R_DS);
|
||||
|
||||
env->eip = ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.rip));
|
||||
|
||||
env->regs[R_ESP] = ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.rsp));
|
||||
env->regs[R_EAX] = ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.rax));
|
||||
env->dr[7] = ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.dr7));
|
||||
env->dr[6] = ldq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.dr6));
|
||||
|
||||
/* FIXME: guest state consistency checks */
|
||||
|
||||
switch (ldub_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
|
||||
case TLB_CONTROL_DO_NOTHING:
|
||||
break;
|
||||
case TLB_CONTROL_FLUSH_ALL_ASID:
|
||||
/* FIXME: this is not 100% correct but should work for now */
|
||||
tlb_flush(cs, 1);
|
||||
break;
|
||||
}
|
||||
|
||||
env->hflags2 |= HF2_GIF_MASK;
|
||||
|
||||
if (int_ctl & V_IRQ_MASK) {
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
|
||||
cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
|
||||
}
|
||||
|
||||
/* maybe we need to inject an event */
|
||||
event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
control.event_inj));
|
||||
if (event_inj & SVM_EVTINJ_VALID) {
|
||||
uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
|
||||
uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
|
||||
uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.event_inj_err));
|
||||
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
|
||||
/* FIXME: need to implement valid_err */
|
||||
switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
|
||||
case SVM_EVTINJ_TYPE_INTR:
|
||||
cs->exception_index = vector;
|
||||
env->error_code = event_inj_err;
|
||||
env->exception_is_int = 0;
|
||||
env->exception_next_eip = -1;
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
|
||||
/* XXX: is it always correct? */
|
||||
do_interrupt_x86_hardirq(env, vector, 1);
|
||||
break;
|
||||
case SVM_EVTINJ_TYPE_NMI:
|
||||
cs->exception_index = EXCP02_NMI;
|
||||
env->error_code = event_inj_err;
|
||||
env->exception_is_int = 0;
|
||||
env->exception_next_eip = env->eip;
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
|
||||
cpu_loop_exit(cs);
|
||||
break;
|
||||
case SVM_EVTINJ_TYPE_EXEPT:
|
||||
cs->exception_index = vector;
|
||||
env->error_code = event_inj_err;
|
||||
env->exception_is_int = 0;
|
||||
env->exception_next_eip = -1;
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
|
||||
cpu_loop_exit(cs);
|
||||
break;
|
||||
case SVM_EVTINJ_TYPE_SOFT:
|
||||
cs->exception_index = vector;
|
||||
env->error_code = event_inj_err;
|
||||
env->exception_is_int = 1;
|
||||
env->exception_next_eip = env->eip;
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
|
||||
cpu_loop_exit(cs);
|
||||
break;
|
||||
}
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
|
||||
env->error_code);
|
||||
}
|
||||
}
|
||||
|
||||
void helper_vmmcall(CPUX86State *env)
|
||||
{
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
|
||||
raise_exception(env, EXCP06_ILLOP);
|
||||
}
|
||||
|
||||
void helper_vmload(CPUX86State *env, int aflag)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
target_ulong addr;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
|
||||
|
||||
if (aflag == 2) {
|
||||
addr = env->regs[R_EAX];
|
||||
} else {
|
||||
addr = (uint32_t)env->regs[R_EAX];
|
||||
}
|
||||
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
|
||||
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
|
||||
addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
|
||||
save.fs.base)),
|
||||
env->segs[R_FS].base);
|
||||
|
||||
svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
|
||||
svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
|
||||
svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
|
||||
svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
|
||||
save.kernel_gs_base));
|
||||
env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
|
||||
env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
|
||||
env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
|
||||
#endif
|
||||
env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
|
||||
env->sysenter_cs = ldq_phys(cs->as,
|
||||
addr + offsetof(struct vmcb, save.sysenter_cs));
|
||||
env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
|
||||
save.sysenter_esp));
|
||||
env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
|
||||
save.sysenter_eip));
|
||||
}
|
||||
|
||||
void helper_vmsave(CPUX86State *env, int aflag)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
target_ulong addr;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
|
||||
|
||||
if (aflag == 2) {
|
||||
addr = env->regs[R_EAX];
|
||||
} else {
|
||||
addr = (uint32_t)env->regs[R_EAX];
|
||||
}
|
||||
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
|
||||
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
|
||||
addr, ldq_phys(cs->as,
|
||||
addr + offsetof(struct vmcb, save.fs.base)),
|
||||
env->segs[R_FS].base);
|
||||
|
||||
svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
|
||||
&env->segs[R_FS]);
|
||||
svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
|
||||
&env->segs[R_GS]);
|
||||
svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
|
||||
&env->tr);
|
||||
svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
|
||||
&env->ldt);
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
|
||||
env->kernelgsbase);
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
|
||||
#endif
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
|
||||
stq_phys(cs->as,
|
||||
addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
|
||||
env->sysenter_esp);
|
||||
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
|
||||
env->sysenter_eip);
|
||||
}
|
||||
|
||||
void helper_stgi(CPUX86State *env)
|
||||
{
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
|
||||
env->hflags2 |= HF2_GIF_MASK;
|
||||
}
|
||||
|
||||
void helper_clgi(CPUX86State *env)
|
||||
{
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
|
||||
env->hflags2 &= ~HF2_GIF_MASK;
|
||||
}
|
||||
|
||||
void helper_skinit(CPUX86State *env)
|
||||
{
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
|
||||
/* XXX: not implemented */
|
||||
raise_exception(env, EXCP06_ILLOP);
|
||||
}
|
||||
|
||||
void helper_invlpga(CPUX86State *env, int aflag)
|
||||
{
|
||||
X86CPU *cpu = x86_env_get_cpu(env);
|
||||
target_ulong addr;
|
||||
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
|
||||
|
||||
if (aflag == 2) {
|
||||
addr = env->regs[R_EAX];
|
||||
} else {
|
||||
addr = (uint32_t)env->regs[R_EAX];
|
||||
}
|
||||
|
||||
/* XXX: could use the ASID to see if it is needed to do the
|
||||
flush */
|
||||
tlb_flush_page(CPU(cpu), addr);
|
||||
}
|
||||
|
||||
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
|
||||
uint64_t param)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
|
||||
if (likely(!(env->hflags & HF_SVMI_MASK))) {
|
||||
return;
|
||||
}
|
||||
switch (type) {
|
||||
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
|
||||
if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
break;
|
||||
case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
|
||||
if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
break;
|
||||
case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
|
||||
if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
break;
|
||||
case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
|
||||
if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
break;
|
||||
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
|
||||
if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
break;
|
||||
case SVM_EXIT_MSR:
|
||||
if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
|
||||
/* FIXME: this should be read in at vmrun (faster this way?) */
|
||||
uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb,
|
||||
control.msrpm_base_pa));
|
||||
uint32_t t0, t1;
|
||||
|
||||
switch ((uint32_t)env->regs[R_ECX]) {
|
||||
case 0 ... 0x1fff:
|
||||
t0 = (env->regs[R_ECX] * 2) % 8;
|
||||
t1 = (env->regs[R_ECX] * 2) / 8;
|
||||
break;
|
||||
case 0xc0000000 ... 0xc0001fff:
|
||||
t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
|
||||
t1 = (t0 / 8);
|
||||
t0 %= 8;
|
||||
break;
|
||||
case 0xc0010000 ... 0xc0011fff:
|
||||
t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
|
||||
t1 = (t0 / 8);
|
||||
t0 %= 8;
|
||||
break;
|
||||
default:
|
||||
helper_vmexit(env, type, param);
|
||||
t0 = 0;
|
||||
t1 = 0;
|
||||
break;
|
||||
}
|
||||
if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
|
||||
helper_vmexit(env, type, param);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
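
/* Worked example (hypothetical MSR, sketching the bitmap math above): for
 * MSR 0xc0000080 (EFER), the 0xc0000000-0xc0001fff case computes
 *     t0 = (8192 + 0x80) * 2 = 16640, t1 = 16640 / 8 = 2080, t0 %= 8 -> 0,
 * so the read-intercept bit is bit 0 of the byte at msrpm_base_pa + 2080
 * and, assuming param is 1 for a write access, (1 << param) << t0 selects
 * the adjacent write-intercept bit. */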
|
||||
|
||||
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
|
||||
uint64_t param)
|
||||
{
|
||||
helper_svm_check_intercept_param(env, type, param);
|
||||
}
|
||||
|
||||
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
|
||||
uint32_t next_eip_addend)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
|
||||
if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
|
||||
/* FIXME: this should be read in at vmrun (faster this way?) */
|
||||
uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
|
||||
offsetof(struct vmcb, control.iopm_base_pa));
|
||||
uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
|
||||
|
||||
if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
|
||||
/* next env->eip */
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
|
||||
env->eip + next_eip_addend);
|
||||
helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
|
||||
}
|
||||
}
|
||||
}
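
/* Worked example (hypothetical access, sketching the IOPM check above): a
 * 4-byte I/O access to port 0x64 has (param >> 4) & 7 == 4, so
 * mask = (1 << 4) - 1 = 0xf, and the lduw_phys() at iopm_base_pa + 0x64 / 8
 * is tested against 0xf << (0x64 & 7), i.e. the permission bits for ports
 * 0x64-0x67; on a hit, exit_info_1 becomes param | (0x64 << 16) and
 * exit_info_2 holds the next EIP. */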
|
||||
|
||||
/* Note: currently only 32 bits of exit_code are used */
|
||||
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
|
||||
{
|
||||
CPUState *cs = CPU(x86_env_get_cpu(env));
|
||||
uint32_t int_ctl;
|
||||
|
||||
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
|
||||
PRIx64 ", " TARGET_FMT_lx ")!\n",
|
||||
exit_code, exit_info_1,
|
||||
ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
control.exit_info_2)),
|
||||
env->eip);
|
||||
|
||||
if (env->hflags & HF_INHIBIT_IRQ_MASK) {
|
||||
stl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.int_state),
|
||||
SVM_INTERRUPT_SHADOW_MASK);
|
||||
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
|
||||
} else {
|
||||
stl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
|
||||
}
|
||||
|
||||
/* Save the VM state in the vmcb */
|
||||
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
|
||||
&env->segs[R_ES]);
|
||||
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
|
||||
&env->segs[R_CS]);
|
||||
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
|
||||
&env->segs[R_SS]);
|
||||
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
|
||||
&env->segs[R_DS]);
|
||||
|
||||
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
|
||||
env->gdt.base);
|
||||
stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
|
||||
env->gdt.limit);
|
||||
|
||||
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
|
||||
env->idt.base);
|
||||
stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
|
||||
env->idt.limit);
|
||||
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
|
||||
|
||||
int_ctl = ldl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
|
||||
int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
|
||||
int_ctl |= env->v_tpr & V_TPR_MASK;
|
||||
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
|
||||
int_ctl |= V_IRQ_MASK;
|
||||
}
|
||||
stl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
|
||||
|
||||
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
|
||||
cpu_compute_eflags(env));
|
||||
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
|
||||
env->eip);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
|
||||
stq_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
|
||||
stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
|
||||
env->hflags & HF_CPL_MASK);
|
||||
|
||||
/* Reload the host state from vm_hsave */
|
||||
env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
|
||||
env->hflags &= ~HF_SVMI_MASK;
|
||||
env->intercept = 0;
|
||||
env->intercept_exceptions = 0;
|
||||
cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
|
||||
env->tsc_offset = 0;
|
||||
|
||||
env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
|
||||
save.gdtr.base));
|
||||
env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
|
||||
save.gdtr.limit));
|
||||
|
||||
env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
|
||||
save.idtr.base));
|
||||
env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
|
||||
save.idtr.limit));
|
||||
|
||||
cpu_x86_update_cr0(env, ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb,
|
||||
save.cr0)) |
|
||||
CR0_PE_MASK);
|
||||
cpu_x86_update_cr4(env, ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb,
|
||||
save.cr4)));
|
||||
cpu_x86_update_cr3(env, ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb,
|
||||
save.cr3)));
|
||||
/* we need to set the efer after the crs so the hidden flags get
|
||||
set properly */
|
||||
cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
|
||||
save.efer)));
|
||||
env->eflags = 0;
|
||||
cpu_load_eflags(env, ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb,
|
||||
save.rflags)),
|
||||
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
|
||||
VM_MASK));
|
||||
|
||||
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
|
||||
R_ES);
|
||||
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
|
||||
R_CS);
|
||||
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
|
||||
R_SS);
|
||||
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
|
||||
R_DS);
|
||||
|
||||
env->eip = ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.rip));
|
||||
env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
|
||||
offsetof(struct vmcb, save.rsp));
|
||||
env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
|
||||
offsetof(struct vmcb, save.rax));
|
||||
|
||||
env->dr[6] = ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.dr6));
|
||||
env->dr[7] = ldq_phys(cs->as,
|
||||
env->vm_hsave + offsetof(struct vmcb, save.dr7));
|
||||
|
||||
/* other setups */
|
||||
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
|
||||
exit_code);
|
||||
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
|
||||
exit_info_1);
|
||||
|
||||
stl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
|
||||
ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
control.event_inj)));
|
||||
stl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
|
||||
ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
|
||||
control.event_inj_err)));
|
||||
stl_phys(cs->as,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
|
||||
|
||||
env->hflags2 &= ~HF2_GIF_MASK;
|
||||
/* FIXME: Resets the current ASID register to zero (host ASID). */
|
||||
|
||||
/* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
|
||||
|
||||
/* Clears the TSC_OFFSET inside the processor. */
|
||||
|
||||
/* If the host is in PAE mode, the processor reloads the host's PDPEs
|
||||
from the page table indicated the host's CR3. If the PDPEs contain
|
||||
illegal state, the processor causes a shutdown. */
|
||||
|
||||
/* Disables all breakpoints in the host DR7 register. */
|
||||
|
||||
/* Checks the reloaded host state for consistency. */
|
||||
|
||||
/* If the host's rIP reloaded by #VMEXIT is outside the limit of the
|
||||
host's code segment or non-canonical (in the case of long mode), a
|
||||
#GP fault is delivered inside the host. */
|
||||
|
||||
/* remove any pending exception */
|
||||
cs->exception_index = -1;
|
||||
env->error_code = 0;
|
||||
env->old_exception = -1;
|
||||
|
||||
cpu_loop_exit(cs);
|
||||
}
|
||||
|
||||
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
|
||||
{
|
||||
helper_vmexit(env, exit_code, exit_info_1);
|
||||
}
|
||||
|
||||
#endif
|
||||
134
qemu/target-i386/topology.h
Normal file
134
qemu/target-i386/topology.h
Normal file
@@ -0,0 +1,134 @@
/*
 * x86 CPU topology data structures and functions
 *
 * Copyright (c) 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef TARGET_I386_TOPOLOGY_H
#define TARGET_I386_TOPOLOGY_H

/* This file implements the APIC-ID-based CPU topology enumeration logic,
 * documented at the following document:
 * Intel® 64 Architecture Processor Topology Enumeration
 * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/
 *
 * This code should be compatible with AMD's "Extended Method" described at:
 * AMD CPUID Specification (Publication #25481)
 * Section 3: Multiple Core Calculation
 * as long as:
 *  nr_threads is set to 1;
 *  OFFSET_IDX is assumed to be 0;
 *  CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width().
 */

#include <stdint.h>
#include <string.h>

#include "qemu/bitops.h"

/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support
 */
typedef uint32_t apic_id_t;

/* Return the bit width needed for 'count' IDs
 */
static unsigned apicid_bitwidth_for_count(unsigned count)
{
    g_assert(count >= 1);
    count -= 1;
    return count ? 32 - clz32(count) : 0;
}

/* Bit width of the SMT_ID (thread ID) field on the APIC ID
 */
static inline unsigned apicid_smt_width(unsigned nr_cores, unsigned nr_threads)
{
    return apicid_bitwidth_for_count(nr_threads);
}

/* Bit width of the Core_ID field
 */
static inline unsigned apicid_core_width(unsigned nr_cores, unsigned nr_threads)
{
    return apicid_bitwidth_for_count(nr_cores);
}

/* Bit offset of the Core_ID field
 */
static inline unsigned apicid_core_offset(unsigned nr_cores,
                                          unsigned nr_threads)
{
    return apicid_smt_width(nr_cores, nr_threads);
}

/* Bit offset of the Pkg_ID (socket ID) field
 */
static inline unsigned apicid_pkg_offset(unsigned nr_cores, unsigned nr_threads)
{
    return apicid_core_offset(nr_cores, nr_threads) +
           apicid_core_width(nr_cores, nr_threads);
}

/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
 *
 * The caller must make sure core_id < nr_cores and smt_id < nr_threads.
 */
static inline apic_id_t apicid_from_topo_ids(unsigned nr_cores,
                                             unsigned nr_threads,
                                             unsigned pkg_id,
                                             unsigned core_id,
                                             unsigned smt_id)
{
    return (pkg_id << apicid_pkg_offset(nr_cores, nr_threads)) |
           (core_id << apicid_core_offset(nr_cores, nr_threads)) |
           smt_id;
}

/* Calculate thread/core/package IDs for a specific topology,
 * based on (contiguous) CPU index
 */
static inline void x86_topo_ids_from_idx(unsigned nr_cores,
                                         unsigned nr_threads,
                                         unsigned cpu_index,
                                         unsigned *pkg_id,
                                         unsigned *core_id,
                                         unsigned *smt_id)
{
    unsigned core_index = cpu_index / nr_threads;
    *smt_id = cpu_index % nr_threads;
    *core_id = core_index % nr_cores;
    *pkg_id = core_index / nr_cores;
}

/* Make APIC ID for the CPU 'cpu_index'
 *
 * 'cpu_index' is a sequential, contiguous ID for the CPU.
 */
static inline apic_id_t x86_apicid_from_cpu_idx(unsigned nr_cores,
                                                unsigned nr_threads,
                                                unsigned cpu_index)
{
    unsigned pkg_id, core_id, smt_id;
    x86_topo_ids_from_idx(nr_cores, nr_threads, cpu_index,
                          &pkg_id, &core_id, &smt_id);
    return apicid_from_topo_ids(nr_cores, nr_threads, pkg_id, core_id, smt_id);
}
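
/* Worked example (hypothetical topology): with nr_cores = 3 and
 * nr_threads = 2, apicid_smt_width() is 1 and apicid_core_width() is 2, so
 * Core_ID starts at bit 1 and Pkg_ID at bit 3.  cpu_index 5 then maps to
 * pkg 0, core 2, thread 1 -> APIC ID (2 << 1) | 1 = 5, while cpu_index 6
 * maps to pkg 1, core 0, thread 0 -> APIC ID 1 << 3 = 8; APIC IDs are not
 * necessarily contiguous even though cpu_index is. */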

#endif /* TARGET_I386_TOPOLOGY_H */
8454
qemu/target-i386/translate.c
Normal file
8454
qemu/target-i386/translate.c
Normal file
File diff suppressed because it is too large
958
qemu/target-i386/unicorn.c
Normal file
958
qemu/target-i386/unicorn.c
Normal file
@@ -0,0 +1,958 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */

#include "hw/boards.h"
#include "sysemu/cpus.h"
#include "hw/i386/pc.h"
#include "unicorn.h"
#include "cpu.h"
#include "tcg.h"

#include "unicorn_common.h"

#define READ_QWORD(x) ((uint64)x)
#define READ_DWORD(x) (x & 0xffffffff)
#define READ_WORD(x) (x & 0xffff)
#define READ_BYTE_H(x) ((x & 0xffff) >> 8)
#define READ_BYTE_L(x) (x & 0xff)


static void x86_set_pc(struct uc_struct *uc, uint64_t address)
{
    ((CPUX86State *)uc->current_cpu->env_ptr)->eip = address;
}

void x86_release(void *ctx);

void x86_release(void *ctx)
{
    release_common(ctx);
    TCGContext *s = (TCGContext *) ctx;

    // arch specific
    g_free(s->cpu_A0);
    g_free(s->cpu_T[0]);
    g_free(s->cpu_T[1]);
    g_free(s->cpu_tmp0);
    g_free(s->cpu_tmp4);
    g_free(s->cpu_cc_srcT);
    g_free(s->cpu_cc_dst);
    g_free(s->cpu_cc_src);
    g_free(s->cpu_cc_src2);

    int i;
    for (i = 0; i < CPU_NB_REGS; ++i) {
        g_free(s->cpu_regs[i]);
    }

    g_free(s->tb_ctx.tbs);
}

void x86_reg_reset(uch handle)
|
||||
{
|
||||
struct uc_struct *uc = (struct uc_struct *) handle;
|
||||
CPUArchState *env;
|
||||
|
||||
env = first_cpu->env_ptr;
|
||||
|
||||
env->invalid_error = UC_ERR_OK; // no error
|
||||
memset(env->regs, 0, sizeof(env->regs));
|
||||
memset(env->segs, 0, sizeof(env->segs));
|
||||
memset(env->cr, 0, sizeof(env->cr));
|
||||
|
||||
memset(&env->ldt, 0, sizeof(env->ldt));
|
||||
memset(&env->gdt, 0, sizeof(env->gdt));
|
||||
memset(&env->tr, 0, sizeof(env->tr));
|
||||
memset(&env->idt, 0, sizeof(env->idt));
|
||||
|
||||
env->eip = 0;
|
||||
env->eflags = 0;
|
||||
|
||||
env->fpstt = 0; /* top of stack index */
|
||||
env->fpus = 0;
|
||||
env->fpuc = 0;
|
||||
memset(env->fptags, 0, sizeof(env->fptags)); /* 0 = valid, 1 = empty */
|
||||
|
||||
env->mxcsr = 0;
|
||||
memset(env->xmm_regs, 0, sizeof(env->xmm_regs));
|
||||
memset(&env->xmm_t0, 0, sizeof(env->xmm_t0));
|
||||
memset(&env->mmx_t0, 0, sizeof(env->mmx_t0));
|
||||
|
||||
memset(env->ymmh_regs, 0, sizeof(env->ymmh_regs));
|
||||
|
||||
memset(env->opmask_regs, 0, sizeof(env->opmask_regs));
|
||||
memset(env->zmmh_regs, 0, sizeof(env->zmmh_regs));
|
||||
|
||||
/* sysenter registers */
|
||||
env->sysenter_cs = 0;
|
||||
env->sysenter_esp = 0;
|
||||
env->sysenter_eip = 0;
|
||||
env->efer = 0;
|
||||
env->star = 0;
|
||||
|
||||
env->vm_hsave = 0;
|
||||
|
||||
env->tsc = 0;
|
||||
env->tsc_adjust = 0;
|
||||
env->tsc_deadline = 0;
|
||||
|
||||
env->mcg_status = 0;
|
||||
env->msr_ia32_misc_enable = 0;
|
||||
env->msr_ia32_feature_control = 0;
|
||||
|
||||
env->msr_fixed_ctr_ctrl = 0;
|
||||
env->msr_global_ctrl = 0;
|
||||
env->msr_global_status = 0;
|
||||
env->msr_global_ovf_ctrl = 0;
|
||||
memset(env->msr_fixed_counters, 0, sizeof(env->msr_fixed_counters));
|
||||
memset(env->msr_gp_counters, 0, sizeof(env->msr_gp_counters));
|
||||
memset(env->msr_gp_evtsel, 0, sizeof(env->msr_gp_evtsel));
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
memset(env->hi16_zmm_regs, 0, sizeof(env->hi16_zmm_regs));
|
||||
env->lstar = 0;
|
||||
env->cstar = 0;
|
||||
env->fmask = 0;
|
||||
env->kernelgsbase = 0;
|
||||
#endif
|
||||
|
||||
// TODO: reset other registers in CPUX86State qemu/target-i386/cpu.h
|
||||
|
||||
// properly initialize internal setup for each mode
|
||||
switch(uc->mode) {
|
||||
default:
|
||||
break;
|
||||
case UC_MODE_32:
|
||||
env->hflags |= HF_CS32_MASK | HF_SS32_MASK;
|
||||
env->cr[0] = CR0_PE_MASK; // protected mode
|
||||
break;
|
||||
case UC_MODE_64:
|
||||
env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_LMA_MASK;
|
||||
env->hflags &= ~(HF_ADDSEG_MASK);
|
||||
env->cr[0] = CR0_PE_MASK; // protected mode
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int x86_reg_read(uch handle, unsigned int regid, void *value)
|
||||
{
|
||||
CPUState *mycpu;
|
||||
struct uc_struct *uc = (struct uc_struct *) handle;
|
||||
|
||||
mycpu = first_cpu;
|
||||
|
||||
switch(uc->mode) {
|
||||
default:
|
||||
break;
|
||||
case UC_MODE_16:
|
||||
switch(regid) {
|
||||
default: break;
|
||||
case X86_REG_ES:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_ES].selector;
|
||||
return 0;
|
||||
case X86_REG_SS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_SS].selector;
|
||||
return 0;
|
||||
case X86_REG_DS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_DS].selector;
|
||||
return 0;
|
||||
case X86_REG_FS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_FS].selector;
|
||||
return 0;
|
||||
case X86_REG_GS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_GS].selector;
|
||||
return 0;
|
||||
}
|
||||
// fall-thru
|
||||
case UC_MODE_32:
|
||||
switch(regid) {
|
||||
default:
|
||||
break;
|
||||
case X86_REG_CR0 ... X86_REG_CR4:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - X86_REG_CR0];
|
||||
break;
|
||||
case X86_REG_DR0 ... X86_REG_DR7:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - X86_REG_DR0];
|
||||
break;
|
||||
case X86_REG_EFLAGS:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.eflags;
|
||||
break;
|
||||
case X86_REG_EAX:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX];
|
||||
break;
|
||||
case X86_REG_AX:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
|
||||
break;
|
||||
case X86_REG_AH:
|
||||
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
|
||||
break;
|
||||
case X86_REG_AL:
|
||||
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
|
||||
break;
|
||||
case X86_REG_EBX:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX];
|
||||
break;
|
||||
case X86_REG_BX:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
|
||||
break;
|
||||
case X86_REG_BH:
|
||||
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
|
||||
break;
|
||||
case X86_REG_BL:
|
||||
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
|
||||
break;
|
||||
case X86_REG_ECX:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX];
|
||||
break;
|
||||
case X86_REG_CX:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
|
||||
break;
|
||||
case X86_REG_CH:
|
||||
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
|
||||
break;
|
||||
case X86_REG_CL:
|
||||
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
|
||||
break;
|
||||
case X86_REG_EDX:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX];
|
||||
break;
|
||||
case X86_REG_DX:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
|
||||
break;
|
||||
case X86_REG_DH:
|
||||
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
|
||||
break;
|
||||
case X86_REG_DL:
|
||||
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
|
||||
break;
|
||||
case X86_REG_ESP:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP];
|
||||
break;
|
||||
case X86_REG_SP:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]);
|
||||
break;
|
||||
case X86_REG_EBP:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP];
|
||||
break;
|
||||
case X86_REG_BP:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]);
|
||||
break;
|
||||
case X86_REG_ESI:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI];
|
||||
break;
|
||||
case X86_REG_SI:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]);
|
||||
break;
|
||||
case X86_REG_EDI:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI];
|
||||
break;
|
||||
case X86_REG_DI:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]);
|
||||
break;
|
||||
case X86_REG_EIP:
|
||||
*(int32_t *)value = X86_CPU(uc, mycpu)->env.eip;
|
||||
break;
|
||||
case X86_REG_IP:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip);
|
||||
break;
|
||||
case X86_REG_CS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_CS].base;
|
||||
break;
|
||||
case X86_REG_DS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_DS].base;
|
||||
break;
|
||||
case X86_REG_SS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_SS].base;
|
||||
break;
|
||||
case X86_REG_ES:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_ES].base;
|
||||
break;
|
||||
case X86_REG_FS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_FS].base;
|
||||
break;
|
||||
case X86_REG_GS:
|
||||
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_GS].base;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
case UC_MODE_64:
|
||||
switch(regid) {
|
||||
default:
|
||||
break;
|
||||
case X86_REG_CR0 ... X86_REG_CR4:
|
||||
*(int64_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - X86_REG_CR0];
|
||||
break;
|
||||
case X86_REG_DR0 ... X86_REG_DR7:
|
||||
*(int64_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - X86_REG_DR0];
|
||||
break;
|
||||
case X86_REG_EFLAGS:
|
||||
*(int64_t *)value = X86_CPU(uc, mycpu)->env.eflags;
|
||||
break;
|
||||
case X86_REG_RAX:
|
||||
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX];
|
||||
break;
|
||||
case X86_REG_EAX:
|
||||
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
|
||||
break;
|
||||
case X86_REG_AX:
|
||||
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
|
||||
break;
|
||||
case X86_REG_AH:
|
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
break;
case X86_REG_AL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]);
break;
case X86_REG_RBX:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX];
break;
case X86_REG_EBX:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
break;
case X86_REG_BX:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
break;
case X86_REG_BH:
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
break;
case X86_REG_BL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]);
break;
case X86_REG_RCX:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX];
break;
case X86_REG_ECX:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
break;
case X86_REG_CX:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
break;
case X86_REG_CH:
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
break;
case X86_REG_CL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]);
break;
case X86_REG_RDX:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX];
break;
case X86_REG_EDX:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
break;
case X86_REG_DX:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
break;
case X86_REG_DH:
*(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
break;
case X86_REG_DL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]);
break;
case X86_REG_RSP:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP];
break;
case X86_REG_ESP:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]);
break;
case X86_REG_SP:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]);
break;
case X86_REG_SPL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP]);
break;
case X86_REG_RBP:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP];
break;
case X86_REG_EBP:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]);
break;
case X86_REG_BP:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]);
break;
case X86_REG_BPL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP]);
break;
case X86_REG_RSI:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI];
break;
case X86_REG_ESI:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]);
break;
case X86_REG_SI:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]);
break;
case X86_REG_SIL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI]);
break;
case X86_REG_RDI:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI];
break;
case X86_REG_EDI:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]);
break;
case X86_REG_DI:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]);
break;
case X86_REG_DIL:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI]);
break;
case X86_REG_RIP:
*(uint64_t *)value = X86_CPU(uc, mycpu)->env.eip;
break;
case X86_REG_EIP:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.eip);
break;
case X86_REG_IP:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip);
break;
case X86_REG_CS:
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_CS].base;
break;
case X86_REG_DS:
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_DS].base;
break;
case X86_REG_SS:
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_SS].base;
break;
case X86_REG_ES:
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_ES].base;
break;
case X86_REG_FS:
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_FS].base;
break;
case X86_REG_GS:
*(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_GS].base;
break;
case X86_REG_R8:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[8]);
break;
case X86_REG_R8D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[8]);
break;
case X86_REG_R8W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[8]);
break;
case X86_REG_R8B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8]);
break;
case X86_REG_R9:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[9]);
break;
case X86_REG_R9D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[9]);
break;
case X86_REG_R9W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[9]);
break;
case X86_REG_R9B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9]);
break;
case X86_REG_R10:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[10]);
break;
case X86_REG_R10D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[10]);
break;
case X86_REG_R10W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[10]);
break;
case X86_REG_R10B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10]);
break;
case X86_REG_R11:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[11]);
break;
case X86_REG_R11D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[11]);
break;
case X86_REG_R11W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[11]);
break;
case X86_REG_R11B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11]);
break;
case X86_REG_R12:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[12]);
break;
case X86_REG_R12D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[12]);
break;
case X86_REG_R12W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[12]);
break;
case X86_REG_R12B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12]);
break;
case X86_REG_R13:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[13]);
break;
case X86_REG_R13D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[13]);
break;
case X86_REG_R13W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[13]);
break;
case X86_REG_R13B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13]);
break;
case X86_REG_R14:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[14]);
break;
case X86_REG_R14D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[14]);
break;
case X86_REG_R14W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[14]);
break;
case X86_REG_R14B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14]);
break;
case X86_REG_R15:
*(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[15]);
break;
case X86_REG_R15D:
*(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[15]);
break;
case X86_REG_R15W:
*(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[15]);
break;
case X86_REG_R15B:
*(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15]);
break;
}
break;
#endif
}

return 0;
}

// in-place update helpers for the sub-registers of a wider register
#define WRITE_DWORD(x, w) (x = (x & ~0xffffffff) | (w & 0xffffffff))
#define WRITE_WORD(x, w) (x = (x & ~0xffff) | (w & 0xffff))
#define WRITE_BYTE_H(x, b) (x = (x & ~0xff00) | ((b & 0xff) << 8))
#define WRITE_BYTE_L(x, b) (x = (x & ~0xff) | (b & 0xff))

int x86_reg_write(uch handle, unsigned int regid, void *value)
{
CPUState *mycpu;
struct uc_struct *uc = (struct uc_struct *) handle;

mycpu = first_cpu;

switch(uc->mode) {
default:
break;

case UC_MODE_16:
switch(regid) {
default: break;
case X86_REG_ES:
X86_CPU(uc, mycpu)->env.segs[R_ES].selector = *(int16_t *)value;
return 0;
case X86_REG_SS:
X86_CPU(uc, mycpu)->env.segs[R_SS].selector = *(int16_t *)value;
return 0;
case X86_REG_DS:
X86_CPU(uc, mycpu)->env.segs[R_DS].selector = *(int16_t *)value;
return 0;
case X86_REG_FS:
X86_CPU(uc, mycpu)->env.segs[R_FS].selector = *(int16_t *)value;
return 0;
case X86_REG_GS:
X86_CPU(uc, mycpu)->env.segs[R_GS].selector = *(int16_t *)value;
return 0;
}
// fall-thru
case UC_MODE_32:
switch(regid) {
default:
break;
case X86_REG_CR0 ... X86_REG_CR4:
X86_CPU(uc, mycpu)->env.cr[regid - X86_REG_CR0] = *(int32_t *)value;
break;
case X86_REG_DR0 ... X86_REG_DR7:
X86_CPU(uc, mycpu)->env.dr[regid - X86_REG_DR0] = *(int32_t *)value;
break;
case X86_REG_EFLAGS:
X86_CPU(uc, mycpu)->env.eflags = *(int32_t *)value;
break;
case X86_REG_EAX:
X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(int32_t *)value;
break;
case X86_REG_AX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int16_t *)value);
break;
case X86_REG_AH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int8_t *)value);
break;
case X86_REG_AL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int8_t *)value);
break;
case X86_REG_EBX:
X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(int32_t *)value;
break;
case X86_REG_BX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int16_t *)value);
break;
case X86_REG_BH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int8_t *)value);
break;
case X86_REG_BL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int8_t *)value);
break;
case X86_REG_ECX:
X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(int32_t *)value;
break;
case X86_REG_CX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int16_t *)value);
break;
case X86_REG_CH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int8_t *)value);
break;
case X86_REG_CL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int8_t *)value);
break;
case X86_REG_EDX:
X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(int32_t *)value;
break;
case X86_REG_DX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int16_t *)value);
break;
case X86_REG_DH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int8_t *)value);
break;
case X86_REG_DL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int8_t *)value);
break;
case X86_REG_ESP:
X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(int32_t *)value;
break;
case X86_REG_SP:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(int16_t *)value);
break;
case X86_REG_EBP:
X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(int32_t *)value;
break;
case X86_REG_BP:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(int16_t *)value);
break;
case X86_REG_ESI:
X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(int32_t *)value;
break;
case X86_REG_SI:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(int16_t *)value);
break;
case X86_REG_EDI:
X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(int32_t *)value;
break;
case X86_REG_DI:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(int16_t *)value);
break;
case X86_REG_EIP:
X86_CPU(uc, mycpu)->env.eip = *(int32_t *)value;
break;
case X86_REG_IP:
WRITE_WORD(X86_CPU(uc, mycpu)->env.eip, *(int16_t *)value);
break;
case X86_REG_CS:
X86_CPU(uc, mycpu)->env.segs[R_CS].base = *(int16_t *)value;
break;
case X86_REG_DS:
X86_CPU(uc, mycpu)->env.segs[R_DS].base = *(int16_t *)value;
break;
case X86_REG_SS:
X86_CPU(uc, mycpu)->env.segs[R_SS].base = *(int16_t *)value;
break;
case X86_REG_ES:
X86_CPU(uc, mycpu)->env.segs[R_ES].base = *(int16_t *)value;
break;
case X86_REG_FS:
X86_CPU(uc, mycpu)->env.segs[R_FS].base = *(int16_t *)value;
break;
case X86_REG_GS:
X86_CPU(uc, mycpu)->env.segs[R_GS].base = *(int16_t *)value;
break;
}
break;

#ifdef TARGET_X86_64
case UC_MODE_64:
switch(regid) {
default:
break;
case X86_REG_CR0 ... X86_REG_CR4:
X86_CPU(uc, mycpu)->env.cr[regid - X86_REG_CR0] = *(int64_t *)value;
break;
case X86_REG_DR0 ... X86_REG_DR7:
X86_CPU(uc, mycpu)->env.dr[regid - X86_REG_DR0] = *(int64_t *)value;
break;
case X86_REG_EFLAGS:
X86_CPU(uc, mycpu)->env.eflags = *(int64_t *)value;
break;
case X86_REG_RAX:
X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(int64_t *)value;
break;
case X86_REG_EAX:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int32_t *)value);
break;
case X86_REG_AX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int16_t *)value);
break;
case X86_REG_AH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int8_t *)value);
break;
case X86_REG_AL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(int8_t *)value);
break;
case X86_REG_RBX:
X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(int64_t *)value;
break;
case X86_REG_EBX:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int32_t *)value);
break;
case X86_REG_BX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int16_t *)value);
break;
case X86_REG_BH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int8_t *)value);
break;
case X86_REG_BL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(int8_t *)value);
break;
case X86_REG_RCX:
X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(int64_t *)value;
break;
case X86_REG_ECX:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int32_t *)value);
break;
case X86_REG_CX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int16_t *)value);
break;
case X86_REG_CH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int8_t *)value);
break;
case X86_REG_CL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(int8_t *)value);
break;
case X86_REG_RDX:
X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(int64_t *)value;
break;
case X86_REG_EDX:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int32_t *)value);
break;
case X86_REG_DX:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int16_t *)value);
break;
case X86_REG_DH:
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int8_t *)value);
break;
case X86_REG_DL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(int8_t *)value);
break;
case X86_REG_RSP:
X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(int64_t *)value;
break;
case X86_REG_ESP:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(int32_t *)value);
break;
case X86_REG_SP:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(int16_t *)value);
break;
case X86_REG_SPL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(int8_t *)value);
break;
case X86_REG_RBP:
X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(int64_t *)value;
break;
case X86_REG_EBP:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(int32_t *)value);
break;
case X86_REG_BP:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(int16_t *)value);
break;
case X86_REG_BPL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(int8_t *)value);
break;
case X86_REG_RSI:
X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(int64_t *)value;
break;
case X86_REG_ESI:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(int32_t *)value);
break;
case X86_REG_SI:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(int16_t *)value);
break;
case X86_REG_SIL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(int8_t *)value);
break;
case X86_REG_RDI:
X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(int64_t *)value;
break;
case X86_REG_EDI:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(int32_t *)value);
break;
case X86_REG_DI:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(int16_t *)value);
break;
case X86_REG_DIL:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(int8_t *)value);
break;
case X86_REG_RIP:
X86_CPU(uc, mycpu)->env.eip = *(int64_t *)value;
break;
case X86_REG_EIP:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.eip, *(int32_t *)value);
break;
case X86_REG_IP:
WRITE_WORD(X86_CPU(uc, mycpu)->env.eip, *(int16_t *)value);
break;
case X86_REG_CS:
X86_CPU(uc, mycpu)->env.segs[R_CS].base = *(int16_t *)value;
break;
case X86_REG_DS:
X86_CPU(uc, mycpu)->env.segs[R_DS].base = *(int16_t *)value;
break;
case X86_REG_SS:
X86_CPU(uc, mycpu)->env.segs[R_SS].base = *(int16_t *)value;
break;
case X86_REG_ES:
X86_CPU(uc, mycpu)->env.segs[R_ES].base = *(int16_t *)value;
break;
case X86_REG_FS:
X86_CPU(uc, mycpu)->env.segs[R_FS].base = *(int16_t *)value;
break;
case X86_REG_GS:
X86_CPU(uc, mycpu)->env.segs[R_GS].base = *(int16_t *)value;
break;
case X86_REG_R8:
X86_CPU(uc, mycpu)->env.regs[8] = *(int64_t *)value;
break;
case X86_REG_R8D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[8], *(int32_t *)value);
break;
case X86_REG_R8W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[8], *(int16_t *)value);
break;
case X86_REG_R8B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8], *(int8_t *)value);
break;
case X86_REG_R9:
X86_CPU(uc, mycpu)->env.regs[9] = *(int64_t *)value;
break;
case X86_REG_R9D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[9], *(int32_t *)value);
break;
case X86_REG_R9W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[9], *(int16_t *)value);
break;
case X86_REG_R9B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9], *(int8_t *)value);
break;
case X86_REG_R10:
X86_CPU(uc, mycpu)->env.regs[10] = *(int64_t *)value;
break;
case X86_REG_R10D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[10], *(int32_t *)value);
break;
case X86_REG_R10W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[10], *(int16_t *)value);
break;
case X86_REG_R10B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10], *(int8_t *)value);
break;
case X86_REG_R11:
X86_CPU(uc, mycpu)->env.regs[11] = *(int64_t *)value;
break;
case X86_REG_R11D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[11], *(int32_t *)value);
break;
case X86_REG_R11W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[11], *(int16_t *)value);
break;
case X86_REG_R11B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11], *(int8_t *)value);
break;
case X86_REG_R12:
X86_CPU(uc, mycpu)->env.regs[12] = *(int64_t *)value;
break;
case X86_REG_R12D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[12], *(int32_t *)value);
break;
case X86_REG_R12W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[12], *(int16_t *)value);
break;
case X86_REG_R12B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12], *(int8_t *)value);
break;
case X86_REG_R13:
X86_CPU(uc, mycpu)->env.regs[13] = *(int64_t *)value;
break;
case X86_REG_R13D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[13], *(int32_t *)value);
break;
case X86_REG_R13W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[13], *(int16_t *)value);
break;
case X86_REG_R13B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13], *(int8_t *)value);
break;
case X86_REG_R14:
X86_CPU(uc, mycpu)->env.regs[14] = *(int64_t *)value;
break;
case X86_REG_R14D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[14], *(int32_t *)value);
break;
case X86_REG_R14W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[14], *(int16_t *)value);
break;
case X86_REG_R14B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14], *(int8_t *)value);
break;
case X86_REG_R15:
X86_CPU(uc, mycpu)->env.regs[15] = *(int64_t *)value;
break;
case X86_REG_R15D:
WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[15], *(int32_t *)value);
break;
case X86_REG_R15W:
WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[15], *(int16_t *)value);
break;
case X86_REG_R15B:
WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15], *(int8_t *)value);
break;
}
break;
#endif
}

return 0;
}

__attribute__ ((visibility ("default")))
int x86_uc_machine_init(struct uc_struct *uc)
{
return machine_initialize(uc);
}

void pc_machine_init(struct uc_struct *uc);

__attribute__ ((visibility ("default")))
void x86_uc_init(struct uc_struct* uc)
{
apic_register_types(uc);
apic_common_register_types(uc);
register_accel_types(uc);
pc_machine_register_types(uc);
x86_cpu_register_types(uc);
pc_machine_init(uc); // pc_piix
uc->reg_read = x86_reg_read;
uc->reg_write = x86_reg_write;
uc->reg_reset = x86_reg_reset;
uc->release = x86_release;
uc->set_pc = x86_set_pc;
uc_common_init(uc);
}
15
qemu/target-i386/unicorn.h
Normal file
@@ -0,0 +1,15 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */

#ifndef UC_QEMU_TARGET_I386_H
#define UC_QEMU_TARGET_I386_H

// functions to read & write registers
int x86_reg_read(uch handle, unsigned int regid, void *value);
int x86_reg_write(uch handle, unsigned int regid, void *value);

void x86_reg_reset(uch handle);

void x86_uc_init(struct uc_struct* uc);
int x86_uc_machine_init(struct uc_struct *uc);
#endif