import Unicorn2
This commit is contained in:
31
qemu/target/i386/TODO
Normal file
31
qemu/target/i386/TODO
Normal file
@@ -0,0 +1,31 @@
|
||||
Correctness issues:
|
||||
|
||||
- some eflags manipulation incorrectly reset the bit 0x2.
|
||||
- SVM: test, cpu save/restore, SMM save/restore.
|
||||
- x86_64: lcall/ljmp intel/amd differences ?
|
||||
- better code fetch (different exception handling + CS.limit support)
|
||||
- user/kernel PUSHL/POPL in helper.c
|
||||
- add missing cpuid tests
|
||||
- return UD exception if LOCK prefix incorrectly used
|
||||
- test ldt limit < 7 ?
|
||||
- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret)
|
||||
- full support of segment limit/rights
|
||||
- full x87 exception support
|
||||
- improve x87 bit exactness (use bochs code ?)
|
||||
- DRx register support
|
||||
- CR0.AC emulation
|
||||
- SSE alignment checks
|
||||
|
||||
Optimizations/Features:
|
||||
|
||||
- add SVM nested paging support
|
||||
- add VMX support
|
||||
- add AVX support
|
||||
- add SSE5 support
|
||||
- fxsave/fxrstor AMD extensions
|
||||
- improve monitor/mwait support
|
||||
- faster EFLAGS update: consider SZAP, C, O can be updated separately
|
||||
with a bit field in CC_OP and more state variables.
|
||||
- evaluate x87 stack pointer statically
|
||||
- find a way to avoid translating several time the same TB if CR0.TS
|
||||
is set or not.
|
||||
345
qemu/target/i386/arch_memory_mapping.c
Normal file
345
qemu/target/i386/arch_memory_mapping.c
Normal file
@@ -0,0 +1,345 @@
|
||||
/*
|
||||
* i386 memory mapping
|
||||
*
|
||||
* Copyright Fujitsu, Corp. 2011, 2012
|
||||
*
|
||||
* Authors:
|
||||
* Wen Congyang <wency@cn.fujitsu.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "sysemu/memory_mapping.h"
|
||||
|
||||
/* PAE Paging or IA-32e Paging */
|
||||
static void walk_pte(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pte_start_addr,
|
||||
int32_t a20_mask, target_ulong start_line_addr)
|
||||
{
|
||||
hwaddr pte_addr, start_paddr;
|
||||
uint64_t pte;
|
||||
target_ulong start_vaddr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 512; i++) {
|
||||
pte_addr = (pte_start_addr + i * 8) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pte = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#else
|
||||
pte = address_space_ldq(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#endif
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
|
||||
if (cpu_physical_memory_is_io(as, start_paddr)) {
|
||||
/* I/O region */
|
||||
continue;
|
||||
}
|
||||
|
||||
start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
|
||||
memory_mapping_list_add_merge_sorted(list, start_paddr,
|
||||
start_vaddr, 1 << 12);
|
||||
}
|
||||
}
|
||||
|
||||
/* 32-bit Paging */
|
||||
static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pte_start_addr, int32_t a20_mask,
|
||||
target_ulong start_line_addr)
|
||||
{
|
||||
hwaddr pte_addr, start_paddr;
|
||||
uint32_t pte;
|
||||
target_ulong start_vaddr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 1024; i++) {
|
||||
pte_addr = (pte_start_addr + i * 4) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pte = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#else
|
||||
pte = address_space_ldl(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#endif
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
start_paddr = pte & ~0xfff;
|
||||
if (cpu_physical_memory_is_io(as, start_paddr)) {
|
||||
/* I/O region */
|
||||
continue;
|
||||
}
|
||||
|
||||
start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
|
||||
memory_mapping_list_add_merge_sorted(list, start_paddr,
|
||||
start_vaddr, 1 << 12);
|
||||
}
|
||||
}
|
||||
|
||||
/* PAE Paging or IA-32e Paging */
|
||||
#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */
|
||||
|
||||
static void walk_pde(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pde_start_addr,
|
||||
int32_t a20_mask, target_ulong start_line_addr)
|
||||
{
|
||||
hwaddr pde_addr, pte_start_addr, start_paddr;
|
||||
uint64_t pde;
|
||||
target_ulong line_addr, start_vaddr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 512; i++) {
|
||||
pde_addr = (pde_start_addr + i * 8) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pde = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#else
|
||||
pde = address_space_ldq(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#endif
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
line_addr = start_line_addr | ((i & 0x1ff) << 21);
|
||||
if (pde & PG_PSE_MASK) {
|
||||
/* 2 MB page */
|
||||
start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
|
||||
if (cpu_physical_memory_is_io(as, start_paddr)) {
|
||||
/* I/O region */
|
||||
continue;
|
||||
}
|
||||
start_vaddr = line_addr;
|
||||
memory_mapping_list_add_merge_sorted(list, start_paddr,
|
||||
start_vaddr, 1 << 21);
|
||||
continue;
|
||||
}
|
||||
|
||||
pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
|
||||
walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/* 32-bit Paging */
|
||||
static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pde_start_addr, int32_t a20_mask,
|
||||
bool pse)
|
||||
{
|
||||
hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
|
||||
uint32_t pde;
|
||||
target_ulong line_addr, start_vaddr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 1024; i++) {
|
||||
pde_addr = (pde_start_addr + i * 4) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#else
|
||||
pde = address_space_ldl(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#endif
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
line_addr = (((unsigned int)i & 0x3ff) << 22);
|
||||
if ((pde & PG_PSE_MASK) && pse) {
|
||||
/*
|
||||
* 4 MB page:
|
||||
* bits 39:32 are bits 20:13 of the PDE
|
||||
* bit3 31:22 are bits 31:22 of the PDE
|
||||
*/
|
||||
high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
|
||||
start_paddr = (pde & ~0x3fffff) | high_paddr;
|
||||
if (cpu_physical_memory_is_io(as, start_paddr)) {
|
||||
/* I/O region */
|
||||
continue;
|
||||
}
|
||||
start_vaddr = line_addr;
|
||||
memory_mapping_list_add_merge_sorted(list, start_paddr,
|
||||
start_vaddr, 1 << 22);
|
||||
continue;
|
||||
}
|
||||
|
||||
pte_start_addr = (pde & ~0xfff) & a20_mask;
|
||||
walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/* PAE Paging */
|
||||
static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pdpe_start_addr, int32_t a20_mask)
|
||||
{
|
||||
hwaddr pdpe_addr, pde_start_addr;
|
||||
uint64_t pdpe;
|
||||
target_ulong line_addr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pdpe = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#else
|
||||
pdpe = address_space_ldq(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#endif
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
line_addr = (((unsigned int)i & 0x3) << 30);
|
||||
pde_start_addr = (pdpe & ~0xfff) & a20_mask;
|
||||
walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
/* IA-32e Paging */
|
||||
static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pdpe_start_addr, int32_t a20_mask,
|
||||
target_ulong start_line_addr)
|
||||
{
|
||||
hwaddr pdpe_addr, pde_start_addr, start_paddr;
|
||||
uint64_t pdpe;
|
||||
target_ulong line_addr, start_vaddr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 512; i++) {
|
||||
pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pdpe = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#else
|
||||
pdpe = address_space_ldq(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
#endif
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
|
||||
if (pdpe & PG_PSE_MASK) {
|
||||
/* 1 GB page */
|
||||
start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
|
||||
if (cpu_physical_memory_is_io(as, start_paddr)) {
|
||||
/* I/O region */
|
||||
continue;
|
||||
}
|
||||
start_vaddr = line_addr;
|
||||
memory_mapping_list_add_merge_sorted(list, start_paddr,
|
||||
start_vaddr, 1 << 30);
|
||||
continue;
|
||||
}
|
||||
|
||||
pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
|
||||
walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/* IA-32e Paging */
|
||||
static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pml4e_start_addr, int32_t a20_mask,
|
||||
target_ulong start_line_addr)
|
||||
{
|
||||
hwaddr pml4e_addr, pdpe_start_addr;
|
||||
uint64_t pml4e;
|
||||
target_ulong line_addr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 512; i++) {
|
||||
pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pml4e = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
|
||||
#else
|
||||
pml4e = address_space_ldq(as->uc, as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
|
||||
#endif
|
||||
NULL);
|
||||
if (!(pml4e & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
line_addr = start_line_addr | ((i & 0x1ffULL) << 39);
|
||||
pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
|
||||
walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pml5e(MemoryMappingList *list, AddressSpace *as,
|
||||
hwaddr pml5e_start_addr, int32_t a20_mask)
|
||||
{
|
||||
hwaddr pml5e_addr, pml4e_start_addr;
|
||||
uint64_t pml5e;
|
||||
target_ulong line_addr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 512; i++) {
|
||||
pml5e_addr = (pml5e_start_addr + i * 8) & a20_mask;
|
||||
#ifdef UNICORN_ARCH_POSTFIX
|
||||
pml5e = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pml5e_addr, MEMTXATTRS_UNSPECIFIED,
|
||||
#else
|
||||
pml5e = address_space_ldq(as->uc, as, pml5e_addr, MEMTXATTRS_UNSPECIFIED,
|
||||
#endif
|
||||
NULL);
|
||||
if (!(pml5e & PG_PRESENT_MASK)) {
|
||||
/* not present */
|
||||
continue;
|
||||
}
|
||||
|
||||
line_addr = (0x7fULL << 57) | ((i & 0x1ffULL) << 48);
|
||||
pml4e_start_addr = (pml5e & PLM4_ADDR_MASK) & a20_mask;
|
||||
walk_pml4e(list, as, pml4e_start_addr, a20_mask, line_addr);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
int32_t a20_mask;
|
||||
|
||||
if (!cpu_paging_enabled(cs)) {
|
||||
/* paging is disabled */
|
||||
return;
|
||||
}
|
||||
|
||||
a20_mask = x86_get_a20_mask(env);
|
||||
if (env->cr[4] & CR4_PAE_MASK) {
|
||||
#ifdef TARGET_X86_64
|
||||
if (env->hflags & HF_LMA_MASK) {
|
||||
if (env->cr[4] & CR4_LA57_MASK) {
|
||||
hwaddr pml5e_addr;
|
||||
|
||||
pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
|
||||
walk_pml5e(list, cs->as, pml5e_addr, a20_mask);
|
||||
} else {
|
||||
hwaddr pml4e_addr;
|
||||
|
||||
pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
|
||||
walk_pml4e(list, cs->as, pml4e_addr, a20_mask,
|
||||
0xffffULL << 48);
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
hwaddr pdpe_addr;
|
||||
|
||||
pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask;
|
||||
walk_pdpe2(list, cs->as, pdpe_addr, a20_mask);
|
||||
}
|
||||
} else {
|
||||
hwaddr pde_addr;
|
||||
bool pse;
|
||||
|
||||
pde_addr = (env->cr[3] & ~0xfff) & a20_mask;
|
||||
pse = !!(env->cr[4] & CR4_PSE_MASK);
|
||||
walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
|
||||
}
|
||||
}
|
||||
|
||||
327
qemu/target/i386/bpt_helper.c
Normal file
327
qemu/target/i386/bpt_helper.c
Normal file
@@ -0,0 +1,327 @@
|
||||
/*
|
||||
* i386 breakpoint helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/helper-proto.h"
|
||||
|
||||
|
||||
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
|
||||
{
|
||||
return (dr7 >> (index * 2)) & 1;
|
||||
}
|
||||
|
||||
static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
|
||||
{
|
||||
return (dr7 >> (index * 2)) & 2;
|
||||
|
||||
}
|
||||
static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
|
||||
{
|
||||
return hw_global_breakpoint_enabled(dr7, index) ||
|
||||
hw_local_breakpoint_enabled(dr7, index);
|
||||
}
|
||||
|
||||
static inline int hw_breakpoint_type(unsigned long dr7, int index)
|
||||
{
|
||||
return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
|
||||
}
|
||||
|
||||
static inline int hw_breakpoint_len(unsigned long dr7, int index)
|
||||
{
|
||||
int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
|
||||
return (len == 2) ? 8 : len + 1;
|
||||
}
|
||||
|
||||
static int hw_breakpoint_insert(CPUX86State *env, int index)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
target_ulong dr7 = env->dr[7];
|
||||
target_ulong drN = env->dr[index];
|
||||
int err = 0;
|
||||
|
||||
switch (hw_breakpoint_type(dr7, index)) {
|
||||
case DR7_TYPE_BP_INST:
|
||||
if (hw_breakpoint_enabled(dr7, index)) {
|
||||
err = cpu_breakpoint_insert(cs, drN, BP_CPU,
|
||||
&env->cpu_breakpoint[index]);
|
||||
}
|
||||
break;
|
||||
|
||||
case DR7_TYPE_IO_RW:
|
||||
/* Notice when we should enable calls to bpt_io. */
|
||||
return hw_breakpoint_enabled(env->dr[7], index)
|
||||
? HF_IOBPT_MASK : 0;
|
||||
|
||||
case DR7_TYPE_DATA_WR:
|
||||
if (hw_breakpoint_enabled(dr7, index)) {
|
||||
err = cpu_watchpoint_insert(cs, drN,
|
||||
hw_breakpoint_len(dr7, index),
|
||||
BP_CPU | BP_MEM_WRITE,
|
||||
&env->cpu_watchpoint[index]);
|
||||
}
|
||||
break;
|
||||
|
||||
case DR7_TYPE_DATA_RW:
|
||||
if (hw_breakpoint_enabled(dr7, index)) {
|
||||
err = cpu_watchpoint_insert(cs, drN,
|
||||
hw_breakpoint_len(dr7, index),
|
||||
BP_CPU | BP_MEM_ACCESS,
|
||||
&env->cpu_watchpoint[index]);
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (err) {
|
||||
env->cpu_breakpoint[index] = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hw_breakpoint_remove(CPUX86State *env, int index)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
switch (hw_breakpoint_type(env->dr[7], index)) {
|
||||
case DR7_TYPE_BP_INST:
|
||||
if (env->cpu_breakpoint[index]) {
|
||||
cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
|
||||
env->cpu_breakpoint[index] = NULL;
|
||||
}
|
||||
break;
|
||||
|
||||
case DR7_TYPE_DATA_WR:
|
||||
case DR7_TYPE_DATA_RW:
|
||||
if (env->cpu_breakpoint[index]) {
|
||||
cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
|
||||
env->cpu_breakpoint[index] = NULL;
|
||||
}
|
||||
break;
|
||||
|
||||
case DR7_TYPE_IO_RW:
|
||||
/* HF_IOBPT_MASK cleared elsewhere. */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
|
||||
{
|
||||
target_ulong old_dr7 = env->dr[7];
|
||||
int iobpt = 0;
|
||||
int i;
|
||||
|
||||
new_dr7 |= DR7_FIXED_1;
|
||||
|
||||
/* If nothing is changing except the global/local enable bits,
|
||||
then we can make the change more efficient. */
|
||||
if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
|
||||
/* Fold the global and local enable bits together into the
|
||||
global fields, then xor to show which registers have
|
||||
changed collective enable state. */
|
||||
int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
|
||||
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
|
||||
hw_breakpoint_remove(env, i);
|
||||
}
|
||||
}
|
||||
env->dr[7] = new_dr7;
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
|
||||
iobpt |= hw_breakpoint_insert(env, i);
|
||||
} else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
|
||||
&& hw_breakpoint_enabled(new_dr7, i)) {
|
||||
iobpt |= HF_IOBPT_MASK;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
hw_breakpoint_remove(env, i);
|
||||
}
|
||||
env->dr[7] = new_dr7;
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
iobpt |= hw_breakpoint_insert(env, i);
|
||||
}
|
||||
}
|
||||
|
||||
env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
|
||||
}
|
||||
|
||||
static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
|
||||
{
|
||||
target_ulong dr6;
|
||||
int reg;
|
||||
bool hit_enabled = false;
|
||||
|
||||
dr6 = env->dr[6] & ~0xf;
|
||||
for (reg = 0; reg < DR7_MAX_BP; reg++) {
|
||||
bool bp_match = false;
|
||||
bool wp_match = false;
|
||||
|
||||
switch (hw_breakpoint_type(env->dr[7], reg)) {
|
||||
case DR7_TYPE_BP_INST:
|
||||
if (env->dr[reg] == env->eip) {
|
||||
bp_match = true;
|
||||
}
|
||||
break;
|
||||
case DR7_TYPE_DATA_WR:
|
||||
case DR7_TYPE_DATA_RW:
|
||||
if (env->cpu_watchpoint[reg] &&
|
||||
env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
|
||||
wp_match = true;
|
||||
}
|
||||
break;
|
||||
case DR7_TYPE_IO_RW:
|
||||
break;
|
||||
}
|
||||
if (bp_match || wp_match) {
|
||||
dr6 |= 1ULL << reg;
|
||||
if (hw_breakpoint_enabled(env->dr[7], reg)) {
|
||||
hit_enabled = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hit_enabled || force_dr6_update) {
|
||||
env->dr[6] = dr6;
|
||||
}
|
||||
|
||||
return hit_enabled;
|
||||
}
|
||||
|
||||
void breakpoint_handler(CPUState *cs)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
CPUBreakpoint *bp;
|
||||
|
||||
if (cs->watchpoint_hit) {
|
||||
if (cs->watchpoint_hit->flags & BP_CPU) {
|
||||
cs->watchpoint_hit = NULL;
|
||||
if (check_hw_breakpoints(env, false)) {
|
||||
raise_exception(env, EXCP01_DB);
|
||||
} else {
|
||||
cpu_loop_exit_noexc(cs);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
|
||||
if (bp->pc == env->eip) {
|
||||
if (bp->flags & BP_CPU) {
|
||||
check_hw_breakpoints(env, true);
|
||||
raise_exception(env, EXCP01_DB);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void helper_single_step(CPUX86State *env)
|
||||
{
|
||||
check_hw_breakpoints(env, true);
|
||||
env->dr[6] |= DR6_BS;
|
||||
raise_exception(env, EXCP01_DB);
|
||||
}
|
||||
|
||||
void helper_rechecking_single_step(CPUX86State *env)
|
||||
{
|
||||
if ((env->eflags & TF_MASK) != 0) {
|
||||
helper_single_step(env);
|
||||
}
|
||||
}
|
||||
|
||||
void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
|
||||
{
|
||||
switch (reg) {
|
||||
case 0: case 1: case 2: case 3:
|
||||
if (hw_breakpoint_enabled(env->dr[7], reg)
|
||||
&& hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
|
||||
hw_breakpoint_remove(env, reg);
|
||||
env->dr[reg] = t0;
|
||||
hw_breakpoint_insert(env, reg);
|
||||
} else {
|
||||
env->dr[reg] = t0;
|
||||
}
|
||||
return;
|
||||
case 4:
|
||||
if (env->cr[4] & CR4_DE_MASK) {
|
||||
break;
|
||||
}
|
||||
/* fallthru */
|
||||
case 6:
|
||||
env->dr[6] = t0 | DR6_FIXED_1;
|
||||
return;
|
||||
case 5:
|
||||
if (env->cr[4] & CR4_DE_MASK) {
|
||||
break;
|
||||
}
|
||||
/* fallthru */
|
||||
case 7:
|
||||
cpu_x86_update_dr7(env, t0);
|
||||
return;
|
||||
}
|
||||
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
|
||||
}
|
||||
|
||||
target_ulong helper_get_dr(CPUX86State *env, int reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case 0: case 1: case 2: case 3: case 6: case 7:
|
||||
return env->dr[reg];
|
||||
case 4:
|
||||
if (env->cr[4] & CR4_DE_MASK) {
|
||||
break;
|
||||
} else {
|
||||
return env->dr[6];
|
||||
}
|
||||
case 5:
|
||||
if (env->cr[4] & CR4_DE_MASK) {
|
||||
break;
|
||||
} else {
|
||||
return env->dr[7];
|
||||
}
|
||||
}
|
||||
raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
|
||||
}
|
||||
|
||||
/* Check if Port I/O is trapped by a breakpoint. */
|
||||
void helper_bpt_io(CPUX86State *env, uint32_t port,
|
||||
uint32_t size, target_ulong next_eip)
|
||||
{
|
||||
target_ulong dr7 = env->dr[7];
|
||||
int i, hit = 0;
|
||||
|
||||
for (i = 0; i < DR7_MAX_BP; ++i) {
|
||||
if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
|
||||
&& hw_breakpoint_enabled(dr7, i)) {
|
||||
int bpt_len = hw_breakpoint_len(dr7, i);
|
||||
if (port + size - 1 >= env->dr[i]
|
||||
&& port <= env->dr[i] + bpt_len - 1) {
|
||||
hit |= 1 << i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hit) {
|
||||
env->dr[6] = (env->dr[6] & ~0xf) | hit;
|
||||
env->eip = next_eip;
|
||||
raise_exception(env, EXCP01_DB);
|
||||
}
|
||||
}
|
||||
383
qemu/target/i386/cc_helper.c
Normal file
383
qemu/target/i386/cc_helper.c
Normal file
@@ -0,0 +1,383 @@
|
||||
/*
|
||||
* x86 condition code helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
|
||||
const uint8_t parity_table[256] = {
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
|
||||
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
|
||||
};
|
||||
|
||||
#define SHIFT 0
|
||||
#include "cc_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#define SHIFT 1
|
||||
#include "cc_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#define SHIFT 2
|
||||
#include "cc_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
|
||||
#define SHIFT 3
|
||||
#include "cc_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#endif
|
||||
|
||||
static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
|
||||
target_ulong src2)
|
||||
{
|
||||
return (src1 & ~CC_C) | (dst * CC_C);
|
||||
}
|
||||
|
||||
static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
|
||||
target_ulong src2)
|
||||
{
|
||||
return (src1 & ~CC_O) | (src2 * CC_O);
|
||||
}
|
||||
|
||||
static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
|
||||
target_ulong src2)
|
||||
{
|
||||
return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
|
||||
}
|
||||
|
||||
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
|
||||
target_ulong src2, int op)
|
||||
{
|
||||
switch (op) {
|
||||
default: /* should never happen */
|
||||
return 0;
|
||||
|
||||
case CC_OP_EFLAGS:
|
||||
return src1;
|
||||
case CC_OP_CLR:
|
||||
return CC_Z | CC_P;
|
||||
case CC_OP_POPCNT:
|
||||
return src1 ? 0 : CC_Z;
|
||||
|
||||
case CC_OP_MULB:
|
||||
return compute_all_mulb(dst, src1);
|
||||
case CC_OP_MULW:
|
||||
return compute_all_mulw(dst, src1);
|
||||
case CC_OP_MULL:
|
||||
return compute_all_mull(dst, src1);
|
||||
|
||||
case CC_OP_ADDB:
|
||||
return compute_all_addb(dst, src1);
|
||||
case CC_OP_ADDW:
|
||||
return compute_all_addw(dst, src1);
|
||||
case CC_OP_ADDL:
|
||||
return compute_all_addl(dst, src1);
|
||||
|
||||
case CC_OP_ADCB:
|
||||
return compute_all_adcb(dst, src1, src2);
|
||||
case CC_OP_ADCW:
|
||||
return compute_all_adcw(dst, src1, src2);
|
||||
case CC_OP_ADCL:
|
||||
return compute_all_adcl(dst, src1, src2);
|
||||
|
||||
case CC_OP_SUBB:
|
||||
return compute_all_subb(dst, src1);
|
||||
case CC_OP_SUBW:
|
||||
return compute_all_subw(dst, src1);
|
||||
case CC_OP_SUBL:
|
||||
return compute_all_subl(dst, src1);
|
||||
|
||||
case CC_OP_SBBB:
|
||||
return compute_all_sbbb(dst, src1, src2);
|
||||
case CC_OP_SBBW:
|
||||
return compute_all_sbbw(dst, src1, src2);
|
||||
case CC_OP_SBBL:
|
||||
return compute_all_sbbl(dst, src1, src2);
|
||||
|
||||
case CC_OP_LOGICB:
|
||||
return compute_all_logicb(dst, src1);
|
||||
case CC_OP_LOGICW:
|
||||
return compute_all_logicw(dst, src1);
|
||||
case CC_OP_LOGICL:
|
||||
return compute_all_logicl(dst, src1);
|
||||
|
||||
case CC_OP_INCB:
|
||||
return compute_all_incb(dst, src1);
|
||||
case CC_OP_INCW:
|
||||
return compute_all_incw(dst, src1);
|
||||
case CC_OP_INCL:
|
||||
return compute_all_incl(dst, src1);
|
||||
|
||||
case CC_OP_DECB:
|
||||
return compute_all_decb(dst, src1);
|
||||
case CC_OP_DECW:
|
||||
return compute_all_decw(dst, src1);
|
||||
case CC_OP_DECL:
|
||||
return compute_all_decl(dst, src1);
|
||||
|
||||
case CC_OP_SHLB:
|
||||
return compute_all_shlb(dst, src1);
|
||||
case CC_OP_SHLW:
|
||||
return compute_all_shlw(dst, src1);
|
||||
case CC_OP_SHLL:
|
||||
return compute_all_shll(dst, src1);
|
||||
|
||||
case CC_OP_SARB:
|
||||
return compute_all_sarb(dst, src1);
|
||||
case CC_OP_SARW:
|
||||
return compute_all_sarw(dst, src1);
|
||||
case CC_OP_SARL:
|
||||
return compute_all_sarl(dst, src1);
|
||||
|
||||
case CC_OP_BMILGB:
|
||||
return compute_all_bmilgb(dst, src1);
|
||||
case CC_OP_BMILGW:
|
||||
return compute_all_bmilgw(dst, src1);
|
||||
case CC_OP_BMILGL:
|
||||
return compute_all_bmilgl(dst, src1);
|
||||
|
||||
case CC_OP_ADCX:
|
||||
return compute_all_adcx(dst, src1, src2);
|
||||
case CC_OP_ADOX:
|
||||
return compute_all_adox(dst, src1, src2);
|
||||
case CC_OP_ADCOX:
|
||||
return compute_all_adcox(dst, src1, src2);
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
case CC_OP_MULQ:
|
||||
return compute_all_mulq(dst, src1);
|
||||
case CC_OP_ADDQ:
|
||||
return compute_all_addq(dst, src1);
|
||||
case CC_OP_ADCQ:
|
||||
return compute_all_adcq(dst, src1, src2);
|
||||
case CC_OP_SUBQ:
|
||||
return compute_all_subq(dst, src1);
|
||||
case CC_OP_SBBQ:
|
||||
return compute_all_sbbq(dst, src1, src2);
|
||||
case CC_OP_LOGICQ:
|
||||
return compute_all_logicq(dst, src1);
|
||||
case CC_OP_INCQ:
|
||||
return compute_all_incq(dst, src1);
|
||||
case CC_OP_DECQ:
|
||||
return compute_all_decq(dst, src1);
|
||||
case CC_OP_SHLQ:
|
||||
return compute_all_shlq(dst, src1);
|
||||
case CC_OP_SARQ:
|
||||
return compute_all_sarq(dst, src1);
|
||||
case CC_OP_BMILGQ:
|
||||
return compute_all_bmilgq(dst, src1);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
|
||||
{
|
||||
return (uint32_t)helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
|
||||
}
|
||||
|
||||
target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
|
||||
target_ulong src2, int op)
|
||||
{
|
||||
switch (op) {
|
||||
default: /* should never happen */
|
||||
case CC_OP_LOGICB:
|
||||
case CC_OP_LOGICW:
|
||||
case CC_OP_LOGICL:
|
||||
case CC_OP_LOGICQ:
|
||||
case CC_OP_CLR:
|
||||
case CC_OP_POPCNT:
|
||||
return 0;
|
||||
|
||||
case CC_OP_EFLAGS:
|
||||
case CC_OP_SARB:
|
||||
case CC_OP_SARW:
|
||||
case CC_OP_SARL:
|
||||
case CC_OP_SARQ:
|
||||
case CC_OP_ADOX:
|
||||
return src1 & 1;
|
||||
|
||||
case CC_OP_INCB:
|
||||
case CC_OP_INCW:
|
||||
case CC_OP_INCL:
|
||||
case CC_OP_INCQ:
|
||||
case CC_OP_DECB:
|
||||
case CC_OP_DECW:
|
||||
case CC_OP_DECL:
|
||||
case CC_OP_DECQ:
|
||||
return src1;
|
||||
|
||||
case CC_OP_MULB:
|
||||
case CC_OP_MULW:
|
||||
case CC_OP_MULL:
|
||||
case CC_OP_MULQ:
|
||||
return src1 != 0;
|
||||
|
||||
case CC_OP_ADCX:
|
||||
case CC_OP_ADCOX:
|
||||
return dst;
|
||||
|
||||
case CC_OP_ADDB:
|
||||
return compute_c_addb(dst, src1);
|
||||
case CC_OP_ADDW:
|
||||
return compute_c_addw(dst, src1);
|
||||
case CC_OP_ADDL:
|
||||
return compute_c_addl(dst, src1);
|
||||
|
||||
case CC_OP_ADCB:
|
||||
return compute_c_adcb(dst, src1, src2);
|
||||
case CC_OP_ADCW:
|
||||
return compute_c_adcw(dst, src1, src2);
|
||||
case CC_OP_ADCL:
|
||||
return compute_c_adcl(dst, src1, src2);
|
||||
|
||||
case CC_OP_SUBB:
|
||||
return compute_c_subb(dst, src1);
|
||||
case CC_OP_SUBW:
|
||||
return compute_c_subw(dst, src1);
|
||||
case CC_OP_SUBL:
|
||||
return compute_c_subl(dst, src1);
|
||||
|
||||
case CC_OP_SBBB:
|
||||
return compute_c_sbbb(dst, src1, src2);
|
||||
case CC_OP_SBBW:
|
||||
return compute_c_sbbw(dst, src1, src2);
|
||||
case CC_OP_SBBL:
|
||||
return compute_c_sbbl(dst, src1, src2);
|
||||
|
||||
case CC_OP_SHLB:
|
||||
return compute_c_shlb(dst, src1);
|
||||
case CC_OP_SHLW:
|
||||
return compute_c_shlw(dst, src1);
|
||||
case CC_OP_SHLL:
|
||||
return compute_c_shll(dst, src1);
|
||||
|
||||
case CC_OP_BMILGB:
|
||||
return compute_c_bmilgb(dst, src1);
|
||||
case CC_OP_BMILGW:
|
||||
return compute_c_bmilgw(dst, src1);
|
||||
case CC_OP_BMILGL:
|
||||
return compute_c_bmilgl(dst, src1);
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
case CC_OP_ADDQ:
|
||||
return compute_c_addq(dst, src1);
|
||||
case CC_OP_ADCQ:
|
||||
return compute_c_adcq(dst, src1, src2);
|
||||
case CC_OP_SUBQ:
|
||||
return compute_c_subq(dst, src1);
|
||||
case CC_OP_SBBQ:
|
||||
return compute_c_sbbq(dst, src1, src2);
|
||||
case CC_OP_SHLQ:
|
||||
return compute_c_shlq(dst, src1);
|
||||
case CC_OP_BMILGQ:
|
||||
return compute_c_bmilgq(dst, src1);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
void helper_write_eflags(CPUX86State *env, target_ulong t0,
|
||||
uint32_t update_mask)
|
||||
{
|
||||
cpu_load_eflags(env, (int)t0, update_mask);
|
||||
}
|
||||
|
||||
target_ulong helper_read_eflags(CPUX86State *env)
|
||||
{
|
||||
return cpu_compute_eflags(env);
|
||||
}
|
||||
|
||||
void helper_clts(CPUX86State *env)
|
||||
{
|
||||
env->cr[0] &= ~CR0_TS_MASK;
|
||||
env->hflags &= ~HF_TS_MASK;
|
||||
}
|
||||
|
||||
void helper_reset_rf(CPUX86State *env)
|
||||
{
|
||||
env->eflags &= ~RF_MASK;
|
||||
}
|
||||
|
||||
void helper_cli(CPUX86State *env)
|
||||
{
|
||||
env->eflags &= ~IF_MASK;
|
||||
}
|
||||
|
||||
void helper_sti(CPUX86State *env)
|
||||
{
|
||||
env->eflags |= IF_MASK;
|
||||
}
|
||||
|
||||
void helper_clac(CPUX86State *env)
|
||||
{
|
||||
env->eflags &= ~AC_MASK;
|
||||
}
|
||||
|
||||
void helper_stac(CPUX86State *env)
|
||||
{
|
||||
env->eflags |= AC_MASK;
|
||||
}
|
||||
|
||||
#if 0
|
||||
/* vm86plus instructions */
|
||||
void helper_cli_vm(CPUX86State *env)
|
||||
{
|
||||
env->eflags &= ~VIF_MASK;
|
||||
}
|
||||
|
||||
void helper_sti_vm(CPUX86State *env)
|
||||
{
|
||||
env->eflags |= VIF_MASK;
|
||||
if (env->eflags & VIP_MASK) {
|
||||
raise_exception_ra(env, EXCP0D_GPF, GETPC());
|
||||
}
|
||||
}
|
||||
#endif
|
||||
242
qemu/target/i386/cc_helper_template.h
Normal file
242
qemu/target/i386/cc_helper_template.h
Normal file
@@ -0,0 +1,242 @@
|
||||
/*
|
||||
* x86 condition code helpers
|
||||
*
|
||||
* Copyright (c) 2008 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define DATA_BITS (1 << (3 + SHIFT))
|
||||
|
||||
#if DATA_BITS == 8
|
||||
#define SUFFIX b
|
||||
#define DATA_TYPE uint8_t
|
||||
#elif DATA_BITS == 16
|
||||
#define SUFFIX w
|
||||
#define DATA_TYPE uint16_t
|
||||
#elif DATA_BITS == 32
|
||||
#define SUFFIX l
|
||||
#define DATA_TYPE uint32_t
|
||||
#elif DATA_BITS == 64
|
||||
#define SUFFIX q
|
||||
#define DATA_TYPE uint64_t
|
||||
#else
|
||||
#error unhandled operand size
|
||||
#endif
|
||||
|
||||
#define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1))
|
||||
|
||||
/* dynamic flags computation */
|
||||
|
||||
static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
DATA_TYPE src2 = dst - src1;
|
||||
|
||||
cf = dst < src1;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = (dst ^ src1 ^ src2) & CC_A;
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
return dst < src1;
|
||||
}
|
||||
|
||||
static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
|
||||
DATA_TYPE src3)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
DATA_TYPE src2 = dst - src1 - src3;
|
||||
|
||||
cf = (src3 ? dst <= src1 : dst < src1);
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = (dst ^ src1 ^ src2) & 0x10;
|
||||
zf = (dst == 0) << 6;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & 0x80;
|
||||
of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
|
||||
DATA_TYPE src3)
|
||||
{
|
||||
return src3 ? dst <= src1 : dst < src1;
|
||||
}
|
||||
|
||||
static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
DATA_TYPE src1 = dst + src2;
|
||||
|
||||
cf = src1 < src2;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = (dst ^ src1 ^ src2) & CC_A;
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
|
||||
{
|
||||
DATA_TYPE src1 = dst + src2;
|
||||
|
||||
return src1 < src2;
|
||||
}
|
||||
|
||||
static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
|
||||
DATA_TYPE src3)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
DATA_TYPE src1 = dst + src2 + src3;
|
||||
|
||||
cf = (src3 ? src1 <= src2 : src1 < src2);
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = (dst ^ src1 ^ src2) & 0x10;
|
||||
zf = (dst == 0) << 6;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & 0x80;
|
||||
of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
|
||||
DATA_TYPE src3)
|
||||
{
|
||||
DATA_TYPE src1 = dst + src2 + src3;
|
||||
|
||||
return (src3 ? src1 <= src2 : src1 < src2);
|
||||
}
|
||||
|
||||
static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
|
||||
cf = 0;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = 0;
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = 0;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
DATA_TYPE src2;
|
||||
|
||||
cf = (int)src1;
|
||||
src1 = dst - 1;
|
||||
src2 = 1;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = (dst ^ src1 ^ src2) & CC_A;
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = (dst == SIGN_MASK) * CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
DATA_TYPE src2;
|
||||
|
||||
cf = (int)src1;
|
||||
src1 = dst + 1;
|
||||
src2 = 1;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = (dst ^ src1 ^ src2) & CC_A;
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = (dst == SIGN_MASK - 1) * CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
|
||||
cf = (src1 >> (DATA_BITS - 1)) & CC_C;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = 0; /* undefined */
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
/* of is defined iff shift count == 1 */
|
||||
of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
return (src1 >> (DATA_BITS - 1)) & CC_C;
|
||||
}
|
||||
|
||||
static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
|
||||
cf = src1 & 1;
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = 0; /* undefined */
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
/* of is defined iff shift count == 1 */
|
||||
of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
/* NOTE: we compute the flags like the P4. On olders CPUs, only OF and
|
||||
CF are modified and it is slower to do that. Note as well that we
|
||||
don't truncate SRC1 for computing carry to DATA_TYPE. */
|
||||
static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
|
||||
cf = (src1 != 0);
|
||||
pf = parity_table[(uint8_t)dst];
|
||||
af = 0; /* undefined */
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = cf * CC_O;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
int cf, pf, af, zf, sf, of;
|
||||
|
||||
cf = (src1 == 0);
|
||||
pf = 0; /* undefined */
|
||||
af = 0; /* undefined */
|
||||
zf = (dst == 0) * CC_Z;
|
||||
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
|
||||
of = 0;
|
||||
return cf | pf | af | zf | sf | of;
|
||||
}
|
||||
|
||||
static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
|
||||
{
|
||||
return src1 == 0;
|
||||
}
|
||||
|
||||
#undef DATA_BITS
|
||||
#undef SIGN_MASK
|
||||
#undef DATA_TYPE
|
||||
#undef DATA_MASK
|
||||
#undef SUFFIX
|
||||
28
qemu/target/i386/cpu-param.h
Normal file
28
qemu/target/i386/cpu-param.h
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* i386 cpu parameters for qemu.
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
* SPDX-License-Identifier: LGPL-2.0+
|
||||
*/
|
||||
|
||||
#ifndef I386_CPU_PARAM_H
|
||||
#define I386_CPU_PARAM_H 1
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
# define TARGET_LONG_BITS 64
|
||||
# define TARGET_PHYS_ADDR_SPACE_BITS 52
|
||||
/*
|
||||
* ??? This is really 48 bits, sign-extended, but the only thing
|
||||
* accessible to userland with bit 48 set is the VSYSCALL, and that
|
||||
* is handled via other mechanisms.
|
||||
*/
|
||||
# define TARGET_VIRT_ADDR_SPACE_BITS 47
|
||||
#else
|
||||
# define TARGET_LONG_BITS 32
|
||||
# define TARGET_PHYS_ADDR_SPACE_BITS 36
|
||||
# define TARGET_VIRT_ADDR_SPACE_BITS 32
|
||||
#endif
|
||||
#define TARGET_PAGE_BITS 12
|
||||
#define NB_MMU_MODES 3
|
||||
|
||||
#endif
|
||||
62
qemu/target/i386/cpu-qom.h
Normal file
62
qemu/target/i386/cpu-qom.h
Normal file
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
* QEMU x86 CPU
|
||||
*
|
||||
* Copyright (c) 2012 SUSE LINUX Products GmbH
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see
|
||||
* <http://www.gnu.org/licenses/lgpl-2.1.html>
|
||||
*/
|
||||
#ifndef QEMU_I386_CPU_QOM_H
|
||||
#define QEMU_I386_CPU_QOM_H
|
||||
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
typedef struct X86CPUModel X86CPUModel;
|
||||
|
||||
/**
|
||||
* X86CPUClass:
|
||||
* @cpu_def: CPU model definition
|
||||
* @host_cpuid_required: Whether CPU model requires cpuid from host.
|
||||
* @ordering: Ordering on the "-cpu help" CPU model list.
|
||||
* @migration_safe: See CpuDefinitionInfo::migration_safe
|
||||
* @static_model: See CpuDefinitionInfo::static
|
||||
* @parent_realize: The parent class' realize handler.
|
||||
* @parent_reset: The parent class' reset handler.
|
||||
*
|
||||
* An x86 CPU model or family.
|
||||
*/
|
||||
typedef struct X86CPUClass {
|
||||
/*< private >*/
|
||||
CPUClass parent_class;
|
||||
/*< public >*/
|
||||
|
||||
/* CPU definition, automatically loaded by instance_init if not NULL.
|
||||
* Should be eventually replaced by subclass-specific property defaults.
|
||||
*/
|
||||
X86CPUModel *model;
|
||||
|
||||
bool host_cpuid_required;
|
||||
int ordering;
|
||||
bool static_model;
|
||||
|
||||
/* Optional description of CPU model.
|
||||
* If unavailable, cpu_def->model_id is used */
|
||||
const char *model_description;
|
||||
|
||||
void (*parent_reset)(CPUState *cpu);
|
||||
} X86CPUClass;
|
||||
|
||||
typedef struct X86CPU X86CPU;
|
||||
|
||||
#endif
|
||||
4855
qemu/target/i386/cpu.c
Normal file
4855
qemu/target/i386/cpu.c
Normal file
File diff suppressed because it is too large
Load Diff
2134
qemu/target/i386/cpu.h
Normal file
2134
qemu/target/i386/cpu.h
Normal file
File diff suppressed because it is too large
Load Diff
695
qemu/target/i386/excp_helper.c
Normal file
695
qemu/target/i386/excp_helper.c
Normal file
@@ -0,0 +1,695 @@
|
||||
/*
|
||||
* x86 exception helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/log.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
|
||||
#include "uc_priv.h"
|
||||
|
||||
void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
|
||||
{
|
||||
raise_interrupt(env, intno, 1, 0, next_eip_addend);
|
||||
}
|
||||
|
||||
void helper_raise_exception(CPUX86State *env, int exception_index)
|
||||
{
|
||||
raise_exception(env, exception_index);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check nested exceptions and change to double or triple fault if
|
||||
* needed. It should only be called, if this is not an interrupt.
|
||||
* Returns the new exception number.
|
||||
*/
|
||||
static int check_exception(CPUX86State *env, int intno, int *error_code,
|
||||
uintptr_t retaddr)
|
||||
{
|
||||
int first_contributory = env->old_exception == 0 ||
|
||||
(env->old_exception >= 10 &&
|
||||
env->old_exception <= 13);
|
||||
int second_contributory = intno == 0 ||
|
||||
(intno >= 10 && intno <= 13);
|
||||
|
||||
qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
|
||||
env->old_exception, intno);
|
||||
|
||||
if (env->old_exception == EXCP08_DBLE) {
|
||||
if (env->hflags & HF_GUEST_MASK) {
|
||||
cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
|
||||
}
|
||||
|
||||
qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
|
||||
|
||||
qemu_system_reset_request(env->uc);
|
||||
return EXCP_HLT;
|
||||
}
|
||||
|
||||
if ((first_contributory && second_contributory)
|
||||
|| (env->old_exception == EXCP0E_PAGE &&
|
||||
(second_contributory || (intno == EXCP0E_PAGE)))) {
|
||||
intno = EXCP08_DBLE;
|
||||
*error_code = 0;
|
||||
}
|
||||
|
||||
if (second_contributory || (intno == EXCP0E_PAGE) ||
|
||||
(intno == EXCP08_DBLE)) {
|
||||
env->old_exception = intno;
|
||||
}
|
||||
|
||||
return intno;
|
||||
}
|
||||
|
||||
/*
|
||||
* Signal an interruption. It is executed in the main CPU loop.
|
||||
* is_int is TRUE if coming from the int instruction. next_eip is the
|
||||
* env->eip value AFTER the interrupt instruction. It is only relevant if
|
||||
* is_int is TRUE.
|
||||
*/
|
||||
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
|
||||
int is_int, int error_code,
|
||||
int next_eip_addend,
|
||||
uintptr_t retaddr)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
if (!is_int) {
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
|
||||
error_code, retaddr);
|
||||
intno = check_exception(env, intno, &error_code, retaddr);
|
||||
} else {
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
|
||||
}
|
||||
|
||||
cs->exception_index = intno;
|
||||
env->error_code = error_code;
|
||||
env->exception_is_int = is_int;
|
||||
env->exception_next_eip = env->eip + next_eip_addend;
|
||||
cpu_loop_exit_restore(cs, retaddr);
|
||||
}
|
||||
|
||||
/* shortcuts to generate exceptions */
|
||||
|
||||
void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
|
||||
int error_code, int next_eip_addend)
|
||||
{
|
||||
raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
|
||||
}
|
||||
|
||||
void raise_exception_err(CPUX86State *env, int exception_index,
|
||||
int error_code)
|
||||
{
|
||||
raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
|
||||
}
|
||||
|
||||
void raise_exception_err_ra(CPUX86State *env, int exception_index,
|
||||
int error_code, uintptr_t retaddr)
|
||||
{
|
||||
raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
|
||||
}
|
||||
|
||||
void raise_exception(CPUX86State *env, int exception_index)
|
||||
{
|
||||
raise_interrupt2(env, exception_index, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
|
||||
{
|
||||
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
|
||||
}
|
||||
|
||||
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
|
||||
int *prot)
|
||||
{
|
||||
CPUX86State *env = &X86_CPU(cs)->env;
|
||||
uint64_t rsvd_mask = PG_HI_RSVD_MASK;
|
||||
uint64_t ptep, pte;
|
||||
uint64_t exit_info_1 = 0;
|
||||
target_ulong pde_addr, pte_addr;
|
||||
uint32_t page_offset;
|
||||
int page_size;
|
||||
|
||||
if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
|
||||
return gphys;
|
||||
}
|
||||
|
||||
if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
|
||||
rsvd_mask |= PG_NX_MASK;
|
||||
}
|
||||
|
||||
if (env->nested_pg_mode & SVM_NPT_PAE) {
|
||||
uint64_t pde, pdpe;
|
||||
target_ulong pdpe_addr;
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
if (env->nested_pg_mode & SVM_NPT_LMA) {
|
||||
uint64_t pml5e;
|
||||
uint64_t pml4e_addr, pml4e;
|
||||
|
||||
pml5e = env->nested_cr3;
|
||||
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
|
||||
|
||||
pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
|
||||
(((gphys >> 39) & 0x1ff) << 3);
|
||||
pml4e = x86_ldq_phys(cs, pml4e_addr);
|
||||
if (!(pml4e & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
if (!(pml4e & PG_ACCESSED_MASK)) {
|
||||
pml4e |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
|
||||
}
|
||||
ptep &= pml4e ^ PG_NX_MASK;
|
||||
pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
|
||||
(((gphys >> 30) & 0x1ff) << 3);
|
||||
pdpe = x86_ldq_phys(cs, pdpe_addr);
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pdpe & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep &= pdpe ^ PG_NX_MASK;
|
||||
if (!(pdpe & PG_ACCESSED_MASK)) {
|
||||
pdpe |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
|
||||
}
|
||||
if (pdpe & PG_PSE_MASK) {
|
||||
/* 1 GB page */
|
||||
page_size = 1024 * 1024 * 1024;
|
||||
pte_addr = pdpe_addr;
|
||||
pte = pdpe;
|
||||
goto do_check_protect;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
|
||||
pdpe = x86_ldq_phys(cs, pdpe_addr);
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
rsvd_mask |= PG_HI_USER_MASK;
|
||||
if (pdpe & (rsvd_mask | PG_NX_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
|
||||
}
|
||||
|
||||
pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
|
||||
pde = x86_ldq_phys(cs, pde_addr);
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pde & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep &= pde ^ PG_NX_MASK;
|
||||
if (pde & PG_PSE_MASK) {
|
||||
/* 2 MB page */
|
||||
page_size = 2048 * 1024;
|
||||
pte_addr = pde_addr;
|
||||
pte = pde;
|
||||
goto do_check_protect;
|
||||
}
|
||||
/* 4 KB page */
|
||||
if (!(pde & PG_ACCESSED_MASK)) {
|
||||
pde |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pde_addr, pde);
|
||||
}
|
||||
pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
|
||||
pte = x86_ldq_phys(cs, pte_addr);
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pte & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
/* combine pde and pte nx, user and rw protections */
|
||||
ptep &= pte ^ PG_NX_MASK;
|
||||
page_size = 4096;
|
||||
} else {
|
||||
uint32_t pde;
|
||||
|
||||
/* page directory entry */
|
||||
pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
|
||||
pde = x86_ldl_phys(cs, pde_addr);
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
ptep = pde | PG_NX_MASK;
|
||||
|
||||
/* if PSE bit is set, then we use a 4MB page */
|
||||
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
|
||||
page_size = 4096 * 1024;
|
||||
pte_addr = pde_addr;
|
||||
|
||||
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
|
||||
* Leave bits 20-13 in place for setting accessed/dirty bits below.
|
||||
*/
|
||||
pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
|
||||
rsvd_mask = 0x200000;
|
||||
goto do_check_protect_pse36;
|
||||
}
|
||||
|
||||
if (!(pde & PG_ACCESSED_MASK)) {
|
||||
pde |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pde_addr, pde);
|
||||
}
|
||||
|
||||
/* page directory entry */
|
||||
pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
|
||||
pte = x86_ldl_phys(cs, pte_addr);
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
/* combine pde and pte user and rw protections */
|
||||
ptep &= pte | PG_NX_MASK;
|
||||
page_size = 4096;
|
||||
rsvd_mask = 0;
|
||||
}
|
||||
|
||||
do_check_protect:
|
||||
rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
|
||||
do_check_protect_pse36:
|
||||
if (pte & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep ^= PG_NX_MASK;
|
||||
|
||||
if (!(ptep & PG_USER_MASK)) {
|
||||
goto do_fault_protect;
|
||||
}
|
||||
if (ptep & PG_NX_MASK) {
|
||||
if (access_type == MMU_INST_FETCH) {
|
||||
goto do_fault_protect;
|
||||
}
|
||||
*prot &= ~PAGE_EXEC;
|
||||
}
|
||||
if (!(ptep & PG_RW_MASK)) {
|
||||
if (access_type == MMU_DATA_STORE) {
|
||||
goto do_fault_protect;
|
||||
}
|
||||
*prot &= ~PAGE_WRITE;
|
||||
}
|
||||
|
||||
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
|
||||
page_offset = gphys & (page_size - 1);
|
||||
return pte + page_offset;
|
||||
|
||||
do_fault_rsvd:
|
||||
exit_info_1 |= SVM_NPTEXIT_RSVD;
|
||||
do_fault_protect:
|
||||
exit_info_1 |= SVM_NPTEXIT_P;
|
||||
do_fault:
|
||||
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
|
||||
gphys);
|
||||
exit_info_1 |= SVM_NPTEXIT_US;
|
||||
if (access_type == MMU_DATA_STORE) {
|
||||
exit_info_1 |= SVM_NPTEXIT_RW;
|
||||
} else if (access_type == MMU_INST_FETCH) {
|
||||
exit_info_1 |= SVM_NPTEXIT_ID;
|
||||
}
|
||||
if (prot) {
|
||||
exit_info_1 |= SVM_NPTEXIT_GPA;
|
||||
} else { /* page table access */
|
||||
exit_info_1 |= SVM_NPTEXIT_GPT;
|
||||
}
|
||||
cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
|
||||
}
|
||||
|
||||
/* return value:
|
||||
* -1 = cannot handle fault
|
||||
* 0 = nothing more to do
|
||||
* 1 = generate PF fault
|
||||
*/
|
||||
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
|
||||
int is_write1, int mmu_idx)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
uint64_t ptep, pte;
|
||||
int32_t a20_mask;
|
||||
target_ulong pde_addr, pte_addr;
|
||||
int error_code = 0;
|
||||
int is_dirty, prot, page_size, is_write, is_user;
|
||||
hwaddr paddr;
|
||||
uint64_t rsvd_mask = PG_HI_RSVD_MASK;
|
||||
uint32_t page_offset;
|
||||
target_ulong vaddr;
|
||||
|
||||
is_user = mmu_idx == MMU_USER_IDX;
|
||||
#if defined(DEBUG_MMU)
|
||||
printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
|
||||
addr, is_write1, is_user, env->eip);
|
||||
#endif
|
||||
is_write = is_write1 & 1;
|
||||
|
||||
a20_mask = x86_get_a20_mask(env);
|
||||
if (!(env->cr[0] & CR0_PG_MASK)) {
|
||||
pte = addr;
|
||||
#ifdef TARGET_X86_64
|
||||
if (!(env->hflags & HF_LMA_MASK)) {
|
||||
/* Without long mode we can only address 32bits in real mode */
|
||||
pte = (uint32_t)pte;
|
||||
}
|
||||
#endif
|
||||
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
page_size = 4096;
|
||||
goto do_mapping;
|
||||
}
|
||||
|
||||
if (!(env->efer & MSR_EFER_NXE)) {
|
||||
rsvd_mask |= PG_NX_MASK;
|
||||
}
|
||||
|
||||
if (env->cr[4] & CR4_PAE_MASK) {
|
||||
uint64_t pde, pdpe;
|
||||
target_ulong pdpe_addr;
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
if (env->hflags & HF_LMA_MASK) {
|
||||
bool la57 = env->cr[4] & CR4_LA57_MASK;
|
||||
uint64_t pml5e_addr, pml5e;
|
||||
uint64_t pml4e_addr, pml4e;
|
||||
int32_t sext;
|
||||
|
||||
/* test virtual address sign extension */
|
||||
sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
|
||||
if (sext != 0 && sext != -1) {
|
||||
env->error_code = 0;
|
||||
cs->exception_index = EXCP0D_GPF;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (la57) {
|
||||
pml5e_addr = ((env->cr[3] & ~0xfff) +
|
||||
(((addr >> 48) & 0x1ff) << 3)) & a20_mask;
|
||||
pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
|
||||
pml5e = x86_ldq_phys(cs, pml5e_addr);
|
||||
if (!(pml5e & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
if (!(pml5e & PG_ACCESSED_MASK)) {
|
||||
pml5e |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
|
||||
}
|
||||
ptep = pml5e ^ PG_NX_MASK;
|
||||
} else {
|
||||
pml5e = env->cr[3];
|
||||
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
|
||||
}
|
||||
|
||||
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
|
||||
(((addr >> 39) & 0x1ff) << 3)) & a20_mask;
|
||||
pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, false);
|
||||
pml4e = x86_ldq_phys(cs, pml4e_addr);
|
||||
if (!(pml4e & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
if (!(pml4e & PG_ACCESSED_MASK)) {
|
||||
pml4e |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
|
||||
}
|
||||
ptep &= pml4e ^ PG_NX_MASK;
|
||||
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
|
||||
a20_mask;
|
||||
pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
|
||||
pdpe = x86_ldq_phys(cs, pdpe_addr);
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pdpe & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep &= pdpe ^ PG_NX_MASK;
|
||||
if (!(pdpe & PG_ACCESSED_MASK)) {
|
||||
pdpe |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
|
||||
}
|
||||
if (pdpe & PG_PSE_MASK) {
|
||||
/* 1 GB page */
|
||||
page_size = 1024 * 1024 * 1024;
|
||||
pte_addr = pdpe_addr;
|
||||
pte = pdpe;
|
||||
goto do_check_protect;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
/* XXX: load them when cr3 is loaded ? */
|
||||
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
|
||||
a20_mask;
|
||||
pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, false);
|
||||
pdpe = x86_ldq_phys(cs, pdpe_addr);
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
rsvd_mask |= PG_HI_USER_MASK;
|
||||
if (pdpe & (rsvd_mask | PG_NX_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
|
||||
}
|
||||
|
||||
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
|
||||
a20_mask;
|
||||
pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
|
||||
pde = x86_ldq_phys(cs, pde_addr);
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pde & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep &= pde ^ PG_NX_MASK;
|
||||
if (pde & PG_PSE_MASK) {
|
||||
/* 2 MB page */
|
||||
page_size = 2048 * 1024;
|
||||
pte_addr = pde_addr;
|
||||
pte = pde;
|
||||
goto do_check_protect;
|
||||
}
|
||||
/* 4 KB page */
|
||||
if (!(pde & PG_ACCESSED_MASK)) {
|
||||
pde |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pde_addr, pde);
|
||||
}
|
||||
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
|
||||
a20_mask;
|
||||
pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
|
||||
pte = x86_ldq_phys(cs, pte_addr);
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pte & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
/* combine pde and pte nx, user and rw protections */
|
||||
ptep &= pte ^ PG_NX_MASK;
|
||||
page_size = 4096;
|
||||
} else {
|
||||
uint32_t pde;
|
||||
|
||||
/* page directory entry */
|
||||
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
|
||||
a20_mask;
|
||||
pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
|
||||
pde = x86_ldl_phys(cs, pde_addr);
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
ptep = pde | PG_NX_MASK;
|
||||
|
||||
/* if PSE bit is set, then we use a 4MB page */
|
||||
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
|
||||
page_size = 4096 * 1024;
|
||||
pte_addr = pde_addr;
|
||||
|
||||
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
|
||||
* Leave bits 20-13 in place for setting accessed/dirty bits below.
|
||||
*/
|
||||
pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
|
||||
rsvd_mask = 0x200000;
|
||||
goto do_check_protect_pse36;
|
||||
}
|
||||
|
||||
if (!(pde & PG_ACCESSED_MASK)) {
|
||||
pde |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pde_addr, pde);
|
||||
}
|
||||
|
||||
/* page directory entry */
|
||||
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
|
||||
a20_mask;
|
||||
pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
|
||||
pte = x86_ldl_phys(cs, pte_addr);
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
/* combine pde and pte user and rw protections */
|
||||
ptep &= pte | PG_NX_MASK;
|
||||
page_size = 4096;
|
||||
rsvd_mask = 0;
|
||||
}
|
||||
|
||||
do_check_protect:
|
||||
rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
|
||||
do_check_protect_pse36:
|
||||
if (pte & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep ^= PG_NX_MASK;
|
||||
|
||||
/* can the page can be put in the TLB? prot will tell us */
|
||||
if (is_user && !(ptep & PG_USER_MASK)) {
|
||||
goto do_fault_protect;
|
||||
}
|
||||
|
||||
prot = 0;
|
||||
if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
|
||||
prot |= PAGE_READ;
|
||||
if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
|
||||
prot |= PAGE_WRITE;
|
||||
}
|
||||
}
|
||||
if (!(ptep & PG_NX_MASK) &&
|
||||
(mmu_idx == MMU_USER_IDX ||
|
||||
!((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
|
||||
prot |= PAGE_EXEC;
|
||||
}
|
||||
if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
|
||||
(ptep & PG_USER_MASK) && env->pkru) {
|
||||
uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
|
||||
uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
|
||||
uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
|
||||
uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
|
||||
if (pkru_ad) {
|
||||
pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
|
||||
} else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
|
||||
pkru_prot &= ~PAGE_WRITE;
|
||||
}
|
||||
|
||||
prot &= pkru_prot;
|
||||
if ((pkru_prot & (1 << is_write1)) == 0) {
|
||||
assert(is_write1 != 2);
|
||||
error_code |= PG_ERROR_PK_MASK;
|
||||
goto do_fault_protect;
|
||||
}
|
||||
}
|
||||
|
||||
if ((prot & (1 << is_write1)) == 0) {
|
||||
goto do_fault_protect;
|
||||
}
|
||||
|
||||
/* yes, it can! */
|
||||
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
|
||||
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
|
||||
pte |= PG_ACCESSED_MASK;
|
||||
if (is_dirty) {
|
||||
pte |= PG_DIRTY_MASK;
|
||||
}
|
||||
x86_stl_phys_notdirty(cs, pte_addr, pte);
|
||||
}
|
||||
|
||||
if (!(pte & PG_DIRTY_MASK)) {
|
||||
/* only set write access if already dirty... otherwise wait
|
||||
for dirty access */
|
||||
assert(!is_write);
|
||||
prot &= ~PAGE_WRITE;
|
||||
}
|
||||
|
||||
do_mapping:
|
||||
|
||||
pte = pte & a20_mask;
|
||||
|
||||
/* align to page_size */
|
||||
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
|
||||
page_offset = addr & (page_size - 1);
|
||||
paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);
|
||||
|
||||
/* Even if 4MB pages, we map only one 4KB page in the cache to
|
||||
avoid filling it too fast */
|
||||
vaddr = addr & TARGET_PAGE_MASK;
|
||||
paddr &= TARGET_PAGE_MASK;
|
||||
assert(prot & (1 << is_write1));
|
||||
|
||||
// Unicorn: identity-map guest virtual address to host virtual address
|
||||
vaddr = addr & TARGET_PAGE_MASK;
|
||||
paddr = vaddr;
|
||||
//printf(">>> map address %"PRIx64" to %"PRIx64"\n", vaddr, paddr);
|
||||
|
||||
tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
|
||||
prot, mmu_idx, page_size);
|
||||
return 0;
|
||||
do_fault_rsvd:
|
||||
error_code |= PG_ERROR_RSVD_MASK;
|
||||
do_fault_protect:
|
||||
error_code |= PG_ERROR_P_MASK;
|
||||
do_fault:
|
||||
error_code |= (is_write << PG_ERROR_W_BIT);
|
||||
if (is_user)
|
||||
error_code |= PG_ERROR_U_MASK;
|
||||
if (is_write1 == 2 &&
|
||||
(((env->efer & MSR_EFER_NXE) &&
|
||||
(env->cr[4] & CR4_PAE_MASK)) ||
|
||||
(env->cr[4] & CR4_SMEP_MASK)))
|
||||
error_code |= PG_ERROR_I_D_MASK;
|
||||
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
|
||||
/* cr2 is not modified in case of exceptions */
|
||||
x86_stq_phys(cs,
|
||||
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
|
||||
addr);
|
||||
} else {
|
||||
env->cr[2] = addr;
|
||||
}
|
||||
env->error_code = error_code;
|
||||
cs->exception_index = EXCP0E_PAGE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
|
||||
MMUAccessType access_type, int mmu_idx,
|
||||
bool probe, uintptr_t retaddr)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
env->retaddr = retaddr;
|
||||
if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
|
||||
/* FIXME: On error in get_hphys we have already jumped out. */
|
||||
g_assert(!probe);
|
||||
raise_exception_err_ra(env, cs->exception_index,
|
||||
env->error_code, retaddr);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
1639
qemu/target/i386/fpu_helper.c
Normal file
1639
qemu/target/i386/fpu_helper.c
Normal file
File diff suppressed because it is too large
Load Diff
521
qemu/target/i386/helper.c
Normal file
521
qemu/target/i386/helper.c
Normal file
@@ -0,0 +1,521 @@
|
||||
/*
|
||||
* i386 helpers (without register variable usage)
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "sysemu/tcg.h"
|
||||
|
||||
/*
 * Recompute the MPX-related bits of hflags/hflags2 from the active
 * bounds-config register: BNDCFGU when running at CPL 3, BNDCFGS
 * otherwise.
 */
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr = ((hflags & HF_CPL_MASK) == 3)
        ? env->bndcs_regs.cfgu
        : env->msr_bndcfgs;

    /* MPX is enabled only with OSXSAVE, BNDCSR xsave state and BNDCFG.EN. */
    hflags &= ~HF_MPX_EN_MASK;
    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    }

    hflags2 &= ~HF2_MPX_PR_MASK;
    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}
|
||||
|
||||
/*
 * Decode CPU family and model from the CPUID version dword.
 * Both out-pointers must be non-NULL; otherwise nothing is written.
 */
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    const int cpuver = env->cpuid_version;

    if (family != NULL && model != NULL) {
        *family = (cpuver >> 8) & 0x0f;
        /* extended model (bits 19:16) combines with the base model nibble */
        *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
    }
}
|
||||
|
||||
/* Broadcast MCA signal for processor version 06H_EH and above */
|
||||
int cpu_x86_support_mca_broadcast(CPUX86State *env)
|
||||
{
|
||||
int family = 0;
|
||||
int model = 0;
|
||||
|
||||
cpu_x86_version(env, &family, &model);
|
||||
if ((family == 6 && model >= 14) || family > 6) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/***********************************************************/
|
||||
/* x86 mmu */
|
||||
/* XXX: add PGE support */
|
||||
|
||||
/*
 * Update the A20 gate. Changing it invalidates every cached
 * translation, so the TLB is flushed and any executing TB chain
 * is broken.
 */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;
    CPUState *cs;

    a20_state = (a20_state != 0);
    if (a20_state == ((env->a20_mask >> 20) & 1)) {
        return;                     /* no change */
    }

    cs = CPU(cpu);
    qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(cs);
    env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
|
||||
|
||||
/*
 * Install a new CR0 value and update all state derived from it:
 * long-mode entry/exit, the TLB, and the PE/ADDSEG/MP/EM/TS hflags.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    /* Any change to paging, write-protect or protection mode
       invalidates cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    /* PG 0->1 with EFER.LME set: enter long mode. */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is hardwired to 1. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: MP/EM/TS are copied straight from CR0 bits 1..3
       (the shift lines them up with the hflag positions) */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
|
||||
|
||||
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
|
||||
the PDPT */
|
||||
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
|
||||
{
|
||||
env->cr[3] = new_cr3;
|
||||
if (env->cr[0] & CR0_PG_MASK) {
|
||||
qemu_log_mask(CPU_LOG_MMU,
|
||||
"CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
|
||||
tlb_flush(env_cpu(env));
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Install a new CR4 value: flush the TLB when paging-relevant bits
 * change, drop bits whose CPUID feature is absent, and recompute
 * the OSFXSR/SMAP hflags plus the MPX hflags.
 */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    /* Start from the current hflags with the recomputed bits cleared. */
    uint32_t hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* OSFXSR requires SSE support. */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    /* SMAP requires the corresponding CPUID feature bit. */
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    /* PKE requires the PKU feature. */
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    /* MPX enablement depends on CR4.OSXSAVE, so resync. */
    cpu_sync_bndcs_hflags(env);
}
|
||||
|
||||
/*
 * Debug translation of a guest virtual address to a physical address.
 *
 * Walks the page tables by hand (no TLB, no access/dirty updates,
 * no fault injection) for the three paging modes: no paging,
 * PAE/IA-32e (including 4- and 5-level long mode), and classic
 * 32-bit 2-level paging. Returns -1 if the address is not mapped.
 */
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: physical == virtual (modulo A20). */
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address */
                return -1;
            }

            if (la57) {
                /* 5-level paging: walk the PML5 first. */
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                              (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                /* 4-level paging: CR3 points directly at the PML4. */
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                          (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* Legacy PAE: 4-entry PDPT indexed by bits 31:30. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* Classic 32-bit 2-level paging. */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page; bits 20:13 of the PDE supply PA bits 39:32. */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    /* Combine the frame address with the in-page offset. */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
|
||||
|
||||
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
|
||||
target_ulong *base, unsigned int *limit,
|
||||
unsigned int *flags)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
SegmentCache *dt;
|
||||
target_ulong ptr;
|
||||
uint32_t e1, e2;
|
||||
int index;
|
||||
|
||||
if (selector & 0x4)
|
||||
dt = &env->ldt;
|
||||
else
|
||||
dt = &env->gdt;
|
||||
index = selector & ~7;
|
||||
ptr = dt->base + index;
|
||||
if ((index + 7) > dt->limit
|
||||
|| cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
|
||||
|| cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
|
||||
return 0;
|
||||
|
||||
*base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
|
||||
*limit = (e1 & 0xffff) | (e2 & 0x000f0000);
|
||||
if (e2 & DESC_G_MASK)
|
||||
*limit = (*limit << 12) | 0xfff;
|
||||
*flags = e2;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
 * Respond to an INIT IPI: reset the CPU while preserving the
 * init-persistent state range (start_init_save..end_init_save)
 * and any pending SIPI request.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    CPUX86State *save = g_new(CPUX86State, 1);

    /* Snapshot the whole state, reset, then restore the persistent range. */
    *save = *env;
    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    // apic_init_reset(cpu->apic_state);
}
|
||||
|
||||
/*
 * Respond to a SIPI. The local-APIC call is commented out here —
 * presumably because APIC emulation is stubbed in this port — so
 * this is currently a no-op; TODO confirm against the APIC code.
 */
void do_cpu_sipi(X86CPU *cpu)
{
    // apic_sipi(cpu->apic_state);
}
|
||||
|
||||
/* Frob eflags into and out of the CPU temporary format. */
|
||||
|
||||
/*
 * Move eflags into the CPU-temporary (lazy) format on entry to
 * translated code: arithmetic flags go to CC_SRC, DF becomes +/-1
 * in env->df, and the moved bits are cleared from env->eflags.
 */
void x86_cpu_exec_enter(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    const target_ulong arith = CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C;

    CC_SRC = env->eflags & arith;
    /* DF (bit 10) is kept as a +1/-1 stride for string ops. */
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | arith);
}
|
||||
|
||||
/* Fold the lazy flag state back into a full eflags value on exit. */
void x86_cpu_exec_exit(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;

    env->eflags = cpu_compute_eflags(env);
}
|
||||
|
||||
/* Physical-memory byte load using the CPU's current memory attributes. */
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    return glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL);
#else
    return address_space_ldub(as->uc, as, addr, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory 16-bit load using the CPU's current memory attributes. */
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    return glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL);
#else
    return address_space_lduw(as->uc, as, addr, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory 32-bit load using the CPU's current memory attributes. */
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    return glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL);
#else
    return address_space_ldl(as->uc, as, addr, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory 64-bit load using the CPU's current memory attributes. */
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    return glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL);
#else
    return address_space_ldq(as->uc, as, addr, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory byte store using the CPU's current memory attributes. */
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    glue(address_space_stb, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL);
#else
    address_space_stb(as->uc, as, addr, val, attrs, NULL);
#endif
}
|
||||
|
||||
/* 32-bit physical store that does not mark the page dirty
   (used for page-table accessed/dirty bit updates). */
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    glue(address_space_stl_notdirty, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL);
#else
    address_space_stl_notdirty(as->uc, as, addr, val, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory 16-bit store using the CPU's current memory attributes. */
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    /* style fix: space after comma, matching the other ld/st wrappers */
    glue(address_space_stw, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL);
#else
    address_space_stw(as->uc, as, addr, val, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory 32-bit store using the CPU's current memory attributes. */
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    glue(address_space_stl, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL);
#else
    address_space_stl(as->uc, as, addr, val, attrs, NULL);
#endif
}
|
||||
|
||||
/* Physical-memory 64-bit store using the CPU's current memory attributes. */
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

#ifdef UNICORN_ARCH_POSTFIX
    glue(address_space_stq, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL);
#else
    address_space_stq(as->uc, as, addr, val, attrs, NULL);
#endif
}
|
||||
232
qemu/target/i386/helper.h
Normal file
232
qemu/target/i386/helper.h
Normal file
@@ -0,0 +1,232 @@
|
||||
DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64)
|
||||
|
||||
DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
|
||||
DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
|
||||
|
||||
DEF_HELPER_3(write_eflags, void, env, tl, i32)
|
||||
DEF_HELPER_1(read_eflags, tl, env)
|
||||
DEF_HELPER_2(divb_AL, void, env, tl)
|
||||
DEF_HELPER_2(idivb_AL, void, env, tl)
|
||||
DEF_HELPER_2(divw_AX, void, env, tl)
|
||||
DEF_HELPER_2(idivw_AX, void, env, tl)
|
||||
DEF_HELPER_2(divl_EAX, void, env, tl)
|
||||
DEF_HELPER_2(idivl_EAX, void, env, tl)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(divq_EAX, void, env, tl)
|
||||
DEF_HELPER_2(idivq_EAX, void, env, tl)
|
||||
#endif
|
||||
DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32)
|
||||
DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl)
|
||||
DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl)
|
||||
DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
|
||||
DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
|
||||
DEF_HELPER_1(bnd_jmp, void, env)
|
||||
|
||||
DEF_HELPER_2(aam, void, env, int)
|
||||
DEF_HELPER_2(aad, void, env, int)
|
||||
DEF_HELPER_1(aaa, void, env)
|
||||
DEF_HELPER_1(aas, void, env)
|
||||
DEF_HELPER_1(daa, void, env)
|
||||
DEF_HELPER_1(das, void, env)
|
||||
|
||||
DEF_HELPER_2(lsl, tl, env, tl)
|
||||
DEF_HELPER_2(lar, tl, env, tl)
|
||||
DEF_HELPER_2(verr, void, env, tl)
|
||||
DEF_HELPER_2(verw, void, env, tl)
|
||||
DEF_HELPER_2(lldt, void, env, int)
|
||||
DEF_HELPER_2(ltr, void, env, int)
|
||||
DEF_HELPER_3(load_seg, void, env, int, int)
|
||||
DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
|
||||
DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
|
||||
DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
|
||||
DEF_HELPER_2(iret_real, void, env, int)
|
||||
DEF_HELPER_3(iret_protected, void, env, int, int)
|
||||
DEF_HELPER_3(lret_protected, void, env, int, int)
|
||||
DEF_HELPER_2(read_crN, tl, env, int)
|
||||
DEF_HELPER_3(write_crN, void, env, int, tl)
|
||||
DEF_HELPER_2(lmsw, void, env, tl)
|
||||
DEF_HELPER_1(clts, void, env)
|
||||
DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
|
||||
DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
|
||||
DEF_HELPER_2(invlpg, void, env, tl)
|
||||
|
||||
DEF_HELPER_2(sysenter, void, env, int)
|
||||
DEF_HELPER_2(sysexit, void, env, int)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(syscall, void, env, int)
|
||||
DEF_HELPER_2(sysret, void, env, int)
|
||||
#endif
|
||||
DEF_HELPER_2(hlt, void, env, int)
|
||||
DEF_HELPER_2(monitor, void, env, tl)
|
||||
DEF_HELPER_2(mwait, void, env, int)
|
||||
DEF_HELPER_2(pause, void, env, int)
|
||||
DEF_HELPER_1(debug, void, env)
|
||||
DEF_HELPER_1(reset_rf, void, env)
|
||||
DEF_HELPER_3(raise_interrupt, void, env, int, int)
|
||||
DEF_HELPER_2(raise_exception, void, env, int)
|
||||
DEF_HELPER_1(cli, void, env)
|
||||
DEF_HELPER_1(sti, void, env)
|
||||
DEF_HELPER_1(clac, void, env)
|
||||
DEF_HELPER_1(stac, void, env)
|
||||
DEF_HELPER_3(boundw, void, env, tl, int)
|
||||
DEF_HELPER_3(boundl, void, env, tl, int)
|
||||
DEF_HELPER_1(rsm, void, env)
|
||||
DEF_HELPER_2(into, void, env, int)
|
||||
DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl)
|
||||
DEF_HELPER_2(cmpxchg8b, void, env, tl)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl)
|
||||
DEF_HELPER_2(cmpxchg16b, void, env, tl)
|
||||
#endif
|
||||
DEF_HELPER_1(single_step, void, env)
|
||||
DEF_HELPER_1(rechecking_single_step, void, env)
|
||||
DEF_HELPER_1(cpuid, void, env)
|
||||
DEF_HELPER_1(rdtsc, void, env)
|
||||
DEF_HELPER_1(rdtscp, void, env)
|
||||
DEF_HELPER_1(rdpmc, void, env)
|
||||
DEF_HELPER_1(rdmsr, void, env)
|
||||
DEF_HELPER_1(wrmsr, void, env)
|
||||
|
||||
DEF_HELPER_2(check_iob, void, env, i32)
|
||||
DEF_HELPER_2(check_iow, void, env, i32)
|
||||
DEF_HELPER_2(check_iol, void, env, i32)
|
||||
DEF_HELPER_3(outb, void, env, i32, i32)
|
||||
DEF_HELPER_2(inb, tl, env, i32)
|
||||
DEF_HELPER_3(outw, void, env, i32, i32)
|
||||
DEF_HELPER_2(inw, tl, env, i32)
|
||||
DEF_HELPER_3(outl, void, env, i32, i32)
|
||||
DEF_HELPER_2(inl, tl, env, i32)
|
||||
DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
|
||||
|
||||
DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
|
||||
DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)
|
||||
DEF_HELPER_3(vmrun, void, env, int, int)
|
||||
DEF_HELPER_1(vmmcall, void, env)
|
||||
DEF_HELPER_2(vmload, void, env, int)
|
||||
DEF_HELPER_2(vmsave, void, env, int)
|
||||
DEF_HELPER_1(stgi, void, env)
|
||||
DEF_HELPER_1(clgi, void, env)
|
||||
DEF_HELPER_1(skinit, void, env)
|
||||
DEF_HELPER_2(invlpga, void, env, int)
|
||||
|
||||
/* x86 FPU */
|
||||
|
||||
DEF_HELPER_2(flds_FT0, void, env, i32)
|
||||
DEF_HELPER_2(fldl_FT0, void, env, i64)
|
||||
DEF_HELPER_2(fildl_FT0, void, env, s32)
|
||||
DEF_HELPER_2(flds_ST0, void, env, i32)
|
||||
DEF_HELPER_2(fldl_ST0, void, env, i64)
|
||||
DEF_HELPER_2(fildl_ST0, void, env, s32)
|
||||
DEF_HELPER_2(fildll_ST0, void, env, s64)
|
||||
DEF_HELPER_1(fsts_ST0, i32, env)
|
||||
DEF_HELPER_1(fstl_ST0, i64, env)
|
||||
DEF_HELPER_1(fist_ST0, s32, env)
|
||||
DEF_HELPER_1(fistl_ST0, s32, env)
|
||||
DEF_HELPER_1(fistll_ST0, s64, env)
|
||||
DEF_HELPER_1(fistt_ST0, s32, env)
|
||||
DEF_HELPER_1(fisttl_ST0, s32, env)
|
||||
DEF_HELPER_1(fisttll_ST0, s64, env)
|
||||
DEF_HELPER_2(fldt_ST0, void, env, tl)
|
||||
DEF_HELPER_2(fstt_ST0, void, env, tl)
|
||||
DEF_HELPER_1(fpush, void, env)
|
||||
DEF_HELPER_1(fpop, void, env)
|
||||
DEF_HELPER_1(fdecstp, void, env)
|
||||
DEF_HELPER_1(fincstp, void, env)
|
||||
DEF_HELPER_2(ffree_STN, void, env, int)
|
||||
DEF_HELPER_1(fmov_ST0_FT0, void, env)
|
||||
DEF_HELPER_2(fmov_FT0_STN, void, env, int)
|
||||
DEF_HELPER_2(fmov_ST0_STN, void, env, int)
|
||||
DEF_HELPER_2(fmov_STN_ST0, void, env, int)
|
||||
DEF_HELPER_2(fxchg_ST0_STN, void, env, int)
|
||||
DEF_HELPER_1(fcom_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fucom_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fcomi_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fucomi_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fadd_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fmul_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fsub_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fsubr_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fdiv_ST0_FT0, void, env)
|
||||
DEF_HELPER_1(fdivr_ST0_FT0, void, env)
|
||||
DEF_HELPER_2(fadd_STN_ST0, void, env, int)
|
||||
DEF_HELPER_2(fmul_STN_ST0, void, env, int)
|
||||
DEF_HELPER_2(fsub_STN_ST0, void, env, int)
|
||||
DEF_HELPER_2(fsubr_STN_ST0, void, env, int)
|
||||
DEF_HELPER_2(fdiv_STN_ST0, void, env, int)
|
||||
DEF_HELPER_2(fdivr_STN_ST0, void, env, int)
|
||||
DEF_HELPER_1(fchs_ST0, void, env)
|
||||
DEF_HELPER_1(fabs_ST0, void, env)
|
||||
DEF_HELPER_1(fxam_ST0, void, env)
|
||||
DEF_HELPER_1(fld1_ST0, void, env)
|
||||
DEF_HELPER_1(fldl2t_ST0, void, env)
|
||||
DEF_HELPER_1(fldl2e_ST0, void, env)
|
||||
DEF_HELPER_1(fldpi_ST0, void, env)
|
||||
DEF_HELPER_1(fldlg2_ST0, void, env)
|
||||
DEF_HELPER_1(fldln2_ST0, void, env)
|
||||
DEF_HELPER_1(fldz_ST0, void, env)
|
||||
DEF_HELPER_1(fldz_FT0, void, env)
|
||||
DEF_HELPER_1(fnstsw, i32, env)
|
||||
DEF_HELPER_1(fnstcw, i32, env)
|
||||
DEF_HELPER_2(fldcw, void, env, i32)
|
||||
DEF_HELPER_1(fclex, void, env)
|
||||
DEF_HELPER_1(fwait, void, env)
|
||||
DEF_HELPER_1(fninit, void, env)
|
||||
DEF_HELPER_2(fbld_ST0, void, env, tl)
|
||||
DEF_HELPER_2(fbst_ST0, void, env, tl)
|
||||
DEF_HELPER_1(f2xm1, void, env)
|
||||
DEF_HELPER_1(fyl2x, void, env)
|
||||
DEF_HELPER_1(fptan, void, env)
|
||||
DEF_HELPER_1(fpatan, void, env)
|
||||
DEF_HELPER_1(fxtract, void, env)
|
||||
DEF_HELPER_1(fprem1, void, env)
|
||||
DEF_HELPER_1(fprem, void, env)
|
||||
DEF_HELPER_1(fyl2xp1, void, env)
|
||||
DEF_HELPER_1(fsqrt, void, env)
|
||||
DEF_HELPER_1(fsincos, void, env)
|
||||
DEF_HELPER_1(frndint, void, env)
|
||||
DEF_HELPER_1(fscale, void, env)
|
||||
DEF_HELPER_1(fsin, void, env)
|
||||
DEF_HELPER_1(fcos, void, env)
|
||||
DEF_HELPER_3(fstenv, void, env, tl, int)
|
||||
DEF_HELPER_3(fldenv, void, env, tl, int)
|
||||
DEF_HELPER_3(fsave, void, env, tl, int)
|
||||
DEF_HELPER_3(frstor, void, env, tl, int)
|
||||
DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
|
||||
DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
|
||||
DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64)
|
||||
DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64)
|
||||
DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64)
|
||||
DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32)
|
||||
DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
|
||||
DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32)
|
||||
DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64)
|
||||
|
||||
DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl)
|
||||
DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
|
||||
|
||||
/* MMX/SSE */
|
||||
|
||||
DEF_HELPER_2(ldmxcsr, void, env, i32)
|
||||
DEF_HELPER_1(enter_mmx, void, env)
|
||||
DEF_HELPER_1(emms, void, env)
|
||||
DEF_HELPER_3(movq, void, env, ptr, ptr)
|
||||
|
||||
#define SHIFT 0
|
||||
#include "ops_sse_header.h"
|
||||
#define SHIFT 1
|
||||
#include "ops_sse_header.h"
|
||||
|
||||
DEF_HELPER_3(rclb, tl, env, tl, tl)
|
||||
DEF_HELPER_3(rclw, tl, env, tl, tl)
|
||||
DEF_HELPER_3(rcll, tl, env, tl, tl)
|
||||
DEF_HELPER_3(rcrb, tl, env, tl, tl)
|
||||
DEF_HELPER_3(rcrw, tl, env, tl, tl)
|
||||
DEF_HELPER_3(rcrl, tl, env, tl, tl)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_3(rclq, tl, env, tl, tl)
|
||||
DEF_HELPER_3(rcrq, tl, env, tl, tl)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_1(rdrand, tl, env)
|
||||
491
qemu/target/i386/int_helper.c
Normal file
491
qemu/target/i386/int_helper.c
Normal file
@@ -0,0 +1,491 @@
|
||||
/*
|
||||
* x86 integer helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "qemu/guest-random.h"
|
||||
|
||||
//#define DEBUG_MULDIV
|
||||
|
||||
/* modulo 9 table: reduces a rotate count (0..31) mod 9, the effective
   period of an 8-bit RCL/RCR (8 data bits + carry). */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
|
||||
|
||||
/* modulo 17 table: reduces a rotate count (0..31) mod 17, the effective
   period of a 16-bit RCL/RCR (16 data bits + carry). */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
|
||||
|
||||
/* division, flags are undefined */
|
||||
|
||||
/* DIV r/m8: AX / divisor -> AL = quotient, AH = remainder.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_divb_AL(CPUX86State *env, target_ulong t0)
{
    unsigned int dividend, divisor, quot, rem;

    dividend = env->regs[R_EAX] & 0xffff;
    divisor = t0 & 0xff;
    if (divisor == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    quot = dividend / divisor;
    if (quot > 0xff) {
        /* quotient does not fit in AL */
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    rem = (dividend % divisor) & 0xff;
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (rem << 8) | (quot & 0xff);
}
|
||||
|
||||
/* IDIV r/m8: signed AX / divisor -> AL = quotient, AH = remainder.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_idivb_AL(CPUX86State *env, target_ulong t0)
{
    int dividend = (int16_t)env->regs[R_EAX];
    int divisor = (int8_t)t0;
    int quot, rem;

    if (divisor == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    quot = dividend / divisor;
    if (quot != (int8_t)quot) {
        /* quotient does not fit in signed 8 bits */
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    rem = (dividend % divisor) & 0xff;
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (rem << 8) | (quot & 0xff);
}
|
||||
|
||||
/* DIV r/m16: DX:AX / divisor -> AX = quotient, DX = remainder.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_divw_AX(CPUX86State *env, target_ulong t0)
{
    unsigned int dividend, divisor, quot, rem;

    dividend = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
    divisor = t0 & 0xffff;
    if (divisor == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    quot = dividend / divisor;
    if (quot > 0xffff) {
        /* quotient does not fit in AX */
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    rem = (dividend % divisor) & 0xffff;
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (quot & 0xffff);
    env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | rem;
}
|
||||
|
||||
/* IDIV r/m16: signed divide DX:AX by (int16)t0; AX=quotient, DX=remainder.
   Raises #DE on divide-by-zero or if the quotient overflows int16. */
void helper_idivw_AX(CPUX86State *env, target_ulong t0)
{
    int num, den, q, r;

    /* DX:AX reassembled as a signed 32-bit value */
    num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    /* widen to 64 bits so INT_MIN / -1 cannot overflow in C */
    q = ((int64_t)num / den);
    if (q != (int16_t)q) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    q &= 0xffff;
    r = (num % den) & 0xffff;
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
    env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
}
|
||||
|
||||
/* DIV r/m32: unsigned divide EDX:EAX by (uint32)t0.
   EAX receives the quotient, EDX the remainder.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_divl_EAX(CPUX86State *env, target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
    den = (unsigned int)t0;
    if (den == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    q = (num / den);
    r = (num % den);
    /* quotient must fit in EAX */
    if (q > 0xffffffff) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    env->regs[R_EAX] = (uint32_t)q;
    env->regs[R_EDX] = (uint32_t)r;
}
|
||||
|
||||
/* IDIV r/m32: signed divide EDX:EAX by (int32)t0; EAX=quotient, EDX=remainder.
   Raises #DE on divide-by-zero or if the quotient overflows int32. */
void helper_idivl_EAX(CPUX86State *env, target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
    den = (int)t0;
    if (den == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    /* NOTE(review): INT64_MIN / -1 here is C undefined behavior; on x86 the
       quotient check below would reject it anyway, but the division itself
       happens first — confirm whether this can trap on other hosts. */
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    env->regs[R_EAX] = (uint32_t)q;
    env->regs[R_EDX] = (uint32_t)r;
}
|
||||
|
||||
/* bcd */
|
||||
|
||||
/* XXX: exception */
|
||||
void helper_aam(CPUX86State *env, int base)
|
||||
{
|
||||
int al, ah;
|
||||
|
||||
al = env->regs[R_EAX] & 0xff;
|
||||
ah = al / base;
|
||||
al = al % base;
|
||||
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
|
||||
CC_DST = al;
|
||||
}
|
||||
|
||||
/* AAD imm8: ASCII adjust AX before divide.
   AL = (AH * base + AL) & 0xff, AH = 0; flags derive from new AL. */
void helper_aad(CPUX86State *env, int base)
{
    int lo = env->regs[R_EAX] & 0xff;
    int hi = (env->regs[R_EAX] >> 8) & 0xff;
    int result = (hi * base + lo) & 0xff;

    /* writing only the result byte into AX also clears AH */
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | result;
    CC_DST = result;
}
|
||||
|
||||
/* AAA: ASCII adjust AL after addition (unpacked BCD).
   Adjusts AL/AH when the low nibble exceeds 9 or AF is set;
   sets/clears CF and AF in CC_SRC accordingly. */
void helper_aaa(CPUX86State *env)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    af = eflags & CC_A;
    al = env->regs[R_EAX] & 0xff;
    ah = (env->regs[R_EAX] >> 8) & 0xff;

    /* al + 6 would carry out of the byte */
    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
|
||||
|
||||
/* AAS: ASCII adjust AL after subtraction (unpacked BCD).
   Mirror of AAA with subtraction; updates CF/AF in CC_SRC. */
void helper_aas(CPUX86State *env)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    af = eflags & CC_A;
    al = env->regs[R_EAX] & 0xff;
    ah = (env->regs[R_EAX] >> 8) & 0xff;

    /* al - 6 would borrow out of the byte */
    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
|
||||
|
||||
/* DAA: decimal adjust AL after addition (packed BCD).
   Adjusts each nibble, then recomputes ZF/PF/SF by hand into CC_SRC. */
void helper_daa(CPUX86State *env)
{
    int old_al, al, af, cf;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    old_al = al = env->regs[R_EAX] & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    /* high-nibble adjust decided on the ORIGINAL al, per the spec */
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
|
||||
|
||||
/* DAS: decimal adjust AL after subtraction (packed BCD).
   Mirror of DAA with subtraction; recomputes ZF/PF/SF by hand. */
void helper_das(CPUX86State *env)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = env->regs[R_EAX] & 0xff;

    eflags = 0;
    al1 = al; /* keep the pre-adjust value for the high-nibble test */
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf) {
            eflags |= CC_C;
        }
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
/* 128-bit add: (*phigh:*plow) += (b:a), with carry from low to high. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t sum = *plow + a;

    if (sum < a) {
        /* low word wrapped: propagate the carry */
        (*phigh)++;
    }
    *plow = sum;
    *phigh += b;
}
|
||||
|
||||
/* 128-bit two's complement negate of (*phigh:*plow). */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    uint64_t lo = *plow;

    /* -x == ~x + 1; the +1 carries into the high word only when lo == 0 */
    *plow = 0 - lo;
    *phigh = ~*phigh + (lo == 0);
}
|
||||
|
||||
/* return TRUE if overflow */
|
||||
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
|
||||
{
|
||||
uint64_t q, r, a1, a0;
|
||||
int i, qb, ab;
|
||||
|
||||
a0 = *plow;
|
||||
a1 = *phigh;
|
||||
if (a1 == 0) {
|
||||
q = a0 / b;
|
||||
r = a0 % b;
|
||||
*plow = q;
|
||||
*phigh = r;
|
||||
} else {
|
||||
if (a1 >= b) {
|
||||
return 1;
|
||||
}
|
||||
/* XXX: use a better algorithm */
|
||||
for (i = 0; i < 64; i++) {
|
||||
ab = a1 >> 63;
|
||||
a1 = (a1 << 1) | (a0 >> 63);
|
||||
if (ab || a1 >= b) {
|
||||
a1 -= b;
|
||||
qb = 1;
|
||||
} else {
|
||||
qb = 0;
|
||||
}
|
||||
a0 = (a0 << 1) | qb;
|
||||
}
|
||||
#if defined(DEBUG_MULDIV)
|
||||
printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
|
||||
": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
|
||||
*phigh, *plow, b, a0, a1);
|
||||
#endif
|
||||
*plow = a0;
|
||||
*phigh = a1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* 128/64 signed divide of (*phigh:*plow) by b, built on div64():
   take magnitudes, divide unsigned, then restore the signs.
   Returns 1 ("TRUE") if the signed quotient overflows 64 bits. */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa) {
        neg128(plow, phigh);
    }
    sb = (b < 0);
    /* INT64_MIN is its own negation; leave it as-is to avoid overflow */
    if (sb && (b != 0x8000000000000000LL)) {
        b = -b;
    }
    if (div64(plow, phigh, b) != 0) {
        return 1;
    }
    if (sa ^ sb) {
        /* negative result: magnitude up to 2^63 is representable */
        if (*plow > (1ULL << 63)) {
            return 1;
        }
        *plow = 0-*plow;
    } else {
        /* positive result: magnitude must stay below 2^63 */
        if (*plow >= (1ULL << 63)) {
            return 1;
        }
    }
    /* remainder takes the sign of the dividend */
    if (sa) {
        *phigh = 0-*phigh;
    }
    return 0;
}
|
||||
|
||||
/* DIV r/m64: unsigned divide RDX:RAX by t0.
   RAX receives the quotient, RDX the remainder.
   Raises #DE on divide-by-zero or 64-bit quotient overflow. */
void helper_divq_EAX(CPUX86State *env, target_ulong t0)
{
    uint64_t lo, hi;

    if (t0 == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    lo = env->regs[R_EAX];
    hi = env->regs[R_EDX];
    if (div64(&lo, &hi, t0) != 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    env->regs[R_EAX] = lo;
    env->regs[R_EDX] = hi;
}
|
||||
|
||||
/* IDIV r/m64: signed divide RDX:RAX by t0; RAX=quotient, RDX=remainder.
   Raises #DE on divide-by-zero or signed 64-bit quotient overflow. */
void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    r0 = env->regs[R_EAX];
    r1 = env->regs[R_EDX];
    if (idiv64(&r0, &r1, t0)) {
        raise_exception_ra(env, EXCP00_DIVZ, GETPC());
    }
    env->regs[R_EAX] = r0;
    env->regs[R_EDX] = r1;
}
|
||||
#endif
|
||||
|
||||
#if TARGET_LONG_BITS == 32
|
||||
# define ctztl ctz32
|
||||
# define clztl clz32
|
||||
#else
|
||||
# define ctztl ctz64
|
||||
# define clztl clz64
|
||||
#endif
|
||||
|
||||
/* PDEP (BMI2): deposit successive low bits of src into the positions
   of the set bits of mask; all other destination bits are zero. */
target_ulong helper_pdep(target_ulong src, target_ulong mask)
{
    target_ulong result = 0;
    int bit;

    for (bit = 0; mask != 0; bit++) {
        int pos = ctztl(mask);
        mask &= mask - 1; /* clear the lowest set bit */
        result |= ((src >> bit) & 1) << pos;
    }
    return result;
}
|
||||
|
||||
/* PEXT (BMI2): extract the bits of src selected by the set bits of mask
   and pack them into the low bits of the result. */
target_ulong helper_pext(target_ulong src, target_ulong mask)
{
    target_ulong result = 0;
    int bit;

    for (bit = 0; mask != 0; bit++) {
        int pos = ctztl(mask);
        mask &= mask - 1; /* clear the lowest set bit */
        result |= ((src >> pos) & 1) << bit;
    }
    return result;
}
|
||||
|
||||
#define SHIFT 0
|
||||
#include "shift_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#define SHIFT 1
|
||||
#include "shift_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#define SHIFT 2
|
||||
#include "shift_helper_template.h"
|
||||
#undef SHIFT
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
#define SHIFT 3
|
||||
#include "shift_helper_template.h"
|
||||
#undef SHIFT
|
||||
#endif
|
||||
|
||||
/* Test that BIT is enabled in CR4. If not, raise an illegal opcode
   exception. This reduces the requirements for rare CR4 bits being
   mapped into HFLAGS. */
void helper_cr4_testbit(CPUX86State *env, uint32_t bit)
{
    /* #UD when the required CR4 feature bit is clear */
    if (unlikely((env->cr[4] & bit) == 0)) {
        raise_exception_ra(env, EXCP06_ILLOP, GETPC());
    }
}
|
||||
|
||||
/* RDRAND: fill the destination with host-provided randomness.
   CC_OP is expected to be set to a flags-from-cc_src mode by the caller. */
target_ulong HELPER(rdrand)(CPUX86State *env)
{
    target_ulong ret;

    if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) {
        // qemu_log_mask(LOG_UNIMP, "rdrand: Crypto failure: %s",
        //               error_get_pretty(err));
        // error_free(err);
        /* Failure clears CF and all other flags, and returns 0. */
        env->cc_src = 0;
        return 0;
    }

    /* Success sets CF and clears all others. */
    env->cc_src = CC_C;
    return ret;
}
|
||||
23
qemu/target/i386/machine.c
Normal file
23
qemu/target/i386/machine.c
Normal file
@@ -0,0 +1,23 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/exec-all.h"
|
||||
|
||||
#include "sysemu/tcg.h"
|
||||
|
||||
/* Split an 80-bit extended-precision float into its 64-bit mantissa
   and 16-bit sign/exponent words, via the CPU_LDoubleU overlay union. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU u;

    u.d = f;
    *pmant = u.l.lower;
    *pexp = u.l.upper;
}
|
||||
|
||||
/* Reassemble an 80-bit extended-precision float from its 64-bit mantissa
   and 16-bit sign/exponent words. */
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU u;

    u.l.upper = upper;
    u.l.lower = mant;
    return u.d;
}
|
||||
184
qemu/target/i386/mem_helper.c
Normal file
184
qemu/target/i386/mem_helper.c
Normal file
@@ -0,0 +1,184 @@
|
||||
/*
|
||||
* x86 memory access helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "qemu/int128.h"
|
||||
#include "qemu/atomic128.h"
|
||||
#include "tcg/tcg.h"
|
||||
|
||||
|
||||
/* CMPXCHG8B without LOCK: compare the 64-bit value at a0 with EDX:EAX;
   if equal store ECX:EBX and set ZF, else load the old value into EDX:EAX
   and clear ZF. The store always happens, matching hardware behavior. */
void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

    oldv = cpu_ldq_data_ra(env, a0, ra);
    newv = (cmpv == oldv ? newv : oldv);
    /* always do the store */
    cpu_stq_data_ra(env, a0, newv, ra);

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
|
||||
|
||||
/* Locked CMPXCHG8B: atomic 64-bit compare-and-swap at a0 against EDX:EAX,
   storing ECX:EBX on match. Falls back to the serialized slow path when
   the host has no 64-bit atomics. */
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
#ifdef CONFIG_ATOMIC64
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

    {
        uintptr_t ra = GETPC();
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
        oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
    }

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
#else
    /* no host atomics: restart this instruction in the exclusive loop */
    cpu_loop_exit_atomic(env_cpu(env), GETPC());
#endif /* CONFIG_ATOMIC64 */
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
/* CMPXCHG16B without LOCK: 128-bit compare-and-swap at a0 against RDX:RAX,
   storing RCX:RBX on match. The operand must be 16-byte aligned (#GP
   otherwise); the store always happens. */
void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    Int128 oldv, cmpv, newv;
    uint64_t o0, o1;
    int eflags;
    bool success;

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
    newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

    o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
    o1 = cpu_ldq_data_ra(env, a0 + 8, ra);

    oldv = int128_make128(o0, o1);
    success = int128_eq(oldv, cmpv);
    if (!success) {
        /* on mismatch, write back the value just read (store still occurs) */
        newv = oldv;
    }

    cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
    cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);

    if (success) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = int128_getlo(oldv);
        env->regs[R_EDX] = int128_gethi(oldv);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
|
||||
|
||||
/* Locked CMPXCHG16B: atomic 128-bit compare-and-swap at a0. Requires
   16-byte alignment (#GP). Uses the host's 128-bit CAS when available,
   otherwise restarts the instruction in the serialized exclusive loop. */
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    } else if (HAVE_CMPXCHG128) {
        int eflags = cpu_cc_compute_all(env, CC_OP);

        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
                                                    newv, oi, ra);

        if (int128_eq(oldv, cmpv)) {
            eflags |= CC_Z;
        } else {
            env->regs[R_EAX] = int128_getlo(oldv);
            env->regs[R_EDX] = int128_gethi(oldv);
            eflags &= ~CC_Z;
        }
        CC_SRC = eflags;
    } else {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
}
|
||||
#endif
|
||||
|
||||
/* BOUND (16-bit): check signed index v against the [low, high] pair of
   int16 bounds stored at a0; raise #BR when out of range. */
void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data_ra(env, a0, GETPC());
    high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
    v = (int16_t)v;
    if (v < low || v > high) {
        if (env->hflags & HF_MPX_EN_MASK) {
            /* with MPX enabled, report a clean bound status first */
            env->bndcs_regs.sts = 0;
        }
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}
|
||||
|
||||
/* BOUND (32-bit): check signed index v against the [low, high] pair of
   int32 bounds stored at a0; raise #BR when out of range. */
void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data_ra(env, a0, GETPC());
    high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
    if (v < low || v > high) {
        if (env->hflags & HF_MPX_EN_MASK) {
            /* with MPX enabled, report a clean bound status first */
            env->bndcs_regs.sts = 0;
        }
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}
|
||||
633
qemu/target/i386/misc_helper.c
Normal file
633
qemu/target/i386/misc_helper.c
Normal file
@@ -0,0 +1,633 @@
|
||||
/*
|
||||
* x86 misc helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "exec/ioport.h"
|
||||
|
||||
#include "uc_priv.h"
|
||||
|
||||
/* OUT imm8/DX, AL: byte write to an I/O port via Unicorn's port hooks. */
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    cpu_outb(env->uc, port, data);
}
|
||||
|
||||
/* IN AL, imm8/DX: byte read from an I/O port via Unicorn's port hooks. */
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return cpu_inb(env->uc, port);
}
|
||||
|
||||
/* OUT imm8/DX, AX: word write to an I/O port via Unicorn's port hooks. */
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    cpu_outw(env->uc, port, data);
}
|
||||
|
||||
/* IN AX, imm8/DX: word read from an I/O port via Unicorn's port hooks. */
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return cpu_inw(env->uc, port);
}
|
||||
|
||||
/* OUT imm8/DX, EAX: dword write to an I/O port via Unicorn's port hooks. */
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    cpu_outl(env->uc, port, data);
}
|
||||
|
||||
/* IN EAX, imm8/DX: dword read from an I/O port via Unicorn's port hooks. */
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return cpu_inl(env->uc, port);
}
|
||||
|
||||
/* INTO: raise the #OF (interrupt 4) trap when the overflow flag is set. */
void helper_into(CPUX86State *env, int next_eip_addend)
{
    int flags = cpu_cc_compute_all(env, CC_OP);

    if (flags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
|
||||
|
||||
/* CPUID: dispatch on EAX (leaf) and ECX (subleaf), then write the four
   result registers. Checked against SVM intercepts first. */
void helper_cpuid(CPUX86State *env)
{
    uint32_t out_eax, out_ebx, out_ecx, out_edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC());

    cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
                  &out_eax, &out_ebx, &out_ecx, &out_edx);
    env->regs[R_EAX] = out_eax;
    env->regs[R_EBX] = out_ebx;
    env->regs[R_ECX] = out_ecx;
    env->regs[R_EDX] = out_edx;
}
|
||||
|
||||
/* MOV reg, CRn: read a control register. CR8 reads the virtual TPR when
   SVM V_INTR masking is active; the APIC TPR path is stubbed out here. */
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());
    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            /* no emulated APIC here: TPR reads as 0 */
            // val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
            val = 0;
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}
|
||||
|
||||
/* MOV CRn, reg: write a control register. CR0/CR3/CR4 go through the
   cpu_x86_update_* paths (which also update hflags / flush TLBs);
   CR8 only tracks the virtual TPR since there is no emulated APIC. */
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, (uint32_t)t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, (uint32_t)t0);
        break;
    case 8:
#if 0
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
        }
#endif
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
|
||||
|
||||
/* LMSW: load only the low four CR0 bits (PE/MP/EM/TS). PE cannot be
   cleared once set, hence the ~0xe mask preserves the current PE bit. */
void helper_lmsw(CPUX86State *env, target_ulong t0)
{
    target_ulong new_cr0 = (env->cr[0] & ~0xe) | (t0 & 0xf);

    helper_write_crN(env, 0, new_cr0);
}
|
||||
|
||||
/* INVLPG: drop the TLB entry covering addr, after the SVM intercept check. */
void helper_invlpg(CPUX86State *env, target_ulong addr)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC());
    tlb_flush_page(CPU(env_archcpu(env)), addr);
}
|
||||
|
||||
/* RDTSC: read the time-stamp counter into EDX:EAX.
   #GP when CR4.TSD is set and CPL != 0. */
void helper_rdtsc(CPUX86State *env)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0, GETPC());

    /* guest TSC = host-derived TSC plus the (SVM) offset */
    val = cpu_get_tsc(env) + env->tsc_offset;
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
|
||||
|
||||
/* RDTSCP: RDTSC plus the TSC_AUX MSR value in ECX. */
void helper_rdtscp(CPUX86State *env)
{
    helper_rdtsc(env);
    env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
}
|
||||
|
||||
/* RDPMC: performance counters are not emulated; after the permission and
   intercept checks this always raises #UD. */
void helper_rdpmc(CPUX86State *env)
{
    /* #GP when CR4.PCE is clear... wait — PCE set with CPL!=0 per mask below */
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0, GETPC());

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}
|
||||
|
||||
/* WRMSR: write the 64-bit value EDX:EAX to the MSR selected by ECX.
   Only a subset of MSRs is modeled; unknown MSRs are silently ignored
   (see the trailing XXX). Change vs. original: the EFER update mask
   tested CPUID_EXT2_FFXSR twice — the duplicate branch is removed. */
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        /* no emulated APIC: write ignored */
        // cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            /* only bits whose features are advertised may be changed */
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* base/mask pairs are interleaved, hence the /2 */
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        /* writable only to all-zeros or all-ones, and only if MCG_CTL_P */
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        /* machine-check bank registers: MC0_CTL .. MC(n)_MISC */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
}
|
||||
|
||||
/* RDMSR: read the MSR selected by ECX into EDX:EAX.
   Unknown MSRs read as 0 (see trailing XXX) instead of raising #GP. */
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        /* no emulated APIC */
        val = 0; // cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* base/mask pairs are interleaved, hence the /2 */
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        /* machine-check bank registers */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
|
||||
|
||||
/*
 * Yield the current vCPU: leave the execution loop with a plain
 * interrupt exit so that another vCPU gets a chance to run.
 */
static void do_pause(X86CPU *cpu)
{
    CPUState *state = CPU(cpu);

    state->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit(state);
}
|
||||
|
||||
/*
 * Put the vCPU into the halted state and leave the execution loop
 * with EXCP_HLT.  Does not return.
 */
static void do_hlt(X86CPU *cpu)
{
    CPUState *state = CPU(cpu);
    CPUX86State *x86env = &cpu->env;

    /* Drop the interrupt shadow — relevant when HLT directly follows STI. */
    x86env->hflags &= ~HF_INHIBIT_IRQ_MASK;

    state->halted = 1;
    state->exception_index = EXCP_HLT;
    cpu_loop_exit(state);
}
|
||||
|
||||
/*
 * HLT instruction helper: check for an SVM intercept, advance EIP past
 * the instruction, then halt the vCPU (does not return).
 */
void helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());

    /* The instruction completes before the halt takes effect. */
    env->eip += next_eip_addend;

    do_hlt(env_archcpu(env));
}
|
||||
|
||||
/*
 * MONITOR instruction helper.  Raises #GP if ECX (the extensions
 * operand) is nonzero; otherwise only reports the SVM intercept.
 * The monitored address itself is not recorded.
 */
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX]) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}
|
||||
|
||||
/*
 * MWAIT instruction helper.
 *
 * Raises #GP if ECX (the extensions operand) is nonzero, checks for an
 * SVM intercept, advances EIP past the instruction, then approximates
 * MWAIT: CPU 0 is simply halted; other CPUs fall through and do
 * nothing (the pause path is disabled below).  May not return when it
 * halts.
 */
void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);

    /* Only ECX == 0 (no extensions) is supported. */
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    /* The instruction completes before any wait takes effect. */
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
// if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { // TODO
    if (cs->cpu_index != 0) {
        /* Secondary CPUs: yielding is disabled here, so this is a no-op. */
// do_pause(cpu);
    } else {
        /* CPU 0: treat MWAIT like HLT. */
        do_hlt(cpu);
    }
}
|
||||
|
||||
/*
 * PAUSE instruction helper: check for an SVM intercept, advance EIP
 * past the instruction, then yield the vCPU (does not return).
 */
void helper_pause(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC());

    /* The instruction completes before the yield takes effect. */
    env->eip += next_eip_addend;

    do_pause(env_archcpu(env));
}
|
||||
|
||||
/*
 * Leave the execution loop with a debug exception (used for
 * breakpoints/single-step).  Does not return.
 */
void helper_debug(CPUX86State *env)
{
    CPUState *state = env_cpu(env);

    state->exception_index = EXCP_DEBUG;
    cpu_loop_exit(state);
}
|
||||
|
||||
/*
 * RDPKRU instruction helper: returns the PKRU register.
 * Raises #UD when CR4.PKE is clear and #GP when ECX is nonzero.
 */
uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
{
    if (!(env->cr[4] & CR4_PKE_MASK)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    if (ecx != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    return env->pkru;
}
|
||||
|
||||
/*
 * WRPKRU instruction helper: writes the PKRU register.
 * Raises #UD when CR4.PKE is clear, and #GP when ECX is nonzero or any
 * of the upper 32 bits of the value are set.  Flushes the TLB because
 * PKRU changes the outcome of page-permission checks.
 */
void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
{
    CPUState *state = env_cpu(env);

    if (!(env->cr[4] & CR4_PKE_MASK)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    if (ecx != 0 || (val & 0xFFFFFFFF00000000ull) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    env->pkru = val;
    tlb_flush(state);
}
|
||||
138
qemu/target/i386/mpx_helper.c
Normal file
138
qemu/target/i386/mpx_helper.c
Normal file
@@ -0,0 +1,138 @@
|
||||
/*
|
||||
* x86 MPX helpers
|
||||
*
|
||||
* Copyright (c) 2015 Red Hat, Inc.
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "exec/exec-all.h"
|
||||
|
||||
|
||||
/*
 * MPX bounds check: if the translated comparison failed, record status
 * 1 in BNDSTATUS and raise #BR; otherwise do nothing.
 */
void helper_bndck(CPUX86State *env, uint32_t fail)
{
    if (likely(!fail)) {
        return;
    }
    env->bndcs_regs.sts = 1;
    raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
|
||||
|
||||
/*
 * Walk the 64-bit MPX two-level bound structure and return the linear
 * address of the bound-table entry for @base.  Raises #BR (with the
 * faulting directory-entry address | 2 in BNDSTATUS) if the bound
 * directory entry's valid bit is clear.
 */
static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
{
    uint64_t bndcsr, bde, bt;

    /* User mode is configured by BNDCFGU; supervisor by IA32_BNDCFGS. */
    if ((env->hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    /* Bound directory entry: bits 47:20 of the address select an
       8-byte slot in the directory, whose base is bits 63:20 of the
       BNDCFG register (4K-aligned). */
    bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
    bt = cpu_ldq_data_ra(env, bde, ra);
    if ((bt & 1) == 0) {
        /* Invalid directory entry: report its address + error code 2. */
        env->bndcs_regs.sts = bde | 2;
        raise_exception_ra(env, EXCP05_BOUND, ra);
    }

    /* Bound table entry: bits 19:3 of the address select a 32-byte
       entry in the table pointed to by the directory entry (low 3
       control bits masked off). */
    return (extract64(base, 3, 17) << 5) + (bt & ~7);
}
|
||||
|
||||
/*
 * Walk the 32-bit MPX two-level bound structure and return the linear
 * address of the bound-table entry for @base.  Raises #BR (with the
 * faulting directory-entry address | 2 in BNDSTATUS) if the bound
 * directory entry's valid bit is clear.
 */
static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
{
    uint32_t bndcsr, bde, bt;

    /* User mode is configured by BNDCFGU; supervisor by IA32_BNDCFGS. */
    if ((env->hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    /* Bound directory entry: bits 31:12 of the address select a
       4-byte slot in the directory, whose base is the page-aligned
       part of the BNDCFG register. */
    bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
    bt = cpu_ldl_data_ra(env, bde, ra);
    if ((bt & 1) == 0) {
        /* Invalid directory entry: report its address + error code 2. */
        env->bndcs_regs.sts = bde | 2;
        raise_exception_ra(env, EXCP05_BOUND, ra);
    }

    /* Bound table entry: bits 11:2 of the address select a 16-byte
       entry in the table pointed to by the directory entry (low 2
       control bits masked off). */
    return (extract32(base, 2, 10) << 4) + (bt & ~3);
}
|
||||
|
||||
/*
 * BNDLDX (64-bit): load bounds for @ptr from the bound table slot for
 * @base.  If the pointer stored alongside the bounds no longer matches
 * @ptr, the bounds are treated as INIT (all zero).  The upper bound is
 * passed back through mmx_t0; the lower bound is the return value.
 */
uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
{
    uintptr_t ra = GETPC();
    uint64_t entry, lower, upper, stored_ptr;

    entry = lookup_bte64(env, base, ra);
    lower = cpu_ldq_data_ra(env, entry, ra);
    upper = cpu_ldq_data_ra(env, entry + 8, ra);
    stored_ptr = cpu_ldq_data_ra(env, entry + 16, ra);

    if (stored_ptr != ptr) {
        /* Pointer changed since the bounds were stored. */
        lower = 0;
        upper = 0;
    }

    env->mmx_t0.MMX_Q(0) = upper;
    return lower;
}
|
||||
|
||||
/*
 * BNDLDX (32-bit): load bounds for @ptr from the bound table slot for
 * @base.  If the pointer stored alongside the bounds no longer matches
 * @ptr, the bounds are treated as INIT (all zero).  Returns the bounds
 * packed as (upper << 32) | lower.
 */
uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
{
    uintptr_t ra = GETPC();
    uint32_t entry, lower, upper, stored_ptr;

    entry = lookup_bte32(env, base, ra);
    lower = cpu_ldl_data_ra(env, entry, ra);
    upper = cpu_ldl_data_ra(env, entry + 4, ra);
    stored_ptr = cpu_ldl_data_ra(env, entry + 8, ra);

    if (stored_ptr != ptr) {
        /* Pointer changed since the bounds were stored. */
        lower = 0;
        upper = 0;
    }

    return ((uint64_t)upper << 32) | lower;
}
|
||||
|
||||
/*
 * BNDSTX (64-bit): store the bounds pair (@lb, @ub) together with the
 * pointer value @ptr into the bound table entry for @base.
 */
void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
                     uint64_t lb, uint64_t ub)
{
    uintptr_t ra = GETPC();
    uint64_t entry = lookup_bte64(env, base, ra);

    cpu_stq_data_ra(env, entry, lb, ra);
    cpu_stq_data_ra(env, entry + 8, ub, ra);
    /* Record the pointer so a later BNDLDX can detect modification. */
    cpu_stq_data_ra(env, entry + 16, ptr, ra);
}
|
||||
|
||||
/*
 * BNDSTX (32-bit): store the bounds pair (@lb, @ub) together with the
 * pointer value @ptr into the bound table entry for @base.
 */
void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
                     uint64_t lb, uint64_t ub)
{
    uintptr_t ra = GETPC();
    uint32_t entry = lookup_bte32(env, base, ra);

    cpu_stl_data_ra(env, entry, lb, ra);
    cpu_stl_data_ra(env, entry + 4, ub, ra);
    /* Record the pointer so a later BNDLDX can detect modification. */
    cpu_stl_data_ra(env, entry + 8, ptr, ra);
}
|
||||
|
||||
/*
 * Called on branches: when MPX bound preservation is not enabled,
 * reset all bound registers to INIT state and clear the in-use flag.
 */
void helper_bnd_jmp(CPUX86State *env)
{
    if (env->hflags2 & HF2_MPX_PR_MASK) {
        return;
    }
    memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
    env->hflags &= ~HF_MPX_IU_MASK;
}
|
||||
2309
qemu/target/i386/ops_sse.h
Normal file
2309
qemu/target/i386/ops_sse.h
Normal file
File diff suppressed because it is too large
Load Diff
359
qemu/target/i386/ops_sse_header.h
Normal file
359
qemu/target/i386/ops_sse_header.h
Normal file
@@ -0,0 +1,359 @@
|
||||
/*
|
||||
* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
|
||||
*
|
||||
* Copyright (c) 2005 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#if SHIFT == 0
|
||||
#define Reg MMXReg
|
||||
#define SUFFIX _mmx
|
||||
#else
|
||||
#define Reg ZMMReg
|
||||
#define SUFFIX _xmm
|
||||
#endif
|
||||
|
||||
#define dh_alias_Reg ptr
|
||||
#define dh_alias_ZMMReg ptr
|
||||
#define dh_alias_MMXReg ptr
|
||||
#define dh_ctype_Reg Reg *
|
||||
#define dh_ctype_ZMMReg ZMMReg *
|
||||
#define dh_ctype_MMXReg MMXReg *
|
||||
#define dh_is_signed_Reg dh_is_signed_ptr
|
||||
#define dh_is_signed_ZMMReg dh_is_signed_ptr
|
||||
#define dh_is_signed_MMXReg dh_is_signed_ptr
|
||||
|
||||
DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg)
|
||||
#endif
|
||||
|
||||
#define SSE_HELPER_B(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#define SSE_HELPER_W(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#define SSE_HELPER_L(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
#define SSE_HELPER_Q(name, F)\
|
||||
DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
SSE_HELPER_B(paddb, FADD)
|
||||
SSE_HELPER_W(paddw, FADD)
|
||||
SSE_HELPER_L(paddl, FADD)
|
||||
SSE_HELPER_Q(paddq, FADD)
|
||||
|
||||
SSE_HELPER_B(psubb, FSUB)
|
||||
SSE_HELPER_W(psubw, FSUB)
|
||||
SSE_HELPER_L(psubl, FSUB)
|
||||
SSE_HELPER_Q(psubq, FSUB)
|
||||
|
||||
SSE_HELPER_B(paddusb, FADDUB)
|
||||
SSE_HELPER_B(paddsb, FADDSB)
|
||||
SSE_HELPER_B(psubusb, FSUBUB)
|
||||
SSE_HELPER_B(psubsb, FSUBSB)
|
||||
|
||||
SSE_HELPER_W(paddusw, FADDUW)
|
||||
SSE_HELPER_W(paddsw, FADDSW)
|
||||
SSE_HELPER_W(psubusw, FSUBUW)
|
||||
SSE_HELPER_W(psubsw, FSUBSW)
|
||||
|
||||
SSE_HELPER_B(pminub, FMINUB)
|
||||
SSE_HELPER_B(pmaxub, FMAXUB)
|
||||
|
||||
SSE_HELPER_W(pminsw, FMINSW)
|
||||
SSE_HELPER_W(pmaxsw, FMAXSW)
|
||||
|
||||
SSE_HELPER_Q(pand, FAND)
|
||||
SSE_HELPER_Q(pandn, FANDN)
|
||||
SSE_HELPER_Q(por, FOR)
|
||||
SSE_HELPER_Q(pxor, FXOR)
|
||||
|
||||
SSE_HELPER_B(pcmpgtb, FCMPGTB)
|
||||
SSE_HELPER_W(pcmpgtw, FCMPGTW)
|
||||
SSE_HELPER_L(pcmpgtl, FCMPGTL)
|
||||
|
||||
SSE_HELPER_B(pcmpeqb, FCMPEQ)
|
||||
SSE_HELPER_W(pcmpeqw, FCMPEQ)
|
||||
SSE_HELPER_L(pcmpeql, FCMPEQ)
|
||||
|
||||
SSE_HELPER_W(pmullw, FMULLW)
|
||||
#if SHIFT == 0
|
||||
SSE_HELPER_W(pmulhrw, FMULHRW)
|
||||
#endif
|
||||
SSE_HELPER_W(pmulhuw, FMULHUW)
|
||||
SSE_HELPER_W(pmulhw, FMULHW)
|
||||
|
||||
SSE_HELPER_B(pavgb, FAVG)
|
||||
SSE_HELPER_W(pavgw, FAVG)
|
||||
|
||||
DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl)
|
||||
DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64)
|
||||
#endif
|
||||
|
||||
#if SHIFT == 0
|
||||
DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int)
|
||||
#else
|
||||
DEF_HELPER_3(shufps, void, Reg, Reg, int)
|
||||
DEF_HELPER_3(shufpd, void, Reg, Reg, int)
|
||||
DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int)
|
||||
DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int)
|
||||
DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int)
|
||||
#endif
|
||||
|
||||
#if SHIFT == 1
|
||||
/* FPU ops */
|
||||
/* XXX: not accurate */
|
||||
|
||||
#define SSE_HELPER_S(name, F) \
|
||||
DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
|
||||
|
||||
SSE_HELPER_S(add, FPU_ADD)
|
||||
SSE_HELPER_S(sub, FPU_SUB)
|
||||
SSE_HELPER_S(mul, FPU_MUL)
|
||||
SSE_HELPER_S(div, FPU_DIV)
|
||||
SSE_HELPER_S(min, FPU_MIN)
|
||||
SSE_HELPER_S(max, FPU_MAX)
|
||||
SSE_HELPER_S(sqrt, FPU_SQRT)
|
||||
|
||||
|
||||
DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg)
|
||||
DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg)
|
||||
DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32)
|
||||
DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32)
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64)
|
||||
DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg)
|
||||
DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg)
|
||||
DEF_HELPER_2(cvtss2si, s32, env, ZMMReg)
|
||||
DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg)
|
||||
DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg)
|
||||
DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg)
|
||||
DEF_HELPER_2(cvttss2si, s32, env, ZMMReg)
|
||||
DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg)
|
||||
#ifdef TARGET_X86_64
|
||||
DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg)
|
||||
DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int)
|
||||
DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int)
|
||||
DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg)
|
||||
DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg)
|
||||
|
||||
#define SSE_HELPER_CMP(name, F) \
|
||||
DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
|
||||
|
||||
SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
|
||||
SSE_HELPER_CMP(cmplt, FPU_CMPLT)
|
||||
SSE_HELPER_CMP(cmple, FPU_CMPLE)
|
||||
SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
|
||||
SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
|
||||
SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
|
||||
SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
|
||||
SSE_HELPER_CMP(cmpord, FPU_CMPORD)
|
||||
|
||||
DEF_HELPER_3(ucomiss, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(comiss, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(ucomisd, void, env, Reg, Reg)
|
||||
DEF_HELPER_3(comisd, void, env, Reg, Reg)
|
||||
DEF_HELPER_2(movmskps, i32, env, Reg)
|
||||
DEF_HELPER_2(movmskpd, i32, env, Reg)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg)
|
||||
DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg)
|
||||
#define UNPCK_OP(base_name, base) \
|
||||
DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \
|
||||
DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg)
|
||||
|
||||
UNPCK_OP(l, 0)
|
||||
UNPCK_OP(h, 1)
|
||||
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg)
|
||||
#endif
|
||||
|
||||
/* 3DNow! float ops */
|
||||
#if SHIFT == 0
|
||||
DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg)
|
||||
DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg)
|
||||
#endif
|
||||
|
||||
/* SSSE3 op helpers */
|
||||
DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32)
|
||||
|
||||
/* SSE4.1 op helpers */
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32)
|
||||
#endif
|
||||
|
||||
/* SSE4.2 op helpers */
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_3(crc32, tl, i32, tl, i32)
|
||||
#endif
|
||||
|
||||
/* AES-NI op helpers */
|
||||
#if SHIFT == 1
|
||||
DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg)
|
||||
DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32)
|
||||
DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32)
|
||||
#endif
|
||||
|
||||
#undef SHIFT
|
||||
#undef Reg
|
||||
#undef SUFFIX
|
||||
|
||||
#undef SSE_HELPER_B
|
||||
#undef SSE_HELPER_W
|
||||
#undef SSE_HELPER_L
|
||||
#undef SSE_HELPER_Q
|
||||
#undef SSE_HELPER_S
|
||||
#undef SSE_HELPER_CMP
|
||||
#undef UNPCK_OP
|
||||
2631
qemu/target/i386/seg_helper.c
Normal file
2631
qemu/target/i386/seg_helper.c
Normal file
File diff suppressed because it is too large
Load Diff
108
qemu/target/i386/shift_helper_template.h
Normal file
108
qemu/target/i386/shift_helper_template.h
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* x86 shift helpers
|
||||
*
|
||||
* Copyright (c) 2008 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define DATA_BITS (1 << (3 + SHIFT))
|
||||
#define SHIFT_MASK (DATA_BITS - 1)
|
||||
#if DATA_BITS <= 32
|
||||
#define SHIFT1_MASK 0x1f
|
||||
#else
|
||||
#define SHIFT1_MASK 0x3f
|
||||
#endif
|
||||
|
||||
#if DATA_BITS == 8
|
||||
#define SUFFIX b
|
||||
#define DATA_MASK 0xff
|
||||
#elif DATA_BITS == 16
|
||||
#define SUFFIX w
|
||||
#define DATA_MASK 0xffff
|
||||
#elif DATA_BITS == 32
|
||||
#define SUFFIX l
|
||||
#define DATA_MASK 0xffffffff
|
||||
#elif DATA_BITS == 64
|
||||
#define SUFFIX q
|
||||
#define DATA_MASK 0xffffffffffffffffULL
|
||||
#else
|
||||
#error unhandled operand size
|
||||
#endif
|
||||
|
||||
/*
 * Rotate-through-carry left (RCL) for the current template operand
 * size.  Rotates @t0 left by @t1 positions through the carry flag and,
 * when the count is nonzero, rewrites C and O in cc_src while leaving
 * the other flag bits untouched.  Returns the rotated value.
 */
target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
                                      target_ulong t1)
{
    int count, eflags;
    target_ulong src;
    target_long res;

    /* Hardware masks the count to 5 bits (6 for 64-bit operands). */
    count = t1 & SHIFT1_MASK;
#if DATA_BITS == 16
    /* For 8/16-bit operands the effective count is count mod (bits+1),
       taken from a precomputed table. */
    count = rclw_table[count];
#elif DATA_BITS == 8
    count = rclb_table[count];
#endif
    if (count) {
        eflags = (int)env->cc_src;
        t0 &= DATA_MASK;
        src = t0;
        /* Shift left and bring the old carry in just below the shifted
           bits; for counts > 1 the high bits of src wrap around into
           the low end (DATA_BITS + 1 positions total, including CF). */
        res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
        if (count > 1) {
            res |= t0 >> (DATA_BITS + 1 - count);
        }
        t0 = res;
        /* New CF is the last bit rotated out of the top of src; OF is
           set from the sign change between src and result, aligned to
           bit 11 of EFLAGS. */
        env->cc_src = (eflags & ~(CC_C | CC_O)) |
            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
            ((src >> (DATA_BITS - count)) & CC_C);
    }
    return t0;
}
|
||||
|
||||
/*
 * Rotate-through-carry right (RCR) for the current template operand
 * size.  Rotates @t0 right by @t1 positions through the carry flag
 * and, when the count is nonzero, rewrites C and O in cc_src while
 * leaving the other flag bits untouched.  Returns the rotated value.
 */
target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
                                      target_ulong t1)
{
    int count, eflags;
    target_ulong src;
    target_long res;

    /* Hardware masks the count to 5 bits (6 for 64-bit operands). */
    count = t1 & SHIFT1_MASK;
#if DATA_BITS == 16
    /* For 8/16-bit operands the effective count is count mod (bits+1);
       the same table applies to RCL and RCR. */
    count = rclw_table[count];
#elif DATA_BITS == 8
    count = rclb_table[count];
#endif
    if (count) {
        eflags = (int)env->cc_src;
        t0 &= DATA_MASK;
        src = t0;
        /* Shift right and bring the old carry in just above the
           shifted bits; for counts > 1 the low bits of src wrap
           around into the high end (DATA_BITS + 1 positions total,
           including CF). */
        res = (t0 >> count) |
            ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
        if (count > 1) {
            res |= t0 << (DATA_BITS + 1 - count);
        }
        t0 = res;
        /* New CF is the last bit rotated out of the bottom of src; OF
           is set from the sign change between src and result, aligned
           to bit 11 of EFLAGS. */
        env->cc_src = (eflags & ~(CC_C | CC_O)) |
            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
            ((src >> (count - 1)) & CC_C);
    }
    return t0;
}
|
||||
|
||||
#undef DATA_BITS
|
||||
#undef SHIFT_MASK
|
||||
#undef SHIFT1_MASK
|
||||
#undef DATA_TYPE
|
||||
#undef DATA_MASK
|
||||
#undef SUFFIX
|
||||
315
qemu/target/i386/smm_helper.c
Normal file
315
qemu/target/i386/smm_helper.c
Normal file
@@ -0,0 +1,315 @@
|
||||
/*
|
||||
* x86 SMM helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
|
||||
/* SMM support */
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
#define SMM_REVISION_ID 0x00020064
|
||||
#else
|
||||
#define SMM_REVISION_ID 0x00020000
|
||||
#endif
|
||||
|
||||
void do_smm_enter(X86CPU *cpu)
|
||||
{
|
||||
CPUX86State *env = &cpu->env;
|
||||
CPUState *cs = CPU(cpu);
|
||||
target_ulong sm_state;
|
||||
SegmentCache *dt;
|
||||
int i, offset;
|
||||
|
||||
// qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
|
||||
// log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
|
||||
|
||||
env->msr_smi_count++;
|
||||
env->hflags |= HF_SMM_MASK;
|
||||
if (env->hflags2 & HF2_NMI_MASK) {
|
||||
env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
|
||||
} else {
|
||||
env->hflags2 |= HF2_NMI_MASK;
|
||||
}
|
||||
|
||||
sm_state = env->smbase + 0x8000;
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
for (i = 0; i < 6; i++) {
|
||||
dt = &env->segs[i];
|
||||
offset = 0x7e00 + i * 16;
|
||||
x86_stw_phys(cs, sm_state + offset, dt->selector);
|
||||
x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
|
||||
x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
|
||||
x86_stq_phys(cs, sm_state + offset + 8, dt->base);
|
||||
}
|
||||
|
||||
x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
|
||||
|
||||
x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
|
||||
x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
|
||||
x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
|
||||
|
||||
x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
|
||||
|
||||
x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
|
||||
x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
|
||||
x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
|
||||
|
||||
/* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
|
||||
is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
|
||||
7EA0-7ED7 as "reserved". What's this, and what's really
|
||||
supposed to happen? */
|
||||
x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
|
||||
|
||||
x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
|
||||
x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
|
||||
x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
|
||||
x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
|
||||
x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
|
||||
x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
|
||||
x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
|
||||
x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
|
||||
for (i = 8; i < 16; i++) {
|
||||
x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
|
||||
}
|
||||
x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
|
||||
x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
|
||||
x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
|
||||
x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
|
||||
x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
|
||||
x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
|
||||
x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
|
||||
#else
|
||||
x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
|
||||
x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
|
||||
x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
|
||||
x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
|
||||
x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
|
||||
x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
|
||||
x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
|
||||
x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
|
||||
x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
|
||||
x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
|
||||
x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
|
||||
|
||||
for (i = 0; i < 6; i++) {
|
||||
dt = &env->segs[i];
|
||||
if (i < 3) {
|
||||
offset = 0x7f84 + i * 12;
|
||||
} else {
|
||||
offset = 0x7f2c + (i - 3) * 12;
|
||||
}
|
||||
x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
|
||||
x86_stl_phys(cs, sm_state + offset + 8, dt->base);
|
||||
x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
|
||||
x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
|
||||
}
|
||||
x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
|
||||
|
||||
x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
|
||||
x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
|
||||
#endif
|
||||
/* init SMM cpu state */
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
cpu_load_efer(env, 0);
|
||||
#endif
|
||||
cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
|
||||
DF_MASK));
|
||||
env->eip = 0x00008000;
|
||||
cpu_x86_update_cr0(env,
|
||||
env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
|
||||
CR0_PG_MASK));
|
||||
cpu_x86_update_cr4(env, 0);
|
||||
env->dr[7] = 0x00000400;
|
||||
|
||||
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
|
||||
0xffffffff,
|
||||
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
||||
DESC_G_MASK | DESC_A_MASK);
|
||||
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
|
||||
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
||||
DESC_G_MASK | DESC_A_MASK);
|
||||
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
|
||||
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
||||
DESC_G_MASK | DESC_A_MASK);
|
||||
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
|
||||
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
||||
DESC_G_MASK | DESC_A_MASK);
|
||||
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
|
||||
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
||||
DESC_G_MASK | DESC_A_MASK);
|
||||
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
|
||||
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
||||
DESC_G_MASK | DESC_A_MASK);
|
||||
}
|
||||
|
||||
/*
 * RSM: resume from System Management Mode.
 *
 * Reloads the full CPU state from the SMRAM state-save area at
 * smbase + 0x8000 (the mirror image of what the SMM-entry path stored
 * there), then clears the SMM/NMI-blocking flags so normal execution
 * resumes.  The fixed offsets below follow the processor's SMM
 * state-save map; the 64-bit and legacy (32-bit) maps differ, hence
 * the #ifdef TARGET_X86_64 split.
 */
void helper_rsm(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    /* Base of the state-save image inside SMRAM. */
    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));

    env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);

    env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
    env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
    /* Descriptor attribute bits are stored packed; re-expand into
       the segment-cache flags layout. */
    env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);

    env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
    env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
    env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
    env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
    env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
    env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
    env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
    env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
    /* R8..R15 continue downward from the same array of qwords. */
    for (i = 8; i < 16; i++) {
        env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);

    /* Restore CR4 before CR3/CR0 so paging mode bits are in place
       when the page tables are re-activated. */
    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));

    /* Six segment registers, 16 bytes apart starting at 0x7e00. */
    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               x86_lduw_phys(cs, sm_state + offset),
                               x86_ldq_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_lduw_phys(cs, sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    /* SMBASE relocation is only honoured when the revision ID
       advertises the capability (bit 17). */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
    }
#else
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
    env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
    env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
    env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
    env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
    env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
    env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);

    env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
    env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
    env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);

    env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);

    /* Legacy map splits the six segment descriptors into two runs of
       12-byte records (ES/CS/SS, then DS/FS/GS) with a separate
       selector array at 0x7fa8. */
    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               x86_ldl_phys(cs,
                                            sm_state + 0x7fa8 + i * 4) & 0xffff,
                               x86_ldl_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_ldl_phys(cs,
                                             sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
    }
#endif
    /* NMIs stay blocked only if this RSM executes inside an NMI
       handler that was interrupted by the SMI. */
    if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
    env->hflags &= ~HF_SMM_MASK;

    // qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    // log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}
|
||||
238
qemu/target/i386/svm.h
Normal file
238
qemu/target/i386/svm.h
Normal file
@@ -0,0 +1,238 @@
|
||||
#ifndef SVM_H
#define SVM_H

#include "qemu/compiler.h"

/*
 * AMD SVM (Secure Virtual Machine) definitions: VMCB control-field
 * bit layouts, #VMEXIT reason codes, and the in-memory VMCB layout.
 * The numeric values mirror the hardware architecture and must not
 * be changed.
 */

/* control.tlb_ctl values */
#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1

/* control.int_ctl bit fields (virtual interrupt control) */
#define V_TPR_MASK 0x0f

#define V_IRQ_SHIFT 8
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)

#define V_INTR_PRIO_SHIFT 16
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

#define SVM_INTERRUPT_SHADOW_MASK 1

/* exit_info_1 layout for SVM_EXIT_IOIO */
#define SVM_IOIO_STR_SHIFT 2
#define SVM_IOIO_REP_SHIFT 3
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_ASIZE_SHIFT 7

#define SVM_IOIO_TYPE_MASK 1
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

/* control.event_inj layout (event injection on VMRUN) */
#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_VALID (1 << 31)
#define SVM_EVTINJ_VALID_ERR (1 << 11)

/* control.exit_int_info shares the event_inj encoding */
#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK

#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR

/* #VMEXIT reason codes (control.exit_code) */
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
#define SVM_EXIT_READ_CR8 0x008
#define SVM_EXIT_WRITE_CR0 0x010
#define SVM_EXIT_WRITE_CR3 0x013
#define SVM_EXIT_WRITE_CR4 0x014
#define SVM_EXIT_WRITE_CR8 0x018
#define SVM_EXIT_READ_DR0 0x020
#define SVM_EXIT_READ_DR1 0x021
#define SVM_EXIT_READ_DR2 0x022
#define SVM_EXIT_READ_DR3 0x023
#define SVM_EXIT_READ_DR4 0x024
#define SVM_EXIT_READ_DR5 0x025
#define SVM_EXIT_READ_DR6 0x026
#define SVM_EXIT_READ_DR7 0x027
#define SVM_EXIT_WRITE_DR0 0x030
#define SVM_EXIT_WRITE_DR1 0x031
#define SVM_EXIT_WRITE_DR2 0x032
#define SVM_EXIT_WRITE_DR3 0x033
#define SVM_EXIT_WRITE_DR4 0x034
#define SVM_EXIT_WRITE_DR5 0x035
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
#define SVM_EXIT_INIT 0x063
#define SVM_EXIT_VINTR 0x064
#define SVM_EXIT_CR0_SEL_WRITE 0x065
#define SVM_EXIT_IDTR_READ 0x066
#define SVM_EXIT_GDTR_READ 0x067
#define SVM_EXIT_LDTR_READ 0x068
#define SVM_EXIT_TR_READ 0x069
#define SVM_EXIT_IDTR_WRITE 0x06a
#define SVM_EXIT_GDTR_WRITE 0x06b
#define SVM_EXIT_LDTR_WRITE 0x06c
#define SVM_EXIT_TR_WRITE 0x06d
#define SVM_EXIT_RDTSC 0x06e
#define SVM_EXIT_RDPMC 0x06f
#define SVM_EXIT_PUSHF 0x070
#define SVM_EXIT_POPF 0x071
#define SVM_EXIT_CPUID 0x072
#define SVM_EXIT_RSM 0x073
#define SVM_EXIT_IRET 0x074
#define SVM_EXIT_SWINT 0x075
#define SVM_EXIT_INVD 0x076
#define SVM_EXIT_PAUSE 0x077
#define SVM_EXIT_HLT 0x078
#define SVM_EXIT_INVLPG 0x079
#define SVM_EXIT_INVLPGA 0x07a
#define SVM_EXIT_IOIO 0x07b
#define SVM_EXIT_MSR 0x07c
#define SVM_EXIT_TASK_SWITCH 0x07d
#define SVM_EXIT_FERR_FREEZE 0x07e
#define SVM_EXIT_SHUTDOWN 0x07f
#define SVM_EXIT_VMRUN 0x080
#define SVM_EXIT_VMMCALL 0x081
#define SVM_EXIT_VMLOAD 0x082
#define SVM_EXIT_VMSAVE 0x083
#define SVM_EXIT_STGI 0x084
#define SVM_EXIT_CLGI 0x085
#define SVM_EXIT_SKINIT 0x086
#define SVM_EXIT_RDTSCP 0x087
#define SVM_EXIT_ICEBP 0x088
#define SVM_EXIT_WBINVD 0x089
/* only included in documentation, maybe wrong */
#define SVM_EXIT_MONITOR 0x08a
#define SVM_EXIT_MWAIT 0x08b
#define SVM_EXIT_NPF 0x400

#define SVM_EXIT_ERR -1

#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */

/* control.nested_ctl bit: nested paging enable */
#define SVM_NPT_ENABLED (1 << 0)

/* nested paging mode flags (env->nested_pg_mode) */
#define SVM_NPT_PAE (1 << 0)
#define SVM_NPT_LMA (1 << 1)
#define SVM_NPT_NXE (1 << 2)

/* exit_info_1 bits for SVM_EXIT_NPF */
#define SVM_NPTEXIT_P (1ULL << 0)
#define SVM_NPTEXIT_RW (1ULL << 1)
#define SVM_NPTEXIT_US (1ULL << 2)
#define SVM_NPTEXIT_RSVD (1ULL << 3)
#define SVM_NPTEXIT_ID (1ULL << 4)
#define SVM_NPTEXIT_GPA (1ULL << 32)
#define SVM_NPTEXIT_GPT (1ULL << 33)

/* First 1KB of the VMCB: hypervisor control fields.  Packed because
   the layout is architecturally fixed. */
QEMU_PACK(struct vmcb_control_area {
	uint16_t intercept_cr_read;
	uint16_t intercept_cr_write;
	uint16_t intercept_dr_read;
	uint16_t intercept_dr_write;
	uint32_t intercept_exceptions;
	uint64_t intercept;
	uint8_t reserved_1[44];
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t asid;
	uint8_t tlb_ctl;
	uint8_t reserved_2[3];
	uint32_t int_ctl;
	uint32_t int_vector;
	uint32_t int_state;
	uint8_t reserved_3[4];
	uint64_t exit_code;
	uint64_t exit_info_1;
	uint64_t exit_info_2;
	uint32_t exit_int_info;
	uint32_t exit_int_info_err;
	uint64_t nested_ctl;
	uint8_t reserved_4[16];
	uint32_t event_inj;
	uint32_t event_inj_err;
	uint64_t nested_cr3;
	uint64_t lbr_ctl;
	uint8_t reserved_5[832];
});

/* One segment register entry in the VMCB save area. */
QEMU_PACK(struct vmcb_seg {
	uint16_t selector;
	uint16_t attrib;
	uint32_t limit;
	uint64_t base;
});

/* Guest processor state saved/restored across VMRUN/#VMEXIT. */
QEMU_PACK(struct vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	uint8_t reserved_1[43];
	uint8_t cpl;
	uint8_t reserved_2[4];
	uint64_t efer;
	uint8_t reserved_3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t reserved_4[88];
	uint64_t rsp;
	uint8_t reserved_5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernel_gs_base;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t reserved_6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t last_excp_from;
	uint64_t last_excp_to;
});

/* Complete 4KB Virtual Machine Control Block. */
QEMU_PACK(struct vmcb {
	struct vmcb_control_area control;
	struct vmcb_save_area save;
});

#endif
|
||||
726
qemu/target/i386/svm_helper.c
Normal file
726
qemu/target/i386/svm_helper.c
Normal file
@@ -0,0 +1,726 @@
|
||||
/*
|
||||
* x86 SVM helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
|
||||
/* Secure Virtual Machine helpers */
|
||||
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
|
||||
const SegmentCache *sc)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
|
||||
sc->selector);
|
||||
x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
|
||||
sc->base);
|
||||
x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
|
||||
sc->limit);
|
||||
x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
|
||||
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
|
||||
}
|
||||
|
||||
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
|
||||
SegmentCache *sc)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
unsigned int flags;
|
||||
|
||||
sc->selector = x86_lduw_phys(cs,
|
||||
addr + offsetof(struct vmcb_seg, selector));
|
||||
sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
|
||||
sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
|
||||
flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
|
||||
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
|
||||
}
|
||||
|
||||
/* Load a vmcb_seg record and install it directly into the CPU's
 * segment-register cache for 'seg_reg'. */
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector, sc.base, sc.limit,
                           sc.flags);
}
|
||||
|
||||
/*
 * VMRUN: enter the guest described by the VMCB whose guest-physical
 * address is in rAX.
 *
 * Sequence: (1) save host state into the hsave page, (2) cache the
 * intercept bitmaps so exits can be decided without re-reading the
 * VMCB, (3) load guest state from the VMCB, (4) optionally inject the
 * event requested in control.event_inj.  aflag selects the effective
 * address size of rAX (2 = 64-bit, else 32-bit); next_eip_addend is
 * the length of the VMRUN instruction, used to save the host resume RIP.
 */
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    /* May #VMEXIT to an outer hypervisor instead of running. */
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* Host resumes at the instruction after VMRUN. */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    if (nested_ctl & SVM_NPT_ENABLED) {
        /* Nested paging: record the nested CR3 and the host paging
           mode bits used to walk the nested page tables. */
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = 0;
        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* Virtual interrupt masking: guest IF is virtualized; HIF
           remembers whether the host had interrupts enabled. */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    /* Global interrupt flag is set on VMRUN. */
    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        /* NOTE(review): this inner 'cs' shadows the outer one; both
           resolve to env_cpu(env), so behavior is unaffected. */
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        // uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        // qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            // qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            // qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            // qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            // qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        // qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
        //               env->error_code);
    }
}
|
||||
|
||||
/* VMMCALL: hypercall instruction.  If intercepted, #VMEXIT to the
 * hypervisor; otherwise (no hypervisor handling it) raise #UD, matching
 * hardware behavior when VMMCALL is not intercepted. */
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
|
||||
|
||||
/*
 * VMLOAD: load the "hidden" processor state (FS/GS, TR, LDTR, syscall
 * and sysenter MSRs) from the VMCB whose guest-physical address is in
 * rAX.  aflag selects the rAX address width (2 = 64-bit, else 32-bit).
 */
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    /* May #VMEXIT instead of executing, if intercepted. */
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
    //               "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
    //               addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
    //                                                      save.fs.base)),
    //               env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
|
||||
|
||||
/*
 * VMSAVE: store the "hidden" processor state (FS/GS, TR, LDTR, syscall
 * and sysenter MSRs) into the VMCB whose guest-physical address is in
 * rAX — the exact mirror of helper_vmload.  aflag selects the rAX
 * address width (2 = 64-bit, else 32-bit).
 */
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    /* May #VMEXIT instead of executing, if intercepted. */
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
    //               "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
    //               addr, x86_ldq_phys(cs,
    //                                  addr + offsetof(struct vmcb, save.fs.base)),
    //               env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}
|
||||
|
||||
/*
 * STGI instruction: set the Global Interrupt Flag, allowing interrupt
 * delivery again.  May raise #VMEXIT first if STGI is intercepted.
 */
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}
|
||||
|
||||
/*
 * CLGI instruction: clear the Global Interrupt Flag, blocking interrupt
 * delivery.  May raise #VMEXIT first if CLGI is intercepted.
 */
void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}
|
||||
|
||||
/*
 * SKINIT instruction: not emulated.  After the (possible) intercept check,
 * always raises #UD.
 */
void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
|
||||
|
||||
/*
 * INVLPGA instruction: invalidate the TLB mapping for the virtual address
 * held in rAX.  @aflag == 2 means 64-bit address size; otherwise rAX is
 * truncated to 32 bits.  The ASID in ECX is currently ignored and the page
 * is flushed unconditionally.
 */
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    addr = (aflag == 2) ? env->regs[R_EAX] : (uint32_t)env->regs[R_EAX];

    /* XXX: could use the ASID to decide whether the flush is needed */
    tlb_flush_page(CPU(cpu), addr);
}
|
||||
|
||||
/*
 * Check whether the currently-running nested guest's VMCB configuration
 * intercepts the event described by @type, and if so raise #VMEXIT with
 * @type as the exit code and @param as exit_info_1.  @retaddr is the host
 * return address used to restore the guest CPU state before exiting.
 *
 * Does nothing when no nested guest is running (HF_GUEST_MASK clear).
 */
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    /* CR0..CR8 read/write and DR0..DR7 read/write intercepts are bitmaps
       indexed by the register number (type - base exit code). */
    if ((int32_t)type >= SVM_EXIT_READ_CR0 && type <= SVM_EXIT_READ_CR0 + 8) {
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
    } else if (type >= SVM_EXIT_WRITE_CR0 && type <= SVM_EXIT_WRITE_CR0 + 8) {
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
    } else if (type >= SVM_EXIT_READ_DR0 && type <= SVM_EXIT_READ_DR0 + 7) {
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
    } else if (type >= SVM_EXIT_WRITE_DR0 && type <= SVM_EXIT_WRITE_DR0 + 7) {
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
    } else if (type >= SVM_EXIT_EXCP_BASE && type <= SVM_EXIT_EXCP_BASE + 31) {
        /* Exception intercepts: one bit per vector 0..31. */
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
    } else if (type == SVM_EXIT_MSR) {
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1, ecx;

            /* The MSR permission map holds two bits (read/write) per MSR,
               in three ranges.  t1 is the byte offset into the map, t0 the
               bit offset of the pair within that byte. */
            ecx = env->regs[R_ECX];
#define XRANGE(x, a, b) (x >= a && x <= b)
            if (XRANGE(ecx, 0, 0x1fff)) {
                t0 = (ecx * 2) % 8;
                t1 = (ecx * 2) / 8;
            } else if (XRANGE(ecx, 0xc0000000, 0xc0001fff)) {
                t0 = (8192 + ecx - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
            } else if (XRANGE(ecx, 0xc0010000, 0xc0011fff)) {
                t0 = (16384 + ecx - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
            } else {
                /* MSR outside all mapped ranges: exit unconditionally. */
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
            }
#undef XRANGE

            /* NOTE(review): param appears to select read (0) / write (1)
               within the two-bit pair — confirm against callers. */
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
    } else if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
        /* Generic instruction/event intercepts in the main bitmap. */
        cpu_vmexit(env, type, param, retaddr);
    }
}
|
||||
|
||||
/*
 * TCG helper wrapper around cpu_svm_check_intercept_param(); GETPC() must
 * be captured here, in the helper itself, to identify the guest insn.
 */
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
|
||||
|
||||
/*
 * Check the VMCB I/O permission bitmap for an intercepted IN/OUT on @port.
 * @param is the pre-built IOIO exit_info_1 value (direction/size bits);
 * @next_eip_addend is added to env->eip to form the address of the next
 * instruction, stored in exit_info_2 before the #VMEXIT.
 */
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        /* Build a mask of one bit per accessed byte; NOTE(review): assumes
           bits 4..6 of param encode the access size in bytes, per the SVM
           IOIO exit-info format — confirm against the translator. */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
|
||||
|
||||
/*
 * Trigger a #VMEXIT from the nested guest.  Restores the CPU state for the
 * faulting instruction at @retaddr, records @exit_code/@exit_info_1, and
 * longjmps back to the cpu loop; the actual guest->host world switch is
 * performed later by do_vmexit().  Does not return.
 */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    /* EXCP_VMEXIT + exit_code is decoded by the exception handler. */
    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
|
||||
|
||||
/*
 * Complete a #VMEXIT: save the current (guest) state into the guest VMCB
 * at env->vm_vmcb, then reload the host state previously stashed in
 * env->vm_hsave, and record the exit code/info for the hypervisor.
 * The ordering below (CRs before EFER, etc.) matters for the hidden
 * hflags computation.
 */
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    /* Propagate the interrupt shadow into the VMCB's int_state field. */
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* Update V_TPR and V_IRQ in int_ctl from the current virtual state. */
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    /* CR0.PE is forced on: the host always runs in protected mode. */
    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    /* Surface any event that was being injected at the time of the exit. */
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}
|
||||
9373
qemu/target/i386/translate.c
Normal file
9373
qemu/target/i386/translate.c
Normal file
File diff suppressed because it is too large
Load Diff
1601
qemu/target/i386/unicorn.c
Normal file
1601
qemu/target/i386/unicorn.c
Normal file
File diff suppressed because it is too large
Load Diff
17
qemu/target/i386/unicorn.h
Normal file
17
qemu/target/i386/unicorn.h
Normal file
@@ -0,0 +1,17 @@
|
||||
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */

#ifndef UC_QEMU_TARGET_I386_H
#define UC_QEMU_TARGET_I386_H

// functions to read & write registers
// Each takes an array of register IDs and a parallel array of value
// pointers; `count` is the length of both arrays.
int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count);
int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count);
// Context variants operate on a saved uc_context snapshot instead of the
// live CPU state.
int x86_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count);
int x86_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count);

// Reset the x86 registers to their initial state.
void x86_reg_reset(struct uc_struct *uc);

// Hook up the i386 target's function pointers on the uc instance.
void x86_uc_init(struct uc_struct* uc);
#endif
|
||||
112
qemu/target/i386/xsave_helper.c
Normal file
112
qemu/target/i386/xsave_helper.c
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "cpu.h"
|
||||
|
||||
/*
 * Serialize the CPU's FPU/SSE/AVX/AVX-512/MPX/PKRU state from @cpu->env
 * into @buf, laid out as an XSAVE area image.
 */
void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = buf;

    uint16_t cwd, swd, twd;
    int i;
    memset(xsave, 0, sizeof(X86XSaveArea));
    twd = 0;
    /* Fold the FPU top-of-stack pointer back into bits 11..13 of the
       status word. */
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        /* FXSAVE-format tag word: bit set means register valid, so invert
           the internal "empty" flag. */
        twd |= (!env->fptags[i]) << i;
    }
    xsave->legacy.fcw = cwd;
    xsave->legacy.fsw = swd;
    xsave->legacy.ftw = twd;
    xsave->legacy.fpop = env->fpop;
    xsave->legacy.fpip = env->fpip;
    xsave->legacy.fpdp = env->fpdp;
    memcpy(&xsave->legacy.fpregs, env->fpregs,
           sizeof env->fpregs);
    xsave->legacy.mxcsr = env->mxcsr;
    xsave->header.xstate_bv = env->xstate_bv;
    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
           sizeof env->bnd_regs);
    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
           sizeof env->opmask_regs);

    /* Each 512-bit ZMM register is split across three XSAVE components:
       low 128 bits in the legacy XMM area, bits 128..255 in the AVX area,
       bits 256..511 in the ZMM_Hi256 area. */
    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
        stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
        stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
        stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
        stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
    }

#ifdef TARGET_X86_64
    /* ZMM16..ZMM31 live wholly in the Hi16_ZMM component. */
    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
           16 * sizeof env->xmm_regs[16]);
    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif

}
|
||||
|
||||
/*
 * Inverse of x86_cpu_xsave_all_areas(): load the CPU's FPU/SSE/AVX/
 * AVX-512/MPX/PKRU state in @cpu->env from the XSAVE area image @buf.
 */
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
{

    CPUX86State *env = &cpu->env;
    const X86XSaveArea *xsave = buf;

    int i;
    uint16_t cwd, swd, twd;
    cwd = xsave->legacy.fcw;
    swd = xsave->legacy.fsw;
    twd = xsave->legacy.ftw;
    env->fpop = xsave->legacy.fpop;
    /* Extract the top-of-stack pointer from bits 11..13 of the status
       word. */
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        /* FXSAVE tag word bit set means valid; internal flag means empty. */
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = xsave->legacy.fpip;
    env->fpdp = xsave->legacy.fpdp;
    env->mxcsr = xsave->legacy.mxcsr;
    memcpy(env->fpregs, &xsave->legacy.fpregs,
           sizeof env->fpregs);
    env->xstate_bv = xsave->header.xstate_bv;
    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
           sizeof env->bnd_regs);
    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
           sizeof env->opmask_regs);

    /* Reassemble each 512-bit ZMM register from its three XSAVE
       components (legacy XMM, AVX high halves, ZMM_Hi256). */
    for (i = 0; i < CPU_NB_REGS; i++) {
        const uint8_t *xmm = xsave->legacy.xmm_regs[i];
        const uint8_t *ymmh = xsave->avx_state.ymmh[i];
        const uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    /* ZMM16..ZMM31 come wholly from the Hi16_ZMM component. */
    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
           16 * sizeof env->xmm_regs[16]);
    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif

}
|
||||
Reference in New Issue
Block a user