import
35
qemu/include/exec/address-spaces.h
Normal file
@@ -0,0 +1,35 @@
/*
 * Internal memory management interfaces
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef EXEC_MEMORY_H
#define EXEC_MEMORY_H

/*
 * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
 * you're one of them.
 */

#include "exec/memory.h"

#ifndef CONFIG_USER_ONLY

/* Get the root memory region. This interface should only be used temporarily
 * until a proper bus interface is available.
 */
MemoryRegion *get_system_memory(struct uc_struct *uc);

extern AddressSpace address_space_memory;

#endif

#endif
309
qemu/include/exec/cpu-all.h
Normal file
@@ -0,0 +1,309 @@
/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "qemu/thread.h"
#include "qom/cpu.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
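
/* Worked example (illustrative, not from the original source): decoding
 * the naming scheme above for a 4-byte little-endian buffer.
 *
 *   uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
 *   uint32_t v = ldl_le_p(buf);   // integer load, 32 bits, little endian,
 *                                 // from a host pointer -> 0x12345678
 *   int s = ldsw_be_p(buf);       // integer load, signed, 16 bits,
 *                                 // big endian -> 0x7856
 *   stb_p(buf, 0xff);             // integer store, 8 bits (no endianness)
 */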

/* target-endianness CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "exec/user/abitypes.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \
                        (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
#endif

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
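
/* Worked example (illustrative): with a typical TARGET_PAGE_BITS of 12,
 * TARGET_PAGE_SIZE is 0x1000 and TARGET_PAGE_MASK is ~0xfff, so:
 *
 *   TARGET_PAGE_ALIGN(0x1234) == 0x2000    // rounds up to the next page
 *   TARGET_PAGE_ALIGN(0x2000) == 0x2000    // already aligned, unchanged
 *   (0x1234 & TARGET_PAGE_MASK) == 0x1000  // masking rounds down instead
 */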

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0020
#endif

#if defined(CONFIG_USER_ONLY)
//void page_dump(FILE *f);

int page_get_flags(target_ulong address);
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump. Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump. */

/* External hardware interrupt pending. This is typically used for
   interrupts from devices. */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB. This is typically used when some system-level device
   makes some change to the memory mapping. E.g. the a20 line change. */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU. */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal. */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts. Each target/cpu.h
   should define proper names based on these defines. */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts. These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed. These, therefore, are not masked while
   single-stepping within the debugger. */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000. */

/* The set of all bits that should be masked when single-stepping. */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

#if !defined(CONFIG_USER_ONLY)

/* memory API */

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

ram_addr_t last_ram_offset(struct uc_struct *uc);
void qemu_mutex_lock_ramlist(struct uc_struct *uc);
void qemu_mutex_unlock_ramlist(struct uc_struct *uc);
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */
124
qemu/include/exec/cpu-common.h
Normal file
@@ -0,0 +1,124 @@
#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent. */

struct uc_struct;

#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

#ifndef NEED_CPU_H
#include "exec/poison.h"
#endif

#include "qemu/bswap.h"
#include "qemu/queue.h"

typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
#  define RAM_ADDR_MAX UINT64_MAX
#  define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
#  define RAM_ADDR_MAX UINTPTR_MAX
#  define RAM_ADDR_FMT "%" PRIxPTR
#endif

extern ram_addr_t ram_size;

/* memory API */

typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);

void qemu_ram_remap(struct uc_struct *uc, ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
MemoryRegion *qemu_ram_addr_from_host(struct uc_struct* uc, void *ptr, ram_addr_t *ram_addr);
void qemu_ram_set_idstr(struct uc_struct *uc, ram_addr_t addr, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(struct uc_struct *uc, ram_addr_t addr);

bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(AddressSpace *as, hwaddr addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(as, addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(AddressSpace *as, hwaddr addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
}
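
/* Usage sketch (illustrative, not from the original source): copying a
 * block of guest physical memory through the wrappers above; "as" is
 * whatever AddressSpace the caller already holds.
 *
 *   uint8_t buf[16];
 *   cpu_physical_memory_read(as, 0x1000, buf, sizeof(buf));   // guest -> host
 *   cpu_physical_memory_write(as, 0x2000, buf, sizeof(buf));  // host -> guest
 */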
void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr,
                              hwaddr *plen,
                              int is_write);
void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len,
                               int is_write, hwaddr access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));

bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);

#ifdef NEED_CPU_H
uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#endif

void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len);
void cpu_flush_icache_range(AddressSpace *as, hwaddr start, int len);

extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_notdirty;

typedef void (RAMBlockIterFunc)(void *host_addr,
    ram_addr_t offset, ram_addr_t length, void *opaque);

void qemu_ram_foreach_block(struct uc_struct *uc, RAMBlockIterFunc func, void *opaque);

#endif

#endif /* !CPU_COMMON_H */
132
qemu/include/exec/cpu-defs.h
Normal file
@@ -0,0 +1,132 @@
/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif

#include "config.h"
#include <inttypes.h>
#include "qemu/osdep.h"
#include "qemu/queue.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page. The top bits are the same. This allows
   TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
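
/* Worked example (illustrative; assumes TB_JMP_CACHE_BITS == 12, i.e. a
 * 4096-entry jump cache):
 *
 *   TB_JMP_PAGE_BITS = 6       // 12 / 2
 *   TB_JMP_PAGE_SIZE = 64      // 1 << 6
 *   TB_JMP_ADDR_MASK = 0x3f    // intra-page hash bits
 *   TB_JMP_PAGE_MASK = 0xfc0   // 4096 - 64, per-page hash bits
 *
 * All 64 cache slots belonging to one guest page share the same top bits,
 * so a page's subset of the hash table can be cleared as one group.
 */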

#if !defined(CONFIG_USER_ONLY)
#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8

#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
     */
    target_ulong addr_read;
    target_ulong addr_write;
    target_ulong addr_code;
    /* Addend to virtual address to get host address. IO accesses
       use the corresponding iotlb value. */
    uintptr_t addend;
    /* padding to get a power of two size */
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                  (sizeof(target_ulong) * 3 +
                   ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
                   sizeof(uintptr_t))];
} CPUTLBEntry;

QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
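
/* Worked example (illustrative): the dummy[] size on a 64-bit host with a
 * 64-bit target (CPU_TLB_ENTRY_BITS == 5, so each entry is 32 bytes):
 *
 *   3 address fields         : 3 * 8 = 24 bytes
 *   alignment gap for addend : (-24) & 7 = 0 bytes
 *   addend                   : 8 bytes
 *   dummy                    : 32 - (24 + 0 + 8) = 0 bytes
 *
 * With a 32-bit target on the same host: 3 * 4 = 12, gap = (-12) & 7 = 4,
 * addend = 8, so dummy = 32 - 24 = 8 bytes of padding.
 */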

#define CPU_COMMON_TLB \
    /* The meaning of the MMU modes is defined in the target code. */  \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
    CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
    hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                           \
    hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                        \
    target_ulong tlb_flush_addr;                                        \
    target_ulong tlb_flush_mask;                                        \
    target_ulong vtlb_index;                                            \

#else

#define CPU_COMMON_TLB

#endif


#define CPU_TEMP_BUF_NLONGS 128

// Unicorn engine
// @invalid_addr: invalid memory access address
// @invalid_error: error code for memory access (1 = READ, 2 = WRITE)
#define CPU_COMMON \
    /* soft mmu support */ \
    CPU_COMMON_TLB \
    uint64_t invalid_addr; \
    int invalid_error;
#endif
400
qemu/include/exec/cpu_ldst.h
Normal file
@@ -0,0 +1,400 @@
/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * MMU mode suffixes are defined in target cpu.h.
 */
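
/* Worked example (illustrative, not from the original source): the
 * generated names combine operation, size and MMU-mode suffix. For a
 * target whose cpu.h defines MMU_MODE0_SUFFIX as _kernel and
 * MMU_MODE1_SUFFIX as _user, the templates included below produce,
 * among others:
 *
 *   uint32_t v = cpu_ldl_kernel(env, addr);   // 32-bit load, kernel mode
 *   cpu_stb_user(env, addr, 0x7f);            // 8-bit store, user mode
 *   uint64_t q = cpu_ldq_data(env, addr);     // 64-bit load, current mode
 */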
#ifndef CPU_LDST_H
#define CPU_LDST_H

#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    (abi_ulong)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
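
/* Usage sketch (illustrative; behavior depends on the GUEST_BASE
 * configuration above):
 *
 *   target_ulong gaddr = 0x400000;
 *   void *haddr = g2h(gaddr);      // host pointer = GUEST_BASE + 0x400000
 *   assert(h2g(haddr) == gaddr);   // h2g asserts validity, then subtracts
 */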

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)

#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)

#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)

#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)

#else

/* XXX: find something cleaner.
 * Furthermore, this is false for 64-bit targets
 */
#define ldul_user       ldl_user
#define ldul_kernel     ldl_kernel
#define ldul_hypv       ldl_hypv
#define ldul_executive  ldl_executive
#define ldul_supervisor ldl_supervisor

/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"

uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);

void helper_stb_mmu(CPUArchState *env, target_ulong addr,
                    uint8_t val, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr,
                    uint16_t val, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
                    uint32_t val, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr,
                    uint64_t val, int mmu_idx);

uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);

#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX

#define CPU_MMU_INDEX 1
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX

#if (NB_MMU_MODES >= 3)

#define CPU_MMU_INDEX 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */

#if (NB_MMU_MODES >= 4)

#define CPU_MMU_INDEX 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */

#if (NB_MMU_MODES >= 5)

#define CPU_MMU_INDEX 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */

#if (NB_MMU_MODES >= 6)

#define CPU_MMU_INDEX 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */

#if (NB_MMU_MODES > 6)
#error "NB_MMU_MODES > 6 is not supported for now"
#endif /* (NB_MMU_MODES > 6) */

/* these accesses are slower; they must be as rare as possible */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX

#define ldub(p) ldub_data(p)
#define ldsb(p) ldsb_data(p)
#define lduw(p) lduw_data(p)
#define ldsw(p) ldsw_data(p)
#define ldl(p) ldl_data(p)
#define ldq(p) ldq_data(p)

#define stb(p, v) stb_data(p, v)
#define stw(p, v) stw_data(p, v)
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)

#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"

#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#undef SOFTMMU_CODE_ACCESS

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual index in the TCG softmmu TLB.
 * If the TLB contains a host virtual address suitable for direct RAM
 * access, then return it. Otherwise (TLB miss, TLB entry is for an
 * I/O access, etc) return NULL.
 *
 * This is the equivalent of the initial fast-path code used by
 * TCG backends for guest load and store accesses.
 */
static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
                                      int access_type, int mmu_idx)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr;
    uintptr_t haddr;

    switch (access_type) {
    case 0:
        tlb_addr = tlbentry->addr_read;
        break;
    case 1:
        tlb_addr = tlbentry->addr_write;
        break;
    case 2:
        tlb_addr = tlbentry->addr_code;
        break;
    default:
        g_assert_not_reached();
    }

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        return NULL;
    }

    if (tlb_addr & ~TARGET_PAGE_MASK) {
        /* IO access */
        return NULL;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    return (void *)haddr;
}
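
/* Usage sketch (illustrative, not from the original source): try the TLB
 * fast path first, then fall back to the softmmu helper, which fills the
 * TLB as a side effect.
 *
 *   uint32_t load_word(CPUArchState *env, target_ulong vaddr, int mmu_idx)
 *   {
 *       void *host = tlb_vaddr_to_host(env, vaddr, 0, mmu_idx); // 0 = read
 *       if (host) {
 *           return ldl_p(host);                     // direct RAM access
 *       }
 *       return helper_ldl_mmu(env, vaddr, mmu_idx); // slow path
 *   }
 */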

#endif /* defined(CONFIG_USER_ONLY) */

#endif /* CPU_LDST_H */
193
qemu/include/exec/cpu_ldst_template.h
Normal file
@@ -0,0 +1,193 @@
/*
 * Software MMU support
 *
 * Generate inline load/store functions for one MMU mode and data
 * size.
 *
 * Generate a store function as well as signed and unsigned loads. For
 * 32 and 64 bit cases, also generate floating point functions with
 * the same size.
 *
 * Not used directly but included from cpu_ldst.h.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif

#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
#endif
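
/* Worked example (illustrative): how one instantiation expands. With
 * DATA_SIZE == 4 and MEMSUFFIX == _data, the glue() token-pasting below
 * yields
 *
 *   glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)   -> cpu_ldl_data
 *   glue(glue(helper_ld, SUFFIX), MMUSUFFIX) -> helper_ldl_mmu
 *
 * i.e. each #include of this header emits one family of accessors.
 */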

/* generic load/store macros */

static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
    int page_index;
    RES_TYPE res;
    target_ulong addr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
    } else {
        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
    }
    return res;
}

#if DATA_SIZE <= 2
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
    int res, page_index;
    target_ulong addr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
                               MMUSUFFIX)(env, addr, mmu_idx);
    } else {
        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
    }
    return res;
}
#endif

#ifndef SOFTMMU_CODE_ACCESS

/* generic store macro */

static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
                                      RES_TYPE v)
{
    int page_index;
    target_ulong addr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
    } else {
        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        glue(glue(st, SUFFIX), _raw)(hostaddr, v);
    }
}



#if DATA_SIZE == 8
static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
                                                target_ulong ptr)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
    return u.d;
}

static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
                                             target_ulong ptr, float64 v)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.d = v;
    glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
}
#endif /* DATA_SIZE == 8 */

#if DATA_SIZE == 4
static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
                                                target_ulong ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
    return u.f;
}

static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
                                             target_ulong ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
}
#endif /* DATA_SIZE == 4 */

#endif /* !SOFTMMU_CODE_ACCESS */

#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef MMUSUFFIX
#undef ADDR_READ
48
qemu/include/exec/cputlb.h
Normal file
@@ -0,0 +1,48 @@
/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPUTLB_H
#define CPUTLB_H

#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr);
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                             target_ulong vaddr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                           uintptr_t start, uintptr_t length);
void cpu_tlb_reset_dirty_all(struct uc_struct *uc, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
//extern int tlb_flush_count;

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr);

#endif
#endif
380
qemu/include/exec/exec-all.h
Normal file
@@ -0,0 +1,380 @@
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* allows seeing translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode. Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
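
/* Worked example (illustrative): MAX_OPC_PARAM_ARGS is 5 + 1 = 6, so
 * MAX_OPC_PARAM is 4 + 2*6 = 16 parameters on a 32-bit host and
 * 4 + 1*6 = 10 on a 64-bit host, matching the "4 + 2N" / "4 + N" rule
 * in the comment above.
 */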

/* Maximum size a TCG op can expand to. This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);

void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env, void *opaque);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);

void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
void tlb_flush(CPUState *cpu, int flush_global);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);

void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);

#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access. */

    void *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

void tb_free(struct uc_struct *uc, TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(struct uc_struct *uc,
                        TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
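
/* Worked example (illustrative, not from the original source): decoding
 * the pointer tagging used by the circular jump list. The two low bits
 * of jmp_first/jmp_next say how to continue the walk.
 *
 *   uintptr_t tagged = (uintptr_t)some_tb->jmp_first;   // some_tb: any TB
 *   TranslationBlock *p = (TranslationBlock *)(tagged & ~(uintptr_t)3);
 *   int n = tagged & 3;   // 0 or 1: follow p->jmp_next[n]; 2: back at head
 */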

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC()  (GETRA() - GETPC_ADJ)

#if !defined(CONFIG_USER_ONLY)

void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));

struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);


void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);
#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/**
 * cpu_can_do_io:
 * @cpu: The CPU for which to check IO.
 *
 * Deterministic execution requires that IO only be performed on the last
 * instruction of a TB so that interrupts take effect immediately.
 *
 * Returns: %true if memory-mapped IO is safe, %false otherwise.
 */
static inline bool cpu_can_do_io(CPUState *cpu)
{
    return true;
}

void phys_mem_clean(struct uc_struct* uc);

#endif
72
qemu/include/exec/gen-icount.h
Normal file
@@ -0,0 +1,72 @@
#ifndef GEN_ICOUNT_H
#define GEN_ICOUNT_H 1

#include "qemu/timer.h"

/* Helpers for instruction counting code generation.  */

//static TCGArg *icount_arg;
//static int icount_label;

static inline void gen_tb_start(TCGContext *tcg_ctx)
{
//  TCGv_i32 count;
    TCGv_i32 flag;

    tcg_ctx->exitreq_label = gen_new_label(tcg_ctx);
    flag = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env,
                   offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
    tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label);
    tcg_temp_free_i32(tcg_ctx, flag);

#if 0
    if (!use_icount)
        return;

    icount_label = gen_new_label();
    count = tcg_temp_local_new_i32();
    tcg_gen_ld_i32(count, cpu_env,
                   -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
    /* This is a horrid hack to allow fixing up the value later.  */
    icount_arg = tcg_ctx.gen_opparam_ptr + 1;
    tcg_gen_subi_i32(count, count, 0xdeadbeef);

    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
    tcg_gen_st16_i32(count, cpu_env,
                     -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
    tcg_temp_free_i32(count);
#endif
}

static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num_insns)
{
    gen_set_label(tcg_ctx, tcg_ctx->exitreq_label);
    tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + TB_EXIT_REQUESTED);

#if 0
    if (use_icount) {
        *icount_arg = num_insns;
        gen_set_label(icount_label);
        tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
    }
#endif
}
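
/* A hedged usage sketch (assumption, not part of this header): a target's
 * translator brackets per-TB code generation with these two helpers, so
 * every TB begins with a tcg_exit_req check and ends with the exit stub.
 */
#if 0
gen_tb_start(tcg_ctx);
while (/* more guest insns fit in this TB */) {
    /* translate one guest instruction, incrementing num_insns */
}
gen_tb_end(tcg_ctx, tb, num_insns);
#endif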

#if 0
static inline void gen_io_start(void)
{
    TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_temp_free_i32(tmp);
}

static inline void gen_io_end(void)
{
    TCGv_i32 tmp = tcg_const_i32(0);
    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_temp_free_i32(tmp);
}
#endif

#endif
70
qemu/include/exec/helper-gen.h
Normal file
@@ -0,0 +1,70 @@
/* Helper file for declaring TCG helper functions.
   This one expands generation functions for tcg opcodes.  */

#ifndef HELPER_GEN_H
#define HELPER_GEN_H 1

#include <exec/helper-head.h>

#define DEF_HELPER_FLAGS_0(name, flags, ret) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl0(ret)) \
{ \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \
}

#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1)) \
{ \
    TCGArg args[1] = { dh_arg(t1, 1) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \
}

#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
{ \
    TCGArg args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \
}

#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
{ \
    TCGArg args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \
}

#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
    dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
{ \
    TCGArg args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
                       dh_arg(t3, 3), dh_arg(t4, 4) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \
}

#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
    dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
{ \
    TCGArg args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
                       dh_arg(t4, 4), dh_arg(t5, 5) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \
}
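
/* A hedged example (hypothetical helper name): given a target helper.h entry
 *     DEF_HELPER_2(my_add, i32, i32, i32)
 * the macros above expand it into a generator roughly equivalent to:
 */
#if 0
static inline void gen_helper_my_add(TCGContext *tcg_ctx, TCGv_i32 retval,
                                     TCGv_i32 arg1, TCGv_i32 arg2)
{
    TCGArg args[2] = { GET_TCGV_i32(arg1), GET_TCGV_i32(arg2) };
    tcg_gen_callN(tcg_ctx, helper_my_add, GET_TCGV_i32(retval), 2, args);
}
#endif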

#include "helper.h"
#include "tcg-runtime.h"

#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
#undef GEN_HELPER

#endif /* HELPER_GEN_H */
134
qemu/include/exec/helper-head.h
Normal file
@@ -0,0 +1,134 @@
/* Helper file for declaring TCG helper functions.
   Used by other helper files.

   Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
   functions.  Names should be specified without the helper_ prefix, and
   the return and argument types specified.  3 basic types are understood
   (i32, i64 and ptr).  Additional aliases are provided for convenience and
   to match the types used by the C helper implementation.

   The target helper.h should be included in all files that use/define
   helper functions.  This will ensure that function prototypes are
   consistent.  In addition it should be included two extra times by
   helper.c, defining:
    GEN_HELPER 1 to produce op generation functions (gen_helper_*)
    GEN_HELPER 2 to do runtime registration of helper functions.
 */

#ifndef DEF_HELPER_H
#define DEF_HELPER_H 1

#include "qemu/osdep.h"

#define HELPER(name) glue(helper_, name)

#define GET_TCGV_i32 GET_TCGV_I32
#define GET_TCGV_i64 GET_TCGV_I64
#define GET_TCGV_ptr GET_TCGV_PTR

/* Some types that make sense in C, but not for TCG.  */
#define dh_alias_i32 i32
#define dh_alias_s32 i32
#define dh_alias_int i32
#define dh_alias_i64 i64
#define dh_alias_s64 i64
#define dh_alias_f32 i32
#define dh_alias_f64 i64
#ifdef TARGET_LONG_BITS
# if TARGET_LONG_BITS == 32
#  define dh_alias_tl i32
# else
#  define dh_alias_tl i64
# endif
#endif
#define dh_alias_ptr ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias_env ptr
#define dh_alias(t) glue(dh_alias_, t)

#define dh_ctype_i32 uint32_t
#define dh_ctype_s32 int32_t
#define dh_ctype_int int
#define dh_ctype_i64 uint64_t
#define dh_ctype_s64 int64_t
#define dh_ctype_f32 float32
#define dh_ctype_f64 float64
#define dh_ctype_tl target_ulong
#define dh_ctype_ptr void *
#define dh_ctype_void void
#define dh_ctype_noreturn void QEMU_NORETURN
#define dh_ctype_env CPUArchState *
#define dh_ctype(t) dh_ctype_##t

/* We can't use glue() here because it falls foul of C preprocessor
   recursive expansion rules.  */
#define dh_retvar_decl0_void void
#define dh_retvar_decl0_noreturn void
#define dh_retvar_decl0_i32 TCGv_i32 retval
#define dh_retvar_decl0_i64 TCGv_i64 retval
#define dh_retvar_decl0_ptr TCGv_ptr retval
#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))

#define dh_retvar_decl_void
#define dh_retvar_decl_noreturn
#define dh_retvar_decl_i32 TCGv_i32 retval,
#define dh_retvar_decl_i64 TCGv_i64 retval,
#define dh_retvar_decl_ptr TCGv_ptr retval,
#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))

#define dh_retvar_void TCG_CALL_DUMMY_ARG
#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
#define dh_retvar_i32 GET_TCGV_i32(retval)
#define dh_retvar_i64 GET_TCGV_i64(retval)
#define dh_retvar_ptr GET_TCGV_ptr(retval)
#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))

#define dh_is_64bit_void 0
#define dh_is_64bit_noreturn 0
#define dh_is_64bit_i32 0
#define dh_is_64bit_i64 1
#define dh_is_64bit_ptr (sizeof(void *) == 8)
#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))

#define dh_is_signed_void 0
#define dh_is_signed_noreturn 0
#define dh_is_signed_i32 0
#define dh_is_signed_s32 1
#define dh_is_signed_i64 0
#define dh_is_signed_s64 1
#define dh_is_signed_f32 0
#define dh_is_signed_f64 0
#define dh_is_signed_tl 0
#define dh_is_signed_int 1
/* ??? This is highly specific to the host cpu.  There are even special
   extension instructions that may be required, e.g. ia64's addp4.  But
   for now we don't support any 64-bit targets with 32-bit pointers.  */
#define dh_is_signed_ptr 0
#define dh_is_signed_env dh_is_signed_ptr
#define dh_is_signed(t) dh_is_signed_##t

#define dh_sizemask(t, n) \
  ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1)))
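
/* A hedged worked example (not in the original): for argument slot n = 1,
 * a signed 32-bit type (s32) packs as dh_is_64bit = 0 at bit 2 and
 * dh_is_signed = 1 at bit 3, so dh_sizemask(s32, 1) == 0x8; an i64 in the
 * same slot yields 0x4 (64-bit flag set, signed flag clear).
 */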

#define dh_arg(t, n) \
  glue(GET_TCGV_, dh_alias(t))(glue(arg, n))

#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)

#define DEF_HELPER_0(name, ret) \
    DEF_HELPER_FLAGS_0(name, 0, ret)
#define DEF_HELPER_1(name, ret, t1) \
    DEF_HELPER_FLAGS_1(name, 0, ret, t1)
#define DEF_HELPER_2(name, ret, t1, t2) \
    DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
#define DEF_HELPER_3(name, ret, t1, t2, t3) \
    DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
    DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
    DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)

/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */

#endif /* DEF_HELPER_H */
39
qemu/include/exec/helper-proto.h
Normal file
@@ -0,0 +1,39 @@
/* Helper file for declaring TCG helper functions.
   This one expands prototypes for the helper functions.  */

#ifndef HELPER_PROTO_H
#define HELPER_PROTO_H 1

#include <exec/helper-head.h>

#define DEF_HELPER_FLAGS_0(name, flags, ret) \
dh_ctype(ret) HELPER(name) (void);

#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1));

#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));

#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));

#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4));

#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4), dh_ctype(t5));

#include "helper.h"
#include "tcg-runtime.h"

#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5

#endif /* HELPER_PROTO_H */
48
qemu/include/exec/helper-tcg.h
Normal file
@@ -0,0 +1,48 @@
/* Helper file for declaring TCG helper functions.
   This one defines data structures private to tcg.c.  */

#ifndef HELPER_TCG_H
#define HELPER_TCG_H 1

#include <exec/helper-head.h>

#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
  { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) },

#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
  { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) },

#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
  { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) },

#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
  { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) },

#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
  { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) },

#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
  { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
    | dh_sizemask(t5, 5) },

#include "helper.h"
#include "tcg-runtime.h"

#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5

#endif /* HELPER_TCG_H */
22
qemu/include/exec/hwaddr.h
Normal file
@@ -0,0 +1,22 @@
/* Define hwaddr if it exists.  */

#ifndef HWADDR_H
#define HWADDR_H

#define HWADDR_BITS 64
/* hwaddr is the type of a physical address (its size can
   be different from 'target_ulong').  */

#include <stdint.h>

typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
#define TARGET_FMT_plx "%016" PRIx64
#define HWADDR_PRId PRId64
#define HWADDR_PRIi PRIi64
#define HWADDR_PRIo PRIo64
#define HWADDR_PRIu PRIu64
#define HWADDR_PRIx PRIx64
#define HWADDR_PRIX PRIX64

#endif
59
qemu/include/exec/ioport.h
Normal file
@@ -0,0 +1,59 @@
/*
 * defines ioport related functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/**************************************************************************
 * IO ports API
 */

#ifndef IOPORT_H
#define IOPORT_H

#include "qemu-common.h"
#include "qom/object.h"
#include "exec/memory.h"

typedef uint32_t pio_addr_t;
#define FMT_pioaddr PRIx32

#define MAX_IOPORTS  (64 * 1024)
#define IOPORTS_MASK (MAX_IOPORTS - 1)

typedef struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    uint32_t (*read)(void *opaque, uint32_t address);
    void (*write)(void *opaque, uint32_t address, uint32_t data);
    uint32_t base; /* private field */
} MemoryRegionPortio;

#define PORTIO_END_OF_LIST() { }

#ifndef CONFIG_USER_ONLY
extern const MemoryRegionOps unassigned_io_ops;
#endif

void cpu_outb(struct uc_struct *uc, pio_addr_t addr, uint8_t val);
void cpu_outw(struct uc_struct *uc, pio_addr_t addr, uint16_t val);
void cpu_outl(struct uc_struct *uc, pio_addr_t addr, uint32_t val);
uint8_t cpu_inb(struct uc_struct *uc, pio_addr_t addr);
uint16_t cpu_inw(struct uc_struct *uc, pio_addr_t addr);
uint32_t cpu_inl(struct uc_struct *uc, pio_addr_t addr);
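
/* A hedged sketch (illustrative names, not in the original header): a
 * MemoryRegionPortio table describes a device's port range, terminated
 * with PORTIO_END_OF_LIST().
 */
#if 0
static const MemoryRegionPortio example_portio[] = {
    { 0x00, 4, 1, .read  = example_ioport_read8,
                  .write = example_ioport_write8 },  /* 4 byte-wide ports */
    PORTIO_END_OF_LIST(),
};
#endif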

#endif /* IOPORT_H */
36
qemu/include/exec/memory-internal.h
Normal file
@@ -0,0 +1,36 @@
/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
typedef struct AddressSpaceDispatch AddressSpaceDispatch;

void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

extern const MemoryRegionOps unassigned_mem_ops;

bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write);

void address_space_unregister(AddressSpace *as);

#endif
#endif
942
qemu/include/exec/memory.h
Normal file
@@ -0,0 +1,942 @@
/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qapi/error.h"
#include "qom/object.h"

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(uc, obj) \
        OBJECT_CHECK(uc, MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(struct uc_struct* uc, void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(struct uc_struct* uc, void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
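
/* A hedged usage sketch (assumed device and callback names): a device fills
 * a MemoryRegionOps with its read/write callbacks and access-size
 * constraints, then registers it with memory_region_init_io() as declared
 * further below.
 */
#if 0
static uint64_t example_read(struct uc_struct *uc, void *opaque,
                             hwaddr addr, unsigned size)
{
    return 0;                      /* device register value at offset addr */
}

static void example_write(struct uc_struct *uc, void *opaque,
                          hwaddr addr, uint64_t data, unsigned size)
{
    /* latch data into the device register at offset addr */
}

static const MemoryRegionOps example_ops = {
    .read = example_read,
    .write = example_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,    /* reject wider guest accesses */
};
#endif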

typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address. */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    const MemoryRegionIOMMUOps *iommu_ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    uint64_t align;
    bool subpage;
    bool terminates;
    bool romd_mode;
    bool ram;
    bool skip_dump;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;
    struct uc_struct *uc;
};

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    char *name;
    MemoryRegion *root;
    struct FlatView *current_map;
    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;
    struct uc_struct* uc;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(struct uc_struct *uc, MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling.
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(struct uc_struct *uc, MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size);

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a function that translates addresses into the @target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_skip_dump: check whether a memory region should not be
 *                             dumped
 *
 * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_skip_dump(MemoryRegion *mr);

/**
 * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
 *                              region
 *
 * @mr: the memory region being queried
 */
void memory_region_set_skip_dump(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_iommu(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the notifier to be added; the notifier receives a pointer to an
 *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
 *     valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(Notifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);
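
/* A hedged sketch (region names assumed): layering subregions in a
 * container.  The higher-priority MMIO window hides the RAM beneath it
 * where they overlap; elsewhere the RAM shows through.
 */
#if 0
memory_region_add_subregion(system, 0x00000000, ram);         /* priority 0 */
memory_region_add_subregion_overlap(system, 0xfec00000, mmio, 1);
#endif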

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @as to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(struct uc_struct*);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(struct uc_struct*);

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(struct uc_struct* uc, MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(struct uc_struct* uc, MemoryListener *listener);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root, const char *name);


/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @is_write: indicates the transfer direction
 */
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len);

/**
 * address_space_read: read from an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
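
/* A hedged usage sketch (buffer and addresses assumed): bounce a word
 * through an address space; the return value reports unassigned memory
 * or IOMMU faults.
 */
#if 0
uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
if (address_space_write(as, 0x1000, buf, sizeof(buf))) {
    /* write hit unassigned memory or faulted */
}
if (address_space_read(as, 0x1000, buf, sizeof(buf))) {
    /* read failed the same way */
}
#endif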

/* address_space_translate: translate an address range in an address space
 * into a MemoryRegion and an address range into that section
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @addr: address within that address space
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);


void memory_register_types(struct uc_struct *uc);

int memory_map(struct uc_struct *uc, ram_addr_t begin, size_t size);
int memory_free(struct uc_struct *uc);

#endif

#endif
62
qemu/include/exec/poison.h
Normal file
@@ -0,0 +1,62 @@
/* Poison identifiers that should not be used when building
   target independent device code.  */

#ifndef HW_POISON_H
#define HW_POISON_H
#ifdef __GNUC__

#pragma GCC poison TARGET_I386
#pragma GCC poison TARGET_X86_64
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
#pragma GCC poison TARGET_LM32
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MIPS
#pragma GCC poison TARGET_MIPS64
#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPCEMB
#pragma GCC poison TARGET_PPC64
#pragma GCC poison TARGET_ABI32
#pragma GCC poison TARGET_SH4
#pragma GCC poison TARGET_SPARC
#pragma GCC poison TARGET_SPARC64

#pragma GCC poison TARGET_WORDS_BIGENDIAN
#pragma GCC poison BSWAP_NEEDED

#pragma GCC poison TARGET_LONG_BITS
#pragma GCC poison TARGET_FMT_lx
#pragma GCC poison TARGET_FMT_ld

#pragma GCC poison TARGET_PAGE_SIZE
#pragma GCC poison TARGET_PAGE_MASK
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN

#pragma GCC poison CPUArchState

#pragma GCC poison lduw_phys
#pragma GCC poison ldl_phys
#pragma GCC poison ldq_phys
#pragma GCC poison stl_phys_notdirty
#pragma GCC poison stw_phys
#pragma GCC poison stl_phys
#pragma GCC poison stq_phys

#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
#pragma GCC poison CPU_INTERRUPT_DEBUG
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2

#endif
#endif
|
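
For illustration: once an identifier is poisoned, any later mention of
it is a hard compile-time error under GCC and Clang, which is how
target-independent code is kept honest:

#pragma GCC poison TARGET_PAGE_SIZE
int n = TARGET_PAGE_SIZE;  /* error: attempt to use poisoned "TARGET_PAGE_SIZE" */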
187
qemu/include/exec/ram_addr.h
Normal file
@@ -0,0 +1,187 @@
/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#include "uc_priv.h"

#ifndef CONFIG_USER_ONLY

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(struct uc_struct *uc, ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(struct uc_struct *uc, ram_addr_t addr);
void *qemu_get_ram_ptr(struct uc_struct *uc, ram_addr_t addr);
void qemu_ram_free(struct uc_struct *uc, ram_addr_t addr);
void qemu_ram_free_from_ptr(struct uc_struct *uc, ram_addr_t addr);

static inline bool cpu_physical_memory_get_dirty(struct uc_struct *uc, ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(uc->ram_list.dirty_memory[client], end, page);

    return next < end;
}
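
A worked example of the page arithmetic above, assuming 4 KiB target
pages (TARGET_PAGE_BITS == 12): for start = 0x2345 and length = 0x100,
end = TARGET_PAGE_ALIGN(0x2445) >> 12 = 0x3000 >> 12 = 3 and
page = 0x2345 >> 12 = 2, so only the bit for page 2 is scanned, and the
range counts as dirty exactly when find_next_bit() returns an index
below 3.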

static inline bool cpu_physical_memory_get_clean(struct uc_struct *uc, ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(uc->ram_list.dirty_memory[client], end, page);

    return next < end;
}

static inline bool cpu_physical_memory_get_dirty_flag(struct uc_struct *uc, ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(uc, addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(struct uc_struct *uc, ram_addr_t addr)
{
    /* The page is considered "clean" unless it is dirty for all three
     * clients; dirty in only some of them still returns true here. */
    bool vga = cpu_physical_memory_get_dirty_flag(uc, addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(uc, addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(uc, addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline bool cpu_physical_memory_range_includes_clean(struct uc_struct *uc, ram_addr_t start,
                                                            ram_addr_t length)
{
    bool vga = cpu_physical_memory_get_clean(uc, start, length, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_clean(uc, start, length, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_clean(uc, start, length, DIRTY_MEMORY_MIGRATION);
    return vga || code || migration;
}

static inline void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit(addr >> TARGET_PAGE_BITS, uc->ram_list.dirty_memory[client]);
}

static inline void cpu_physical_memory_set_dirty_range_nocode(struct uc_struct *uc, ram_addr_t start,
                                                              ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
}

static inline void cpu_physical_memory_set_dirty_range(struct uc_struct *uc, ram_addr_t start,
                                                       ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
}
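
A hedged usage sketch: code that writes guest RAM through a raw host
pointer is expected to flag the pages afterwards, for example

    memcpy(qemu_get_ram_ptr(uc, ram_addr), data, size);
    cpu_physical_memory_set_dirty_range(uc, ram_addr, size);

so that all three clients, including DIRTY_MEMORY_CODE and hence the
translated-code bookkeeping, see the write (data and size are
placeholders here).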

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(struct uc_struct *uc, unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                uc->ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
                uc->ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
                uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
            }
        }
    } else {
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(uc, ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

static inline void cpu_physical_memory_clear_dirty_range(struct uc_struct *uc, ram_addr_t start,
                                                         ram_addr_t length,
                                                         unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(uc->ram_list.dirty_memory[client], page, end - page);
}

void cpu_physical_memory_reset_dirty(struct uc_struct *uc,
                                     ram_addr_t start, ram_addr_t length, unsigned client);

#endif
#endif
79
qemu/include/exec/softmmu-semi.h
Normal file
@@ -0,0 +1,79 @@
/*
 * Helper routines to provide target memory access for semihosting
 * syscalls in system emulation mode.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */
#ifndef SOFTMMU_SEMI_H
#define SOFTMMU_SEMI_H 1

static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr)
{
    uint32_t val;

    cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 0);
    return tswap32(val);
}
static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr)
{
    uint8_t val;

    cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &val, 1, 0);
    return val;
}

#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
#define get_user_ual(arg, p) get_user_u32(arg, p)

static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val)
{
    val = tswap32(val);
    cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 1);
}
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
#define put_user_ual(arg, p) put_user_u32(arg, p)

static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len,
                               int copy)
{
    uint8_t *p;
    /* TODO: Make this something that isn't fixed size. */
    p = malloc(len);
    if (p && copy) {
        cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 0);
    }
    return p;
}
#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr)
{
    char *p;
    char *s;
    uint8_t c;
    /* TODO: Make this something that isn't fixed size. */
    s = p = malloc(1024);
    if (!s) {
        return NULL;
    }
    do {
        cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &c, 1, 0);
        addr++;
        *(p++) = c;
    } while (c);
    return s;
}
#define lock_user_string(p) softmmu_lock_user_string(env, p)
static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
                                target_ulong len)
{
    if (len) {
        cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 1);
    }
    free(p);
}
#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)

#endif
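
A sketch of how these helpers combine in a semihosting handler (a
hypothetical SYS_OPEN-style flow; env must be in scope, as the macros
assume, and open() needs <fcntl.h>):

char *fname = lock_user_string(args);  /* copy the NUL-terminated name in */
if (!fname) {
    return -1;
}
int fd = open(fname, O_RDONLY);
unlock_user(fname, args, 0);           /* len 0: nothing to copy back */
return fd;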
53
qemu/include/exec/spinlock.h
Normal file
@@ -0,0 +1,53 @@
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

/* configure guarantees us that we have pthreads on any host except
 * mingw32, which doesn't support any of the user-only targets.
 * So we can simply assume we have pthread mutexes here.
 */
#ifndef QEMU_EXEC_SPINLOCK_H
#define QEMU_EXEC_SPINLOCK_H

#if defined(CONFIG_USER_ONLY)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

#else

/* Empty implementations, on the theory that system mode emulation
 * is single-threaded.  This means that these functions should only
 * be used from code run in the TCG cpu thread, and cannot protect
 * data structures which might also be accessed from the IO thread
 * or from signal handlers.
 */
typedef int spinlock_t;
#define SPIN_LOCK_UNLOCKED 0

static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

#endif
#endif
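
Usage is identical in both configurations (sketch; tb_lock is a
hypothetical variable):

static spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

spin_lock(&tb_lock);
/* ... touch state shared with other user-mode threads ... */
spin_unlock(&tb_lock);

Under CONFIG_USER_ONLY these expand to real pthread mutex operations;
in system mode they compile to nothing, on the stated assumption that
TCG runs single-threaded.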