import Unicorn2

Nguyen Anh Quynh
2021-10-03 22:14:44 +08:00
parent 772558119a
commit aaaea14214
837 changed files with 368717 additions and 200912 deletions


@@ -1,22 +0,0 @@
/*
* Misc ARM declarations
*
* Copyright (c) 2006 CodeSourcery.
* Written by Paul Brook
*
* This code is licensed under the LGPL.
*
*/
#ifndef ARM_MISC_H
#define ARM_MISC_H
#include "exec/memory.h"
void tosa_machine_init(struct uc_struct *uc);
void machvirt_machine_init(struct uc_struct *uc); // ARM64
void arm_cpu_register_types(void *opaque);
void aarch64_cpu_register_types(void *opaque);
#endif /* !ARM_MISC_H */


@@ -1,82 +0,0 @@
/* Declarations for use by board files for creating devices. */
#ifndef HW_BOARDS_H
#define HW_BOARDS_H
#include "qemu/typedefs.h"
#include "sysemu/accel.h"
#include "hw/qdev.h"
#include "qom/object.h"
#include "uc_priv.h"
typedef int QEMUMachineInitFunc(struct uc_struct *uc, MachineState *ms);
typedef void QEMUMachineResetFunc(void);
struct QEMUMachine {
const char *family; /* NULL iff @name identifies a standalone machtype */
const char *name;
QEMUMachineInitFunc *init;
QEMUMachineResetFunc *reset;
int max_cpus;
int is_default;
int arch;
};
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
const char *name,
uint64_t ram_size);
void qemu_register_machine(struct uc_struct *uc, QEMUMachine *m, const char *type_machine,
void (*init)(struct uc_struct *uc, ObjectClass *oc, void *data));
#define TYPE_MACHINE_SUFFIX "-machine"
#define TYPE_MACHINE "machine"
#undef MACHINE /* BSD defines it and QEMU does not use it */
#define MACHINE(uc, obj) \
OBJECT_CHECK(uc, MachineState, (obj), TYPE_MACHINE)
#define MACHINE_GET_CLASS(uc, obj) \
OBJECT_GET_CLASS(uc, MachineClass, (obj), TYPE_MACHINE)
#define MACHINE_CLASS(uc, klass) \
OBJECT_CLASS_CHECK(uc, MachineClass, (klass), TYPE_MACHINE)
MachineClass *find_default_machine(struct uc_struct *uc, int arch);
/**
* MachineClass:
* @qemu_machine: #QEMUMachine
*/
struct MachineClass {
/*< private >*/
ObjectClass parent_class;
/*< public >*/
const char *family; /* NULL iff @name identifies a standalone machtype */
const char *name;
int (*init)(struct uc_struct *uc, MachineState *state);
void (*reset)(void);
int max_cpus;
int is_default;
int arch;
};
/**
* MachineState:
*/
struct MachineState {
/*< private >*/
Object parent_obj;
/*< public >*/
ram_addr_t ram_size;
ram_addr_t maxram_size;
const char *cpu_model;
struct uc_struct *uc;
AccelState *accelerator;
};
void machine_register_types(struct uc_struct *uc);
#endif
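
To make the machine API above concrete, here is a hedged sketch — the board name, init function and class-init helper are illustrative, not part of the tree — of how a board could describe itself with QEMUMachine and register its QOM type through qemu_register_machine():

/* Illustrative only: a minimal board description and its registration. */
static int my_machine_init(struct uc_struct *uc, MachineState *ms)
{
    /* allocate RAM, create the CPU, map devices, ... */
    return 0;
}

static QEMUMachine my_machine = {
    .name = "my-board",
    .init = my_machine_init,
    .max_cpus = 1,
    .is_default = 1,
};

static void my_machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(uc, oc);

    mc->init = my_machine.init;      /* same signature as QEMUMachineInitFunc */
    mc->max_cpus = my_machine.max_cpus;
    mc->is_default = my_machine.is_default;
}

void my_machine_register_types(struct uc_struct *uc)
{
    qemu_register_machine(uc, &my_machine, "my-board" TYPE_MACHINE_SUFFIX,
                          my_machine_class_init);
}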

qemu/include/hw/core/cpu.h Normal file

@@ -0,0 +1,615 @@
/*
* QEMU CPU model
*
* Copyright (c) 2012 SUSE LINUX Products GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>
*/
#ifndef QEMU_CPU_H
#define QEMU_CPU_H
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/bitmap.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
/**
* vaddr:
* Type wide enough to contain any #target_ulong virtual address.
*/
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
typedef enum MMUAccessType {
MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2
} MMUAccessType;
typedef struct CPUWatchpoint CPUWatchpoint;
struct TranslationBlock;
/**
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
* instantiatable CPU type.
* @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling.
* @do_unaligned_access: Callback for unaligned access handling, if
* the target defines #TARGET_ALIGNED_ONLY.
* @do_transaction_failed: Callback for handling failed memory transactions
* (i.e. bus faults or external aborts; not MMU faults)
* @get_arch_id: Callback for getting architecture-dependent CPU ID.
* @get_paging_enabled: Callback for inquiring whether paging is enabled.
* @get_memory_mapping: Callback for obtaining the memory mappings.
* @set_pc: Callback for setting the Program Counter register. This
* should have the semantics used by the target architecture when
* setting the PC from a source such as an ELF file entry point;
* for example on Arm it will also set the Thumb mode bit based
* on the least significant bit of the new PC value.
* If the target behaviour here is anything other than "set
* the PC register to the value passed in" then the target must
* also implement the synchronize_from_tb hook.
* @synchronize_from_tb: Callback for synchronizing state from a TCG
* #TranslationBlock. This is called when we abandon execution
* of a TB before starting it, and must set all parts of the CPU
* state which the previous TB in the chain may not have updated.
* This always includes at least the program counter; some targets
* will need to do more. If this hook is not implemented then the
* default is to call @set_pc(tb->pc).
* @tlb_fill: Callback for handling a softmmu tlb miss or user-only
* address fault. For system mode, if the access is valid, call
* tlb_set_page and return true; if the access is invalid, and
* probe is true, return false; otherwise raise an exception and
* do not return. For user-only mode, always raise an exception
* and do not return.
* @get_phys_page_debug: Callback for obtaining a physical address.
* @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
* associated memory transaction attributes to use for the access.
* CPUs which use memory transaction attributes should implement this
* instead of get_phys_page_debug.
* @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
* a memory access with the specified memory transaction attributes.
* @debug_check_watchpoint: Callback: return true if the architectural
* watchpoint whose address has matched should really fire.
* @debug_excp_handler: Callback for handling debug exceptions.
* @cpu_exec_enter: Callback for cpu_exec preparation.
* @cpu_exec_exit: Callback for cpu_exec cleanup.
* @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
*
* Represents a CPU family or model.
*/
typedef struct CPUClass {
/* no DeviceClass->reset(), add here. */
void (*reset)(CPUState *cpu);
bool (*has_work)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
int64_t (*get_arch_id)(CPUState *cpu);
bool (*get_paging_enabled)(const CPUState *cpu);
void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
void (*debug_excp_handler)(CPUState *cpu);
void (*cpu_exec_enter)(CPUState *cpu);
void (*cpu_exec_exit)(CPUState *cpu);
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
void (*tcg_initialize)(struct uc_struct *uc);
} CPUClass;
/*
* Low 16 bits: number of cycles left, used only in icount mode.
* High 16 bits: Set to -1 to force TCG to stop executing linked TBs
* for this CPU and return to its top level loop (even in non-icount mode).
* This allows a single read-compare-cbranch-write sequence to test
* for both decrementer underflow and exceptions.
*/
typedef union IcountDecr {
uint32_t u32;
struct {
#ifdef HOST_WORDS_BIGENDIAN
uint16_t high;
uint16_t low;
#else
uint16_t low;
uint16_t high;
#endif
} u16;
} IcountDecr;
typedef struct CPUBreakpoint {
vaddr pc;
int flags; /* BP_* */
QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
struct CPUWatchpoint {
vaddr vaddr;
vaddr len;
vaddr hitaddr;
MemTxAttrs hitattrs;
int flags; /* BP_* */
QTAILQ_ENTRY(CPUWatchpoint) entry;
};
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/* work queue */
/* The union type allows passing of 64 bit target pointers on 32 bit
* hosts in a single parameter
*/
typedef union {
int host_int;
unsigned long host_ulong;
void *host_ptr;
vaddr target_ptr;
} run_on_cpu_data;
#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
struct qemu_work_item;
#define CPU_UNSET_NUMA_NODE_ID -1
#define CPU_TRACE_DSTATE_MAX_EVENTS 32
/**
* CPUState:
* @cpu_index: CPU index (informative).
* @cluster_index: Identifies which cluster this CPU is in.
* For boards which don't define clusters or for "loose" CPUs not assigned
* to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
* be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
* QOM parent.
* @nr_cores: Number of cores within this CPU package.
* @nr_threads: Number of threads within this CPU.
* @running: #true if CPU is currently running (lockless).
* @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
* valid under cpu_list_lock.
* @created: Indicates whether the CPU thread has been successfully created.
* @interrupt_request: Indicates a pending interrupt request.
* @halted: Nonzero if the CPU is in suspended state.
* @stop: Indicates a pending stop request.
* @stopped: Indicates the CPU has been artificially stopped.
* @unplug: Indicates a pending CPU unplug request.
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
* @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
* requires that IO only be performed on the last instruction of a TB
* so that interrupts take effect immediately.
* @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
* AddressSpaces this CPU has)
* @num_ases: number of CPUAddressSpaces in @cpu_ases
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
* @env_ptr: Pointer to subclass-specific CPUArchState field.
* @icount_decr_ptr: Pointer to IcountDecr field within subclass.
* @next_cpu: Next CPU sharing TB cache.
* @opaque: User data.
* @mem_io_pc: Host Program Counter at which the memory was accessed.
* @work_mutex: Lock to prevent multiple access to queued_work_*.
* @queued_work_first: First asynchronous work pending.
* @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
* to @trace_dstate).
* @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
* @ignore_memory_transaction_failures: Cached copy of the MachineState
* flag of the same name: allows the board to suppress calling of the
* CPU do_transaction_failed hook function.
*
* State of one CPU core or thread.
*/
struct CPUState {
int nr_cores;
int nr_threads;
struct QemuThread *thread;
#ifdef _WIN32
HANDLE hThread;
#endif
#if 0
int thread_id;
bool running, has_waiter;
struct QemuCond *halt_cond;
bool thread_kicked;
#endif
bool created;
bool stop;
bool stopped;
bool unplug;
bool crash_occurred;
bool exit_request;
bool in_exclusive_context;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
int singlestep_enabled;
int64_t icount_budget;
int64_t icount_extra;
uint64_t random_seed;
sigjmp_buf jmp_env;
CPUAddressSpace *cpu_ases;
int num_ases;
AddressSpace *as;
MemoryRegion *memory;
void *env_ptr; /* CPUArchState */
IcountDecr *icount_decr_ptr;
/* Accessed in parallel; all accesses must be atomic */
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
QTAILQ_ENTRY(CPUState) node;
/* ice debug support */
QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
CPUWatchpoint *watchpoint_hit;
void *opaque;
/* In order to avoid passing too many arguments to the MMIO helpers,
* we store some rarely used information in the CPU context.
*/
uintptr_t mem_io_pc;
/* Used for events with 'vcpu' and *without* the 'disabled' properties */
DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);
/* TODO Move common fields from CPUArchState here. */
int cpu_index;
int cluster_index;
uint32_t halted;
uint32_t can_do_io;
int32_t exception_index;
struct uc_struct* uc;
/* pointer to CPUArchState.cc */
struct CPUClass *cc;
// Set to force TCG to stop executing linked TBs for this
// CPU and return to its top level loop.
volatile sig_atomic_t tcg_exit_req;
};
#define CPU(obj) ((CPUState *)(obj))
#define CPU_CLASS(class) ((CPUClass *)class)
#define CPU_GET_CLASS(obj) (((CPUState *)obj)->cc)
static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
{
unsigned int i;
for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
cpu->tb_jmp_cache[i] = NULL;
}
}
/**
* cpu_paging_enabled:
* @cpu: The CPU whose state is to be inspected.
*
* Returns: %true if paging is enabled, %false otherwise.
*/
bool cpu_paging_enabled(const CPUState *cpu);
/**
* cpu_get_memory_mapping:
* @cpu: The CPU whose memory mappings are to be obtained.
* @list: Where to write the memory mappings to.
*/
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list);
/**
* CPUDumpFlags:
* @CPU_DUMP_CODE:
* @CPU_DUMP_FPU: dump FPU register state, not just integer
* @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
*/
enum CPUDumpFlags {
CPU_DUMP_CODE = 0x00010000,
CPU_DUMP_FPU = 0x00020000,
CPU_DUMP_CCOP = 0x00040000,
};
/**
* cpu_get_phys_page_attrs_debug:
* @cpu: The CPU to obtain the physical page address for.
* @addr: The virtual address.
* @attrs: Updated on return with the memory transaction attributes to use
* for this access.
*
* Obtains the physical page corresponding to a virtual one, together
* with the corresponding memory transaction attributes to use for the access.
* Use it only for debugging because no protection checks are done.
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->get_phys_page_attrs_debug) {
return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
}
/* Fallback for CPUs which don't implement the _attrs_ hook */
*attrs = MEMTXATTRS_UNSPECIFIED;
return cc->get_phys_page_debug(cpu, addr);
}
/**
* cpu_get_phys_page_debug:
* @cpu: The CPU to obtain the physical page address for.
* @addr: The virtual address.
*
* Obtains the physical page corresponding to a virtual one.
* Use it only for debugging because no protection checks are done.
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
MemTxAttrs attrs = { 0 };
return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
}
/**
* cpu_asidx_from_attrs:
* @cpu: CPU
* @attrs: memory transaction attributes
*
* Returns the address space index specifying the CPU AddressSpace
* to use for a memory access with the given transaction attributes.
*/
static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int ret = 0;
if (cc->asidx_from_attrs) {
ret = cc->asidx_from_attrs(cpu, attrs);
assert(ret < cpu->num_ases && ret >= 0);
}
return ret;
}
/**
* cpu_reset:
* @cpu: The CPU whose state is to be reset.
*/
void cpu_reset(CPUState *cpu);
/**
* cpu_has_work:
* @cpu: The vCPU to check.
*
* Checks whether the CPU has work to do.
*
* Returns: %true if the CPU has work, %false otherwise.
*/
static inline bool cpu_has_work(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
g_assert(cc->has_work);
return cc->has_work(cpu);
}
/**
* cpu_is_stopped:
* @cpu: The CPU to check.
*
* Checks whether the CPU is stopped.
*
* Returns: %true if run state is not running or if artificially stopped;
* %false otherwise.
*/
bool cpu_is_stopped(CPUState *cpu);
typedef void (*CPUInterruptHandler)(CPUState *, int);
extern CPUInterruptHandler cpu_interrupt_handler;
/**
* cpu_interrupt:
* @cpu: The CPU to set an interrupt on.
* @mask: The interrupts to set.
*
* Invokes the interrupt handler.
*/
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
cpu_interrupt_handler(cpu, mask);
}
#ifdef NEED_CPU_H
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}
#endif /* NEED_CPU_H */
/**
* cpu_set_pc:
* @cpu: The CPU to set the program counter for.
* @addr: Program counter value.
*
* Sets the program counter for a CPU.
*/
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->set_pc(cpu, addr);
}
/**
* cpu_reset_interrupt:
* @cpu: The CPU to clear the interrupt on.
* @mask: The interrupt mask to clear.
*
* Resets interrupts on the vCPU @cpu.
*/
void cpu_reset_interrupt(CPUState *cpu, int mask);
/**
* cpu_exit:
* @cpu: The CPU to exit.
*
* Requests the CPU @cpu to exit execution.
*/
void cpu_exit(CPUState *cpu);
/**
* cpu_resume:
* @cpu: The CPU to resume.
*
* Resumes CPU, i.e. puts CPU into runnable state.
*/
void cpu_resume(CPUState *cpu);
/**
* qemu_init_vcpu:
* @cpu: The vCPU to initialize.
*
* Initializes a vCPU.
*/
void qemu_init_vcpu(CPUState *cpu);
#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ 0x01
#define BP_MEM_WRITE 0x02
#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB 0x10
#define BP_CPU 0x20
#define BP_ANY (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == pc && (bp->flags & mask)) {
return true;
}
}
}
return false;
}
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra);
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
/**
* cpu_get_address_space:
* @cpu: CPU to get address space from
* @asidx: index identifying which address space to get
*
* Return the requested address space of this CPU. @asidx
* specifies which address space to read.
*/
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
void cpu_exec_initfn(CPUState *cpu);
void cpu_exec_realizefn(CPUState *cpu);
void cpu_exec_unrealizefn(CPUState *cpu);
/**
* target_words_bigendian:
* Returns true if the (default) endianness of the target is big endian,
* false otherwise. Note that in target-specific code, you can use
* TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
* code should normally never need to know about the endianness of the
* target, so please do *not* use this function unless you know very well
* what you are doing!
*/
bool target_words_bigendian(void);
/* use original func name. */
void cpu_class_init(struct uc_struct *uc, CPUClass *k);
void cpu_common_initfn(struct uc_struct *uc, CPUState *cs);
void cpu_stop(struct uc_struct *uc);
#define UNASSIGNED_CPU_INDEX -1
#define UNASSIGNED_CLUSTER_INDEX -1
#endif
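
A hedged usage sketch of the vCPU interface declared above (illustrative only: cpu is assumed to come from the target's CPU-creation path, and CPU_INTERRUPT_EXAMPLE stands in for a target-specific CPU_INTERRUPT_* bit):

/* Illustrative front-end code driving a vCPU through the helpers above. */
#define CPU_INTERRUPT_EXAMPLE 0x2   /* placeholder; real bits are per target */

static void prepare_vcpu(CPUState *cpu, vaddr entry)
{
    cpu_reset(cpu);                  /* dispatches to CPUClass::reset */
    cpu_set_pc(cpu, entry);          /* dispatches to CPUClass::set_pc */

    /* stop once execution reaches entry + 0x40 */
    cpu_breakpoint_insert(cpu, entry + 0x40, BP_GDB, NULL);

    if (cpu_has_work(cpu)) {         /* asks CPUClass::has_work */
        cpu_interrupt(cpu, CPU_INTERRUPT_EXAMPLE);
    }
}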


@@ -1,79 +0,0 @@
/* icc_bus.h
* emulate x86 ICC (Interrupt Controller Communications) bus
*
* Copyright (c) 2013 Red Hat, Inc
*
* Authors:
* Igor Mammedov <imammedo@redhat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#ifndef ICC_BUS_H
#define ICC_BUS_H
#include "exec/memory.h"
#include "hw/qdev-core.h"
#define TYPE_ICC_BUS "icc-bus"
#ifndef CONFIG_USER_ONLY
/**
* ICCBus:
*
* ICC bus
*/
typedef struct ICCBus {
/*< private >*/
BusState parent_obj;
/*< public >*/
MemoryRegion *apic_address_space;
} ICCBus;
#define ICC_BUS(uc, obj) OBJECT_CHECK(uc, ICCBus, (obj), TYPE_ICC_BUS)
/**
* ICCDevice:
*
* ICC device
*/
typedef struct ICCDevice {
/*< private >*/
DeviceState qdev;
/*< public >*/
} ICCDevice;
/**
* ICCDeviceClass:
* @init: Initialization callback for derived classes.
*
* ICC device class
*/
typedef struct ICCDeviceClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
DeviceRealize realize;
} ICCDeviceClass;
#define TYPE_ICC_DEVICE "icc-device"
#define ICC_DEVICE_CLASS(uc, klass) \
OBJECT_CLASS_CHECK(uc, ICCDeviceClass, (klass), TYPE_ICC_DEVICE)
void icc_bus_register_types(struct uc_struct *uc);
#endif /* CONFIG_USER_ONLY */
#endif


@@ -1,41 +0,0 @@
/* Declarations for use by hardware emulation. */
#ifndef QEMU_HW_H
#define QEMU_HW_H
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY) && !defined(NEED_CPU_H)
#include "exec/cpu-common.h"
#endif
#include "exec/ioport.h"
#include "qemu/log.h"
#ifdef NEED_CPU_H
#if TARGET_LONG_BITS == 64
#define qemu_put_betl qemu_put_be64
#define qemu_get_betl qemu_get_be64
#define qemu_put_betls qemu_put_be64s
#define qemu_get_betls qemu_get_be64s
#define qemu_put_sbetl qemu_put_sbe64
#define qemu_get_sbetl qemu_get_sbe64
#define qemu_put_sbetls qemu_put_sbe64s
#define qemu_get_sbetls qemu_get_sbe64s
#else
#define qemu_put_betl qemu_put_be32
#define qemu_get_betl qemu_get_be32
#define qemu_put_betls qemu_put_be32s
#define qemu_get_betls qemu_get_be32s
#define qemu_put_sbetl qemu_put_sbe32
#define qemu_get_sbetl qemu_get_sbe32
#define qemu_put_sbetls qemu_put_sbe32s
#define qemu_get_sbetls qemu_get_sbe32s
#endif
#endif
typedef void QEMUResetHandler(void *opaque);
void qemu_register_reset(QEMUResetHandler *func, void *opaque);
void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
#endif


@@ -1,29 +0,0 @@
#ifndef APIC_H
#define APIC_H
#include "qemu-common.h"
/* apic.c */
int apic_accept_pic_intr(DeviceState *s);
int apic_get_interrupt(DeviceState *s);
void cpu_set_apic_base(struct uc_struct *uc, DeviceState *s, uint64_t val);
uint64_t cpu_get_apic_base(struct uc_struct *uc, DeviceState *s);
void cpu_set_apic_tpr(struct uc_struct *uc, DeviceState *s, uint8_t val);
uint8_t cpu_get_apic_tpr(struct uc_struct *uc, DeviceState *s);
void apic_init_reset(struct uc_struct *uc, DeviceState *s);
void apic_sipi(DeviceState *s);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
TPRAccess access);
void apic_poll_irq(DeviceState *d);
void apic_designate_bsp(struct uc_struct *uc, DeviceState *d);
/* pc.c */
DeviceState *cpu_get_current_apic(struct uc_struct *uc);
/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);
void apic_register_types(struct uc_struct *uc);
void apic_common_register_types(struct uc_struct *uc);
#endif


@@ -1,147 +0,0 @@
/*
* APIC support - internal interfaces
*
* Copyright (c) 2004-2005 Fabrice Bellard
* Copyright (c) 2011 Jan Kiszka, Siemens AG
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#ifndef QEMU_APIC_INTERNAL_H
#define QEMU_APIC_INTERNAL_H
#include "exec/memory.h"
#include "hw/cpu/icc_bus.h"
#include "qemu/timer.h"
/* APIC Local Vector Table */
#define APIC_LVT_TIMER 0
#define APIC_LVT_THERMAL 1
#define APIC_LVT_PERFORM 2
#define APIC_LVT_LINT0 3
#define APIC_LVT_LINT1 4
#define APIC_LVT_ERROR 5
#define APIC_LVT_NB 6
/* APIC delivery modes */
#define APIC_DM_FIXED 0
#define APIC_DM_LOWPRI 1
#define APIC_DM_SMI 2
#define APIC_DM_NMI 4
#define APIC_DM_INIT 5
#define APIC_DM_SIPI 6
#define APIC_DM_EXTINT 7
/* APIC destination mode */
#define APIC_DESTMODE_FLAT 0xf
#define APIC_DESTMODE_CLUSTER 1
#define APIC_TRIGGER_EDGE 0
#define APIC_TRIGGER_LEVEL 1
#define APIC_LVT_TIMER_PERIODIC (1<<17)
#define APIC_LVT_MASKED (1<<16)
#define APIC_LVT_LEVEL_TRIGGER (1<<15)
#define APIC_LVT_REMOTE_IRR (1<<14)
#define APIC_INPUT_POLARITY (1<<13)
#define APIC_SEND_PENDING (1<<12)
#define ESR_ILLEGAL_ADDRESS (1 << 7)
#define APIC_SV_DIRECTED_IO (1<<12)
#define APIC_SV_ENABLE (1<<8)
#define VAPIC_ENABLE_BIT 0
#define VAPIC_ENABLE_MASK (1 << VAPIC_ENABLE_BIT)
#define MAX_APICS 255
typedef struct APICCommonState APICCommonState;
#define TYPE_APIC_COMMON "apic-common"
#define APIC_COMMON(uc, obj) \
OBJECT_CHECK(uc, APICCommonState, (obj), TYPE_APIC_COMMON)
#define APIC_COMMON_CLASS(uc, klass) \
OBJECT_CLASS_CHECK(uc, APICCommonClass, (klass), TYPE_APIC_COMMON)
#define APIC_COMMON_GET_CLASS(uc, obj) \
OBJECT_GET_CLASS(uc, APICCommonClass, (obj), TYPE_APIC_COMMON)
typedef struct APICCommonClass
{
ICCDeviceClass parent_class;
DeviceRealize realize;
void (*set_base)(APICCommonState *s, uint64_t val);
void (*set_tpr)(APICCommonState *s, uint8_t val);
uint8_t (*get_tpr)(APICCommonState *s);
void (*enable_tpr_reporting)(APICCommonState *s, bool enable);
void (*vapic_base_update)(APICCommonState *s);
void (*external_nmi)(APICCommonState *s);
void (*pre_save)(APICCommonState *s);
void (*post_load)(APICCommonState *s);
void (*reset)(APICCommonState *s);
} APICCommonClass;
struct APICCommonState {
ICCDevice busdev;
MemoryRegion io_memory;
X86CPU *cpu;
uint32_t apicbase;
uint8_t id;
uint8_t version;
uint8_t arb_id;
uint8_t tpr;
uint32_t spurious_vec;
uint8_t log_dest;
uint8_t dest_mode;
uint32_t isr[8]; /* in service register */
uint32_t tmr[8]; /* trigger mode register */
uint32_t irr[8]; /* interrupt request register */
uint32_t lvt[APIC_LVT_NB];
uint32_t esr; /* error register */
uint32_t icr[2];
uint32_t divide_conf;
int count_shift;
uint32_t initial_count;
int64_t initial_count_load_time;
int64_t next_time;
int idx;
QEMUTimer *timer;
int64_t timer_expiry;
int sipi_vector;
int wait_for_sipi;
uint32_t vapic_control;
DeviceState *vapic;
hwaddr vapic_paddr; /* note: persistence via kvmvapic */
};
QEMU_PACK( typedef struct VAPICState {
uint8_t tpr;
uint8_t isr;
uint8_t zero;
uint8_t irr;
uint8_t enabled;
}) VAPICState;
extern bool apic_report_tpr_access;
bool apic_next_timer(APICCommonState *s, int64_t current_time);
void apic_enable_vapic(struct uc_struct *uc, DeviceState *d, hwaddr paddr);
void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip,
TPRAccess access);
#endif /* !QEMU_APIC_INTERNAL_H */
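
A small hedged illustration of the LVT bit masks above (the helper is not part of the tree; it assumes the usual QEMU includes for bool):

/* Illustrative only: an LVT entry can deliver when its mask bit is clear. */
static bool apic_lvt_can_deliver(APICCommonState *s, int lvt_index)
{
    uint32_t lvt = s->lvt[lvt_index];        /* e.g. APIC_LVT_TIMER */
    return (lvt & APIC_LVT_MASKED) == 0;
}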


@@ -1,52 +0,0 @@
#ifndef HW_PC_H
#define HW_PC_H
#include "hw/boards.h"
/**
* PCMachineState:
*/
struct PCMachineState {
/*< private >*/
MachineState parent_obj;
uint64_t max_ram_below_4g;
};
#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g"
/**
* PCMachineClass:
*/
struct PCMachineClass {
/*< private >*/
MachineClass parent_class;
};
typedef struct PCMachineState PCMachineState;
typedef struct PCMachineClass PCMachineClass;
#define TYPE_PC_MACHINE "generic-pc-machine"
#define PC_MACHINE(uc, obj) \
OBJECT_CHECK(uc, PCMachineState, (obj), TYPE_PC_MACHINE)
#define PC_MACHINE_GET_CLASS(obj) \
OBJECT_GET_CLASS(PCMachineClass, (obj), TYPE_PC_MACHINE)
#define PC_MACHINE_CLASS(klass) \
OBJECT_CLASS_CHECK(PCMachineClass, (klass), TYPE_PC_MACHINE)
int pc_cpus_init(struct uc_struct *uc, const char *cpu_model);
FWCfgState *pc_memory_init(MachineState *machine,
MemoryRegion *system_memory,
ram_addr_t begin,
MemoryRegion **ram_memory);
typedef void (*cpu_set_smm_t)(int smm, void *arg);
void cpu_smm_register(cpu_set_smm_t callback, void *arg);
void pc_machine_register_types(struct uc_struct *uc);
void x86_cpu_register_types(struct uc_struct *uc);
#define PC_DEFAULT_MACHINE_OPTIONS \
.max_cpus = 255
#endif


@@ -0,0 +1,271 @@
/*
* x86 CPU topology data structures and functions
*
* Copyright (c) 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef HW_I386_TOPOLOGY_H
#define HW_I386_TOPOLOGY_H
/* This file implements the APIC-ID-based CPU topology enumeration logic,
* documented in:
* Intel® 64 Architecture Processor Topology Enumeration
* http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/
*
* This code should be compatible with AMD's "Extended Method" described at:
* AMD CPUID Specification (Publication #25481)
* Section 3: Multiple Core Calculation
* as long as:
* nr_threads is set to 1;
* OFFSET_IDX is assumed to be 0;
* CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width().
*/
#include "qemu/bitops.h"
/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support
*/
typedef uint32_t apic_id_t;
typedef struct X86CPUTopoIDs {
unsigned pkg_id;
unsigned node_id;
unsigned die_id;
unsigned core_id;
unsigned smt_id;
} X86CPUTopoIDs;
typedef struct X86CPUTopoInfo {
unsigned nodes_per_pkg;
unsigned dies_per_pkg;
unsigned cores_per_die;
unsigned threads_per_core;
} X86CPUTopoInfo;
/* Return the bit width needed for 'count' IDs
*/
static unsigned apicid_bitwidth_for_count(unsigned count)
{
g_assert(count >= 1);
count -= 1;
return count ? 32 - clz32(count) : 0;
}
/* Bit width of the SMT_ID (thread ID) field on the APIC ID
*/
static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info)
{
return apicid_bitwidth_for_count(topo_info->threads_per_core);
}
/* Bit width of the Core_ID field
*/
static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info)
{
return apicid_bitwidth_for_count(topo_info->cores_per_die);
}
/* Bit width of the Die_ID field */
static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info)
{
return apicid_bitwidth_for_count(topo_info->dies_per_pkg);
}
/* Bit width of the node_id field per socket */
static inline unsigned apicid_node_width_epyc(X86CPUTopoInfo *topo_info)
{
return apicid_bitwidth_for_count(MAX(topo_info->nodes_per_pkg, 1));
}
/* Bit offset of the Core_ID field
*/
static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info)
{
return apicid_smt_width(topo_info);
}
/* Bit offset of the Die_ID field */
static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info)
{
return apicid_core_offset(topo_info) + apicid_core_width(topo_info);
}
/* Bit offset of the Pkg_ID (socket ID) field
*/
static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info)
{
return apicid_die_offset(topo_info) + apicid_die_width(topo_info);
}
#define NODE_ID_OFFSET 3 /* Minimum node_id offset if numa configured */
/*
* Bit offset of the node_id field
*
* nodes_per_pkg must be > 0 when NUMA is configured, and zero otherwise.
*/
static inline unsigned apicid_node_offset_epyc(X86CPUTopoInfo *topo_info)
{
unsigned offset = apicid_die_offset(topo_info) +
apicid_die_width(topo_info);
if (topo_info->nodes_per_pkg) {
return MAX(NODE_ID_OFFSET, offset);
} else {
return offset;
}
}
/* Bit offset of the Pkg_ID (socket ID) field */
static inline unsigned apicid_pkg_offset_epyc(X86CPUTopoInfo *topo_info)
{
return apicid_node_offset_epyc(topo_info) +
apicid_node_width_epyc(topo_info);
}
/*
* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
*
* The caller must make sure core_id < nr_cores and smt_id < nr_threads.
*/
static inline apic_id_t
x86_apicid_from_topo_ids_epyc(X86CPUTopoInfo *topo_info,
const X86CPUTopoIDs *topo_ids)
{
return (topo_ids->pkg_id << apicid_pkg_offset_epyc(topo_info)) |
(topo_ids->node_id << apicid_node_offset_epyc(topo_info)) |
(topo_ids->die_id << apicid_die_offset(topo_info)) |
(topo_ids->core_id << apicid_core_offset(topo_info)) |
topo_ids->smt_id;
}
static inline void x86_topo_ids_from_idx_epyc(X86CPUTopoInfo *topo_info,
unsigned cpu_index,
X86CPUTopoIDs *topo_ids)
{
unsigned nr_nodes = MAX(topo_info->nodes_per_pkg, 1);
unsigned nr_dies = topo_info->dies_per_pkg;
unsigned nr_cores = topo_info->cores_per_die;
unsigned nr_threads = topo_info->threads_per_core;
unsigned cores_per_node = DIV_ROUND_UP((nr_dies * nr_cores * nr_threads),
nr_nodes);
topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads);
topo_ids->node_id = (cpu_index / cores_per_node) % nr_nodes;
topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies;
topo_ids->core_id = cpu_index / nr_threads % nr_cores;
topo_ids->smt_id = cpu_index % nr_threads;
}
/*
* Calculate thread/core/package IDs for a specific topology,
* based on APIC ID
*/
static inline void x86_topo_ids_from_apicid_epyc(apic_id_t apicid,
X86CPUTopoInfo *topo_info,
X86CPUTopoIDs *topo_ids)
{
topo_ids->smt_id = apicid &
~(0xFFFFFFFFUL << apicid_smt_width(topo_info));
topo_ids->core_id =
(apicid >> apicid_core_offset(topo_info)) &
~(0xFFFFFFFFUL << apicid_core_width(topo_info));
topo_ids->die_id =
(apicid >> apicid_die_offset(topo_info)) &
~(0xFFFFFFFFUL << apicid_die_width(topo_info));
topo_ids->node_id =
(apicid >> apicid_node_offset_epyc(topo_info)) &
~(0xFFFFFFFFUL << apicid_node_width_epyc(topo_info));
topo_ids->pkg_id = apicid >> apicid_pkg_offset_epyc(topo_info);
}
/*
* Make APIC ID for the CPU 'cpu_index'
*
* 'cpu_index' is a sequential, contiguous ID for the CPU.
*/
static inline apic_id_t x86_apicid_from_cpu_idx_epyc(X86CPUTopoInfo *topo_info,
unsigned cpu_index)
{
X86CPUTopoIDs topo_ids;
x86_topo_ids_from_idx_epyc(topo_info, cpu_index, &topo_ids);
return x86_apicid_from_topo_ids_epyc(topo_info, &topo_ids);
}
/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
*
* The caller must make sure core_id < nr_cores and smt_id < nr_threads.
*/
static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info,
const X86CPUTopoIDs *topo_ids)
{
return (topo_ids->pkg_id << apicid_pkg_offset(topo_info)) |
(topo_ids->die_id << apicid_die_offset(topo_info)) |
(topo_ids->core_id << apicid_core_offset(topo_info)) |
topo_ids->smt_id;
}
/* Calculate thread/core/package IDs for a specific topology,
* based on (contiguous) CPU index
*/
static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info,
unsigned cpu_index,
X86CPUTopoIDs *topo_ids)
{
unsigned nr_dies = topo_info->dies_per_pkg;
unsigned nr_cores = topo_info->cores_per_die;
unsigned nr_threads = topo_info->threads_per_core;
topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads);
topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies;
topo_ids->core_id = cpu_index / nr_threads % nr_cores;
topo_ids->smt_id = cpu_index % nr_threads;
}
/* Calculate thread/core/package IDs for a specific topology,
* based on APIC ID
*/
static inline void x86_topo_ids_from_apicid(apic_id_t apicid,
X86CPUTopoInfo *topo_info,
X86CPUTopoIDs *topo_ids)
{
topo_ids->smt_id = apicid &
~(0xFFFFFFFFUL << apicid_smt_width(topo_info));
topo_ids->core_id =
(apicid >> apicid_core_offset(topo_info)) &
~(0xFFFFFFFFUL << apicid_core_width(topo_info));
topo_ids->die_id =
(apicid >> apicid_die_offset(topo_info)) &
~(0xFFFFFFFFUL << apicid_die_width(topo_info));
topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info);
}
/* Make APIC ID for the CPU 'cpu_index'
*
* 'cpu_index' is a sequential, contiguous ID for the CPU.
*/
static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info,
unsigned cpu_index)
{
X86CPUTopoIDs topo_ids;
x86_topo_ids_from_idx(topo_info, cpu_index, &topo_ids);
return x86_apicid_from_topo_ids(topo_info, &topo_ids);
}
#endif /* HW_I386_TOPOLOGY_H */
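
A hedged worked example of the non-EPYC helpers above (assuming this header and <assert.h> are included; the numbers are illustrative): with 1 die per package, 4 cores per die and 2 threads per core, the SMT field is 1 bit wide and the core field 2 bits, so cpu_index 5 (package 0, core 2, thread 1) encodes to APIC ID (2 << 1) | 1 = 5, while cpu_index 9 spills into package 1 at bit offset 3, giving APIC ID 0x9.

/* Illustrative check of the topology encoding described above. */
static void topology_example(void)
{
    X86CPUTopoInfo topo = {
        .nodes_per_pkg = 0,
        .dies_per_pkg = 1,
        .cores_per_die = 4,
        .threads_per_core = 2,
    };

    assert(apicid_smt_width(&topo) == 1);    /* 2 threads  -> 1 bit  */
    assert(apicid_core_width(&topo) == 2);   /* 4 cores    -> 2 bits */
    assert(apicid_pkg_offset(&topo) == 3);   /* SMT + core -> pkg at bit 3 */

    assert(x86_apicid_from_cpu_idx(&topo, 5) == 0x5);
    assert(x86_apicid_from_cpu_idx(&topo, 9) == 0x9);
}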


@@ -1,10 +0,0 @@
#ifndef HW_M68K_H
#define HW_M68K_H
#include "uc_priv.h"
void dummy_m68k_machine_init(struct uc_struct *uc);
void m68k_cpu_register_types(void *opaque);
#endif


@@ -1,10 +1,21 @@
#ifndef HW_MIPS_CPUDEVS_H
#define HW_MIPS_CPUDEVS_H
#include "target/mips/cpu-qom.h"
/* Definitions for MIPS CPU internal devices. */
/* mips_addr.c */
/* addr.c */
uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr);
uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr);
uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr);
bool mips_um_ksegs_enabled(void);
void mips_um_ksegs_enable(void);
/* mips_int.c */
void cpu_mips_irq_init_cpu(MIPSCPU *cpu);
/* mips_timer.c */
void cpu_mips_clock_init(MIPSCPU *cpu);
#endif


@@ -1,7 +0,0 @@
#ifndef HW_MIPS_H
#define HW_MIPS_H
void mips_machine_init(struct uc_struct *uc);
void mips_cpu_register_types(void *opaque);
#endif

qemu/include/hw/ppc/ppc.h Normal file

@@ -0,0 +1,115 @@
#ifndef HW_PPC_H
#define HW_PPC_H
#include "target/ppc/cpu-qom.h"
void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level);
PowerPCCPU *ppc_get_vcpu_by_pir(int pir);
int ppc_cpu_pir(PowerPCCPU *cpu);
/* PowerPC hardware exceptions management helpers */
typedef void (*clk_setup_cb)(void *opaque, uint32_t freq);
typedef struct clk_setup_t clk_setup_t;
struct clk_setup_t {
clk_setup_cb cb;
void *opaque;
};
static inline void clk_setup (clk_setup_t *clk, uint32_t freq)
{
if (clk->cb != NULL)
(*clk->cb)(clk->opaque, freq);
}
struct ppc_tb_t {
/* Time base management */
int64_t tb_offset; /* Compensation */
int64_t atb_offset; /* Compensation */
int64_t vtb_offset;
uint32_t tb_freq; /* TB frequency */
/* Decrementer management */
uint64_t decr_next; /* Tick for next decr interrupt */
uint32_t decr_freq; /* decrementer frequency */
QEMUTimer *decr_timer;
/* Hypervisor decrementer management */
uint64_t hdecr_next; /* Tick for next hdecr interrupt */
QEMUTimer *hdecr_timer;
int64_t purr_offset;
void *opaque;
uint32_t flags;
};
/* PPC Timers flags */
#define PPC_TIMER_BOOKE (1 << 0) /* Enable Booke support */
#define PPC_TIMER_E500 (1 << 1) /* Enable e500 support */
#define PPC_DECR_UNDERFLOW_TRIGGERED (1 << 2) /* Decr interrupt triggered when
* the most significant bit
* changes from 0 to 1.
*/
#define PPC_DECR_ZERO_TRIGGERED (1 << 3) /* Decr interrupt triggered when
* the decrementer reaches zero.
*/
#define PPC_DECR_UNDERFLOW_LEVEL (1 << 4) /* Decr interrupt active when
* the most significant bit is 1.
*/
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset);
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq);
/* Embedded PowerPC DCR management */
typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn);
typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val);
int ppc_dcr_init (CPUPPCState *env, int (*dcr_read_error)(int dcrn),
int (*dcr_write_error)(int dcrn));
int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
dcr_read_cb dcr_read, dcr_write_cb dcr_write);
clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
unsigned int decr_excp);
/* Embedded PowerPC reset */
void ppc40x_core_reset(PowerPCCPU *cpu);
void ppc40x_chip_reset(PowerPCCPU *cpu);
void ppc40x_system_reset(PowerPCCPU *cpu);
#if defined(CONFIG_USER_ONLY)
static inline void ppc40x_irq_init(PowerPCCPU *cpu) {}
static inline void ppc6xx_irq_init(PowerPCCPU *cpu) {}
static inline void ppc970_irq_init(PowerPCCPU *cpu) {}
static inline void ppcPOWER7_irq_init(PowerPCCPU *cpu) {}
static inline void ppcPOWER9_irq_init(PowerPCCPU *cpu) {}
static inline void ppce500_irq_init(PowerPCCPU *cpu) {}
static inline void ppc_irq_reset(PowerPCCPU *cpu) {}
#else
void ppc40x_irq_init(PowerPCCPU *cpu);
void ppce500_irq_init(PowerPCCPU *cpu);
void ppc6xx_irq_init(PowerPCCPU *cpu);
void ppc970_irq_init(PowerPCCPU *cpu);
void ppcPOWER7_irq_init(PowerPCCPU *cpu);
void ppcPOWER9_irq_init(PowerPCCPU *cpu);
void ppc_irq_reset(PowerPCCPU *cpu);
#endif
/* PPC machines for OpenBIOS */
enum {
ARCH_PREP = 0,
ARCH_MAC99,
ARCH_HEATHROW,
ARCH_MAC99_U3,
};
#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00)
#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01)
#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02)
#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03)
#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04)
#define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05)
#define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06)
#define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07)
#define FW_CFG_PPC_NVRAM_ADDR (FW_CFG_ARCH_LOCAL + 0x08)
#define FW_CFG_PPC_BUSFREQ (FW_CFG_ARCH_LOCAL + 0x09)
#define FW_CFG_PPC_NVRAM_FLAT (FW_CFG_ARCH_LOCAL + 0x0a)
#define FW_CFG_PPC_VIACONFIG (FW_CFG_ARCH_LOCAL + 0x0b)
#define PPC_SERIAL_MM_BAUDBASE 399193
/* ppc_booke.c */
void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags);
#endif
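
A hedged sketch of the clock-setup plumbing above (the callback and its opaque pointer are illustrative, not from the tree):

/* Illustrative only: a board callback notified when a clock frequency changes. */
static void my_clk_changed(void *opaque, uint32_t freq)
{
    /* reprogram whatever board state is derived from this clock */
}

static void wire_board_clock(void *board)
{
    clk_setup_t clk = { .cb = my_clk_changed, .opaque = board };

    /* invokes my_clk_changed(board, 33333333) through clk_setup() above */
    clk_setup(&clk, 33333333);
}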


@@ -1,353 +0,0 @@
#ifndef QDEV_CORE_H
#define QDEV_CORE_H
#include "qemu/queue.h"
#include "qemu/typedefs.h"
#include "qemu/bitmap.h"
#include "qom/object.h"
#include "qapi/error.h"
enum {
DEV_NVECTORS_UNSPECIFIED = -1,
};
#define TYPE_DEVICE "device"
#define DEVICE(uc, obj) OBJECT_CHECK(uc, DeviceState, (obj), TYPE_DEVICE)
#define DEVICE_CLASS(uc, klass) OBJECT_CLASS_CHECK(uc, DeviceClass, (klass), TYPE_DEVICE)
#define DEVICE_GET_CLASS(uc, obj) OBJECT_GET_CLASS(uc, DeviceClass, (obj), TYPE_DEVICE)
typedef enum DeviceCategory {
DEVICE_CATEGORY_BRIDGE,
DEVICE_CATEGORY_USB,
DEVICE_CATEGORY_STORAGE,
DEVICE_CATEGORY_NETWORK,
DEVICE_CATEGORY_INPUT,
DEVICE_CATEGORY_DISPLAY,
DEVICE_CATEGORY_SOUND,
DEVICE_CATEGORY_MISC,
DEVICE_CATEGORY_MAX
} DeviceCategory;
typedef int (*qdev_initfn)(DeviceState *dev);
typedef int (*qdev_event)(DeviceState *dev);
typedef void (*qdev_resetfn)(DeviceState *dev);
typedef int (*DeviceRealize)(struct uc_struct *uc, DeviceState *dev, Error **errp);
typedef void (*DeviceUnrealize)(DeviceState *dev, Error **errp);
typedef void (*BusRealize)(BusState *bus, Error **errp);
typedef void (*BusUnrealize)(BusState *bus, Error **errp);
struct VMStateDescription;
/**
* DeviceClass:
* @props: Properties accessing state fields.
* @realize: Callback function invoked when the #DeviceState:realized
* property is changed to %true. The default invokes @init if not %NULL.
* @unrealize: Callback function invoked when the #DeviceState:realized
* property is changed to %false.
* @init: Callback function invoked when the #DeviceState::realized property
* is changed to %true. Deprecated, new types inheriting directly from
* TYPE_DEVICE should use @realize instead, new leaf types should consult
* their respective parent type.
* @hotpluggable: indicates if #DeviceClass is hotpluggable, available
* as readonly "hotpluggable" property of #DeviceState instance
*
* # Realization #
* Devices are constructed in two stages,
* 1) object instantiation via object_initialize() and
* 2) device realization via #DeviceState:realized property.
* The former may not fail (it might assert or exit), the latter may return
* error information to the caller and must be re-entrant.
* Trivial field initializations should go into #TypeInfo.instance_init.
* Operations depending on @props static properties should go into @realize.
* After successful realization, setting static properties will fail.
*
* As an interim step, the #DeviceState:realized property is set by deprecated
* functions qdev_init() and qdev_init_nofail().
* In the future, devices will propagate this state change to their children
* and along busses they expose.
* The point in time will be deferred to machine creation, so that values
* set in @realize will not be introspectable beforehand. Therefore devices
* must not create children during @realize; they should initialize them via
* object_initialize() in their own #TypeInfo.instance_init and forward the
* realization events appropriately.
*
* The @init callback is considered private to a particular bus implementation
* (immediate abstract child types of TYPE_DEVICE). Derived leaf types set an
* "init" callback on their parent class instead.
*
* Any type may override the @realize and/or @unrealize callbacks but needs
* to call the parent type's implementation if keeping their functionality
* is desired. Refer to QOM documentation for further discussion and examples.
*
* <note>
* <para>
* If a type derived directly from TYPE_DEVICE implements @realize, it does
* not need to implement @init and therefore does not need to store and call
* #DeviceClass' default @realize callback.
* For other types consult the documentation and implementation of the
* respective parent types.
* </para>
* </note>
*/
typedef struct DeviceClass {
/*< private >*/
ObjectClass parent_class;
/*< public >*/
DECLARE_BITMAP(categories, DEVICE_CATEGORY_MAX);
const char *fw_name;
const char *desc;
Property *props;
/*
* Shall we hide this device model from -device / device_add?
* All devices should support instantiation with device_add, and
* this flag should not exist. But we're not there, yet. Some
* devices fail to instantiate with cryptic error messages.
* Others instantiate, but don't work. Exposing users to such
* behavior would be cruel; this flag serves to protect them. It
* should never be set without a comment explaining why it is set.
* TODO remove once we're there
*/
bool cannot_instantiate_with_device_add_yet;
bool hotpluggable;
/* callbacks */
void (*reset)(struct uc_struct *uc, DeviceState *dev);
DeviceRealize realize;
DeviceUnrealize unrealize;
/* device state */
const struct VMStateDescription *vmsd;
/* Private to qdev / bus. */
qdev_initfn init; /* TODO remove, once users are converted to realize */
qdev_event exit; /* TODO remove, once users are converted to unrealize */
const char *bus_type;
} DeviceClass;
typedef struct NamedGPIOList NamedGPIOList;
struct NamedGPIOList {
char *name;
int num_in;
int num_out;
QLIST_ENTRY(NamedGPIOList) node;
};
/**
* DeviceState:
* @realized: Indicates whether the device has been fully constructed.
*
* This structure should not be accessed directly. We declare it here
* so that it can be embedded in individual device state structures.
*/
struct DeviceState {
/*< private >*/
Object parent_obj;
/*< public >*/
const char *id;
bool realized;
bool pending_deleted_event;
int hotplugged;
BusState *parent_bus;
QLIST_HEAD(, NamedGPIOList) gpios;
QLIST_HEAD(, BusState) child_bus;
int num_child_bus;
int instance_id_alias;
int alias_required_for_version;
};
#define TYPE_BUS "bus"
#define BUS(uc, obj) OBJECT_CHECK(uc, BusState, (obj), TYPE_BUS)
#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS)
#define BUS_GET_CLASS(obj) OBJECT_GET_CLASS(BusClass, (obj), TYPE_BUS)
struct BusClass {
ObjectClass parent_class;
/* FIXME first arg should be BusState */
char *(*get_dev_path)(DeviceState *dev);
/*
* This callback is used to create Open Firmware device path in accordance
* with OF spec http://forthworks.com/standards/of1275.pdf. Individual bus
* bindings can be found at http://playground.sun.com/1275/bindings/.
*/
char *(*get_fw_dev_path)(DeviceState *dev);
void (*reset)(BusState *bus);
BusRealize realize;
BusUnrealize unrealize;
/* maximum devices allowed on the bus, 0: no limit. */
int max_dev;
/* number of automatically allocated bus ids (e.g. ide.0) */
int automatic_ids;
};
typedef struct BusChild {
DeviceState *child;
int index;
QTAILQ_ENTRY(BusChild) sibling;
} BusChild;
#define QDEV_HOTPLUG_HANDLER_PROPERTY "hotplug-handler"
/**
* BusState:
* @hotplug_device: link to a hotplug device associated with bus.
*/
struct BusState {
Object obj;
DeviceState *parent;
const char *name;
int max_index;
bool realized;
QTAILQ_HEAD(ChildrenHead, BusChild) children;
QLIST_ENTRY(BusState) sibling;
};
struct Property {
const char *name;
PropertyInfo *info;
int offset;
uint8_t bitnr;
uint8_t qtype;
int64_t defval;
int arrayoffset;
PropertyInfo *arrayinfo;
int arrayfieldsize;
};
struct PropertyInfo {
const char *name;
const char *description;
const char **enum_table;
int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len);
ObjectPropertyAccessor *get;
ObjectPropertyAccessor *set;
ObjectPropertyRelease *release;
};
/**
* GlobalProperty:
* @user_provided: Set to true if property comes from user-provided config
* (command-line or config file).
* @used: Set to true if property was used when initializing a device.
*/
typedef struct GlobalProperty {
const char *driver;
const char *property;
const char *value;
bool user_provided;
bool used;
QTAILQ_ENTRY(GlobalProperty) next;
} GlobalProperty;
/*** Board API. This should go away once we have a machine config file. ***/
DeviceState *qdev_create(BusState *bus, const char *name);
DeviceState *qdev_try_create(BusState *bus, const char *name);
int qdev_init(DeviceState *dev) QEMU_WARN_UNUSED_RESULT;
void qdev_init_nofail(DeviceState *dev);
void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id,
int required_for_version);
void qdev_unplug(DeviceState *dev, Error **errp);
void qdev_machine_creation_done(void);
bool qdev_machine_modified(void);
BusState *qdev_get_child_bus(DeviceState *dev, const char *name);
/*** Device API. ***/
/* Register device properties. */
/* GPIO inputs also double as IRQ sinks. */
void qdev_pass_gpios(DeviceState *dev, DeviceState *container,
const char *name);
BusState *qdev_get_parent_bus(DeviceState *dev);
/*** BUS API. ***/
DeviceState *qdev_find_recursive(BusState *bus, const char *id);
/* Returns 0 to walk children, > 0 to skip walk, < 0 to terminate walk. */
typedef int (qbus_walkerfn)(BusState *bus, void *opaque);
typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque);
void qbus_create_inplace(void *bus, size_t size, const char *typename_,
DeviceState *parent, const char *name);
BusState *qbus_create(const char *typename_, DeviceState *parent, const char *name);
/* Returns > 0 if either devfn or busfn skips the walk somewhere in the recursion,
* < 0 if either devfn or busfn terminates the walk somewhere in the recursion,
* 0 otherwise. */
int qbus_walk_children(BusState *bus,
qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn,
qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn,
void *opaque);
int qdev_walk_children(DeviceState *dev,
qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn,
qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn,
void *opaque);
void qdev_reset_all(DeviceState *dev);
/**
* @qbus_reset_all:
* @bus: Bus to be reset.
*
* Reset @bus and perform a bus-level ("hard") reset of all devices connected
* to it, including recursive processing of all buses below @bus itself. A
* hard reset means that qbus_reset_all will reset all state of the device.
* For PCI devices, for example, this will include the base address registers
* or configuration space.
*/
void qbus_reset_all(BusState *bus);
void qbus_reset_all_fn(void *opaque);
/* This should go away once we get rid of the NULL bus hack */
BusState *sysbus_get_default(void);
char *qdev_get_fw_dev_path(DeviceState *dev);
/**
* @qdev_machine_init
*
* Initialize platform devices before machine init. This is a hack until full
* support for composition is added.
*/
void qdev_machine_init(void);
/**
* @device_reset
*
* Reset a single device (by calling the reset method).
*/
void device_reset(DeviceState *dev);
const struct VMStateDescription *qdev_get_vmsd(DeviceState *dev);
const char *qdev_fw_name(DeviceState *dev);
Object *qdev_get_machine(struct uc_struct *);
/* FIXME: make this a link<> */
void qdev_set_parent_bus(DeviceState *dev, BusState *bus);
extern int qdev_hotplug;
char *qdev_get_dev_path(DeviceState *dev);
GSList *qdev_build_hotpluggable_device_list(Object *peripheral);
void qbus_set_hotplug_handler(BusState *bus, DeviceState *handler,
Error **errp);
void qbus_set_bus_hotplug_handler(BusState *bus, Error **errp);
void qdev_register_types(struct uc_struct *uc);
void sysbus_register_types(struct uc_struct *uc);
#endif
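
The realization comment above boils down to a two-step flow at board level; a hedged sketch ("my-device" is a hypothetical type name):

/* Illustrative only: instantiate a device, then trigger its realization. */
static DeviceState *board_add_device(BusState *bus)
{
    DeviceState *dev = qdev_create(bus, "my-device"); /* stage 1: instantiate */
    qdev_init_nofail(dev);                            /* stage 2: realize */
    return dev;
}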


@@ -1,7 +0,0 @@
#ifndef QDEV_H
#define QDEV_H
#include "hw/hw.h"
#include "hw/qdev-core.h"
#endif


@@ -0,0 +1,99 @@
/*
* Register Definition API: field macros
*
* Copyright (c) 2016 Xilinx Inc.
* Copyright (c) 2013 Peter Crosthwaite <peter.crosthwaite@xilinx.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#ifndef REGISTERFIELDS_H
#define REGISTERFIELDS_H
#include "qemu/bitops.h"
/* Define constants for a 32 bit register */
/* This macro will define A_FOO, for the byte address of a register
* as well as R_FOO for the uint32_t[] register number (A_FOO / 4).
*/
#define REG32(reg, addr) \
enum { A_ ## reg = (addr) }; \
enum { R_ ## reg = (addr) / 4 };
#define REG8(reg, addr) \
enum { A_ ## reg = (addr) }; \
enum { R_ ## reg = (addr) };
#define REG16(reg, addr) \
enum { A_ ## reg = (addr) }; \
enum { R_ ## reg = (addr) / 2 };
/* Define SHIFT, LENGTH and MASK constants for a field within a register */
/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH
* constants for field BAR in register FOO.
*/
#define FIELD(reg, field, shift, length) \
enum { R_ ## reg ## _ ## field ## _SHIFT = (shift)}; \
enum { R_ ## reg ## _ ## field ## _LENGTH = (length)}; \
enum { R_ ## reg ## _ ## field ## _MASK = \
MAKE_64BIT_MASK(shift, length)};
/* Extract a field from a register */
#define FIELD_EX8(storage, reg, field) \
extract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX16(storage, reg, field) \
extract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX32(storage, reg, field) \
extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX64(storage, reg, field) \
extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
/* Extract a field from an array of registers */
#define ARRAY_FIELD_EX32(regs, reg, field) \
FIELD_EX32((regs)[R_ ## reg], reg, field)
/* Deposit a register field.
* Assigning values larger than the target field will result in
* compilation warnings.
*/
#define FIELD_DP8(storage, reg, field, val, d) { \
struct { \
unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
} v = { .v = val }; \
d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH, v.v); \
}
#define FIELD_DP16(storage, reg, field, val, d) { \
struct { \
unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
} v = { .v = val }; \
d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH, v.v); \
}
#define FIELD_DP32(storage, reg, field, val, d) { \
struct { \
unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
} v = { .v = val }; \
d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH, v.v); \
}
#define FIELD_DP64(storage, reg, field, val, d) { \
struct { \
unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
} v = { .v = val }; \
d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH, v.v); \
}
/* Deposit a field into an array of registers. The array slot is passed as
* both the source and the destination of the five-argument FIELD_DP32 above. */
#define ARRAY_FIELD_DP32(regs, reg, field, val) \
    FIELD_DP32((regs)[R_ ## reg], reg, field, val, (regs)[R_ ## reg])
#endif
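
A minimal usage sketch for the macros above (register and field names are made up; the usual QEMU includes are assumed): REG32 defines A_CTRL/R_CTRL, FIELD defines the shift/length/mask constants for CTRL.EN and CTRL.MODE, and the EX/DP macros extract or deposit those fields.

/* Illustrative only. */
REG32(CTRL, 0x0)
FIELD(CTRL, EN, 0, 1)
FIELD(CTRL, MODE, 1, 3)

static uint32_t ctrl_set_mode(uint32_t ctrl, unsigned mode)
{
    uint32_t out;

    FIELD_DP32(ctrl, CTRL, MODE, mode, out);  /* deposit MODE, keep other bits */
    return out;
}

static bool ctrl_is_enabled(uint32_t ctrl)
{
    return FIELD_EX32(ctrl, CTRL, EN);        /* extract the EN bit */
}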


@@ -1,8 +0,0 @@
#ifndef HW_SPARC_H
#define HW_SPARC_H
void sparc_cpu_register_types(void *opaque);
void leon3_machine_init(struct uc_struct *uc);
void sun4u_machine_init(struct uc_struct *uc);
#endif