Optimize Notdirty write (#2031)
* enable notdirty_write for snapshots when possible

  A snapshot only happens when the priority of the memory region is smaller
  than the snapshot_level. After a snapshot, notdirty can be set.

* disable notdirty_write for self-modifying code

  When SMC accesses the memory region more than once, the TB must be rebuilt
  multiple times. Fixes #2029.

* notdirty_write: better hook check

  Check all relevant memory hooks before enabling notdirty write. This also
  checks whether the memory hook is registered for the affected region, so it
  is possible to use notdirty write while having hooks on other addresses.

* notdirty_write: check for addr_write in the snapshot case

* self-modifying code: clear recursive mem access

  When self-modifying code performs an unaligned memory access,
  uc->size_recur_mem is sometimes changed but, on the notdirty write path,
  not changed back. This causes mem hooks to be missed. To fix this,
  uc->size_recur_mem is set to 0 before each cpu_exec() call.
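The "better hook check" bullet is the user-visible part: a memory hook on one region no longer disables the notdirty fast path for writes everywhere else. Below is a minimal sketch against the public Unicorn C API, assuming unicorn 2.x; it is illustrative only (not part of the commit), and the addresses and code bytes are invented for the example.

/* Illustrative only: a write hook on one page, emulated writes to another.
 * With this commit the unhooked page keeps the notdirty fast path. */
#include <unicorn/unicorn.h>
#include <inttypes.h>
#include <stdio.h>

static void on_write(uc_engine *uc, uc_mem_type type, uint64_t address,
                     int size, int64_t value, void *user_data)
{
    (void)uc; (void)type; (void)value; (void)user_data;
    printf("hooked write: %d bytes at 0x%" PRIx64 "\n", size, address);
}

int main(void)
{
    uc_engine *uc;
    uc_hook hh;
    /* mov dword ptr [0x200000], eax */
    const uint8_t code[] = {0xA3, 0x00, 0x00, 0x20, 0x00};

    uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_ALL); /* code page           */
    uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_ALL); /* data page, unhooked */
    uc_mem_map(uc, 0x300000, 0x1000, UC_PROT_ALL); /* hooked page         */
    uc_mem_write(uc, 0x100000, code, sizeof(code));

    /* The hook covers only 0x300000..0x300fff, so the store to 0x200000
     * can go dirty again after the first slow-path write. */
    uc_hook_add(uc, &hh, UC_HOOK_MEM_WRITE, on_write, NULL,
                0x300000, 0x300fff);

    uc_emu_start(uc, 0x100000, 0x100000 + sizeof(code), 0, 0);
    uc_close(uc);
    return 0;
}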
@@ -1188,15 +1188,15 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                            CPUIOTLBEntry *iotlbentry, uintptr_t retaddr,
-                           MemoryRegion *mr)
+                           CPUTLBEntry *tlbe)
 {
 #ifdef TARGET_ARM
     struct uc_struct *uc = cpu->uc;
 #endif
     ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+    MemoryRegion *mr = cpu->uc->memory_mapping(cpu->uc, tlbe->paddr | (mem_vaddr & ~TARGET_PAGE_MASK));
 
     if (mr == NULL) {
         mr = cpu->uc->memory_mapping(cpu->uc, mem_vaddr);
     }
 
-    if ((mr->perms & UC_PROT_EXEC) != 0) {
+    if (mr && (mr->perms & UC_PROT_EXEC) != 0) {
         struct page_collection *pages
             = page_collection_lock(cpu->uc, ram_addr, ram_addr + size);
         tb_invalidate_phys_page_fast(cpu->uc, pages, ram_addr, size, retaddr);
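The helper now derives the region itself: first from the physical address recorded in the TLB entry (page frame from tlbe->paddr, page offset from the vaddr), then from the virtual address if that misses. A sketch of the two-step lookup; the function name is ours, the calls mirror the hunk above:

/* Sketch, not the commit's code: resolve the MemoryRegion for a write. */
static MemoryRegion *resolve_write_region(struct uc_struct *uc,
                                          CPUTLBEntry *tlbe, vaddr mem_vaddr)
{
    /* Physical page frame from the TLB entry, offset from the vaddr. */
    MemoryRegion *mr = uc->memory_mapping(
        uc, tlbe->paddr | (mem_vaddr & ~TARGET_PAGE_MASK));

    /* Fall back to a virtual-address lookup, as the hunk does. */
    return mr ? mr : uc->memory_mapping(uc, mem_vaddr);
}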
@@ -1208,8 +1208,9 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     // - have memory hooks installed
     // - or doing snapshot
     // , then never clean the tlb
-    if (!(cpu->uc->snapshot_level > 0 || mr->priority > 0) &&
-        !(HOOK_EXISTS(cpu->uc, UC_HOOK_MEM_READ) || HOOK_EXISTS(cpu->uc, UC_HOOK_MEM_WRITE))) {
+    if (!(!mr || (tlbe->addr_write != -1 && mr->priority < cpu->uc->snapshot_level)) &&
+        !(tlbe->addr_code != -1) &&
+        !uc_mem_hook_installed(cpu->uc, tlbe->paddr | (mem_vaddr & ~TARGET_PAGE_MASK))) {
         tlb_set_dirty(cpu, mem_vaddr);
     }
 }
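Unpacked, the new condition is three vetoes on re-dirtying the TLB entry. A hedged paraphrase follows; the function name and comments are ours (reading the clauses against the commit message), the logic comes from the hunk:

/* Sketch: when may the entry go dirty again (fast-path future writes)? */
static bool may_set_dirty(struct uc_struct *uc, MemoryRegion *mr,
                          CPUTLBEntry *tlbe, vaddr mem_vaddr)
{
    /* 1. No region, or a writable entry whose region priority is still
     *    below the snapshot level, i.e. a snapshot copy-on-write is
     *    still pending and must see the write. */
    if (!mr || (tlbe->addr_write != -1 && mr->priority < uc->snapshot_level)) {
        return false;
    }
    /* 2. The page is also mapped for execution; self-modifying code must
     *    keep taking the slow path so its TBs get invalidated. */
    if (tlbe->addr_code != -1) {
        return false;
    }
    /* 3. A memory hook is registered for this physical address and must
     *    observe every access. */
    if (uc_mem_hook_installed(uc, tlbe->paddr | (mem_vaddr & ~TARGET_PAGE_MASK))) {
        return false;
    }
    return true; /* safe to call tlb_set_dirty(cpu, mem_vaddr) */
}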
@@ -1288,7 +1289,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
 
     /* Handle clean RAM pages. */
     if (tlb_addr & TLB_NOTDIRTY) {
-        notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr, NULL);
+        notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr, entry);
     }
 }
@@ -1414,7 +1415,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         notdirty_write(env_cpu(env), addr, 1 << s_bits,
-                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr, NULL);
+                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr, tlbe);
     }
 
     return hostaddr;
@@ -2273,7 +2274,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     /* Handle clean RAM pages. */
     if (tlb_addr & TLB_NOTDIRTY) {
-        notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr, mr);
+        notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr, entry);
     }
 
     haddr = (void *)((uintptr_t)addr + entry->addend);
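All three call sites (probe_access, atomic_mmu_lookup, store_helper) now pass the concrete CPUTLBEntry (entry / tlbe) instead of NULL or a pre-resolved MemoryRegion, which is what lets notdirty_write consult addr_write, addr_code, and paddr on its own. The last commit bullet, resetting uc->size_recur_mem before each cpu_exec() call, lives outside these hunks; here is a self-contained repro sketch in the spirit of #2029 using only the public API. It is not the commit's own test: the code bytes and addresses are invented, and the store deliberately lands in the executable page and crosses a page boundary (the unaligned case from the bullet).

/* Repro sketch in the spirit of #2029 (not the commit's own test). */
#include <unicorn/unicorn.h>
#include <stdio.h>

static void on_write(uc_engine *uc, uc_mem_type type, uint64_t address,
                     int size, int64_t value, void *user_data)
{
    (void)uc; (void)type; (void)address; (void)size; (void)value;
    ++*(int *)user_data;
}

int main(void)
{
    uc_engine *uc;
    uc_hook hh;
    int writes = 0;
    /* mov dword ptr [0x100ffe], eax -- hits the executable page and
     * crosses into the next page. */
    const uint8_t code[] = {0xA3, 0xFE, 0x0F, 0x10, 0x00};

    uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    uc_mem_map(uc, 0x100000, 0x2000, UC_PROT_ALL);
    uc_mem_write(uc, 0x100000, code, sizeof(code));
    /* begin > end means the hook covers all addresses. */
    uc_hook_add(uc, &hh, UC_HOOK_MEM_WRITE, on_write, &writes, 1, 0);

    /* Run the same store twice; with the size_recur_mem reset the hook
     * fires on both runs instead of being silently skipped after the
     * unaligned self-modifying write. */
    uc_emu_start(uc, 0x100000, 0x100000 + sizeof(code), 0, 0);
    uc_emu_start(uc, 0x100000, 0x100000 + sizeof(code), 0, 0);
    printf("hooked writes: %d\n", writes);
    uc_close(uc);
    return 0;
}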