Support uc_mem_protect on MMIO regions

Also make MMIO ranges return the correct error (UC_ERR_READ_PROT / UC_ERR_WRITE_PROT) when they are accessed without the required protection.
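Before the diff, a minimal standalone sketch of the new behaviour against Unicorn's public C API. It mirrors the test_mem_protect_mmio test added below; the callback names, the main() wrapper, and the printf call are illustrative, the addresses, sizes, and guest code bytes are taken from that test, and error checking is omitted for brevity.

#include <unicorn/unicorn.h>
#include <stdio.h>

// MMIO read callback; offset is relative to the start of the (possibly split) MMIO region.
static uint64_t mmio_read(uc_engine *uc, uint64_t offset, unsigned size, void *user_data)
{
    return 0x114514; // arbitrary value handed back to the guest (same as the test)
}

// MMIO write callback; never reached here, because the only guest write targets
// the sub-range whose write permission is removed below.
static void mmio_write(uc_engine *uc, uint64_t offset, unsigned size, uint64_t value, void *user_data)
{
}

int main(void)
{
    // mov eax, [0x2020]; mov [0x2020], eax  (same guest code as the new test)
    const char code[] = "\xa1\x20\x20\x00\x00\x00\x00\x00\x00"
                        "\xa3\x20\x20\x00\x00\x00\x00\x00\x00";
    uc_engine *uc;
    uc_err err;

    uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
    uc_mem_map(uc, 0x8000, 0x1000, UC_PROT_ALL);
    uc_mem_write(uc, 0x8000, code, sizeof(code) - 1);

    // Map [0x1000, 0x4000) as MMIO, then drop write permission on [0x2000, 0x3000).
    // Internally the MMIO region is split and the middle chunk loses its write callback.
    uc_mmio_map(uc, 0x1000, 0x3000, mmio_read, NULL, mmio_write, NULL);
    uc_mem_protect(uc, 0x2000, 0x1000, UC_PROT_READ);

    // The read at 0x2020 still reaches mmio_read; the write is now rejected,
    // so emulation stops with UC_ERR_WRITE_PROT.
    err = uc_emu_start(uc, 0x8000, 0x8000 + sizeof(code) - 1, 0, 0);
    printf("emulation stopped with: %s\n", uc_strerror(err));

    uc_close(uc);
    return 0;
}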
@@ -1657,6 +1657,13 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     res = load_memop(haddr, op);

 _out:
+    // mmio error check
+    if (uc->invalid_error != UC_ERR_OK) {
+        uc->invalid_addr = addr;
+        cpu_exit(uc->cpu);
+        return 0;
+    }
+
     // Unicorn: callback on successful data read
     if (!code_read) {
         if (!uc->size_recur_mem) { // disabling read callback if in recursive call
@@ -88,6 +88,7 @@ static uint64_t mmio_read_wrapper(struct uc_struct *uc, void *opaque, hwaddr add
     if (cbs->read) {
         return cbs->read(uc, addr, size, cbs->user_data_read);
     } else {
+        uc->invalid_error = UC_ERR_READ_PROT;
         return 0;
     }
 }
@@ -100,6 +101,8 @@ static void mmio_write_wrapper(struct uc_struct *uc, void *opaque, hwaddr addr,
     addr = addr & ( (target_ulong)(-1) );
     if (cbs->write) {
         cbs->write(uc, addr, size, data, cbs->user_data_write);
+    } else {
+        uc->invalid_error = UC_ERR_WRITE_PROT;
     }
 }

@@ -123,7 +126,9 @@ MemoryRegion *memory_map_io(struct uc_struct *uc, ram_addr_t begin, size_t size,
     memset(ops, 0, sizeof(*ops));

     ops->read = mmio_read_wrapper;
+    ops->read_with_attrs = NULL;
     ops->write = mmio_write_wrapper;
+    ops->write_with_attrs = NULL;
     ops->endianness = DEVICE_NATIVE_ENDIAN;

     memory_region_init_io(uc, mmio, ops, opaques, size);
@@ -219,6 +219,43 @@ static void test_mem_protect_remove_exec(void)
     OK(uc_close(uc));
 }

+static uint64_t test_mem_protect_mmio_read_cb(struct uc_struct *uc, uint64_t addr, unsigned size, void *user_data) {
+    TEST_CHECK(addr == 0x20); // note, it's not 0x1020
+
+    *(uint64_t*)user_data = *(uint64_t*)user_data + 1;
+    return 0x114514;
+}
+
+static void test_mem_protect_mmio_write_cb(struct uc_struct *uc, uint64_t addr, unsigned size, uint64_t data, void *user_data) {
+    TEST_CHECK(false);
+    return;
+}
+
+
+static void test_mem_protect_mmio(void)
+{
+    uc_engine *uc;
+    // mov eax, [0x2020]; mov [0x2020], eax
+    char code[] = "\xa1\x20\x20\x00\x00\x00\x00\x00\x00\xa3\x20\x20\x00\x00\x00\x00\x00\x00";
+    uint64_t called = 0;
+    uint64_t r_eax;
+
+    OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc));
+    OK(uc_mem_map(uc, 0x8000, 0x1000, UC_PROT_ALL));
+    OK(uc_mem_write(uc, 0x8000, code, sizeof(code) - 1));
+
+    OK(uc_mmio_map(uc, 0x1000, 0x3000, test_mem_protect_mmio_read_cb, (void*)&called, test_mem_protect_mmio_write_cb, (void*)&called));
+    OK(uc_mem_protect(uc, 0x2000, 0x1000, UC_PROT_READ));
+
+    uc_assert_err(UC_ERR_WRITE_PROT, uc_emu_start(uc, 0x8000, 0x8000 + sizeof(code) - 1, 0, 0));
+    OK(uc_reg_read(uc, UC_X86_REG_RAX, &r_eax));
+
+    TEST_CHECK(called == 1);
+    TEST_CHECK(r_eax == 0x114514);
+
+    OK(uc_close(uc));
+}
+
 TEST_LIST = {{"test_map_correct", test_map_correct},
              {"test_map_wrapping", test_map_wrapping},
              {"test_mem_protect", test_mem_protect},
@@ -229,4 +266,5 @@ TEST_LIST = {{"test_map_correct", test_map_correct},
              {"test_map_wrap", test_map_wrap},
              {"test_map_big_memory", test_map_big_memory},
              {"test_mem_protect_remove_exec", test_mem_protect_remove_exec},
+             {"test_mem_protect_mmio", test_mem_protect_mmio},
              {NULL, NULL}};
uc.c
@@ -1114,15 +1114,13 @@ static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr)
 /*
    This function is similar to split_region, but for MMIO memory.

-   This function would delete the region unconditionally.
-
    Note this function may be called recursively.
 */
 static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
-                              uint64_t address, size_t size)
+                              uint64_t address, size_t size, bool do_delete)
 {
     uint64_t begin, end, chunk_end;
-    size_t l_size, r_size;
+    size_t l_size, r_size, m_size;
     mmio_cbs backup;

     chunk_end = address + size;
@@ -1165,6 +1163,7 @@ static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
     // compute sub region sizes
     l_size = (size_t)(address - begin);
     r_size = (size_t)(end - chunk_end);
+    m_size = (size_t)(chunk_end - address);

     if (l_size > 0) {
         if (uc_mmio_map(uc, begin, l_size, backup.read, backup.user_data_read,
@@ -1173,6 +1172,13 @@ static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
         }
     }

+    if (m_size > 0 && !do_delete) {
+        if (uc_mmio_map(uc, address, m_size, backup.read, backup.user_data_read,
+                        backup.write, backup.user_data_write) != UC_ERR_OK) {
+            return false;
+        }
+    }
+
     if (r_size > 0) {
         if (uc_mmio_map(uc, chunk_end, r_size, backup.read,
                         backup.user_data_read, backup.write,
@@ -1360,6 +1366,7 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size,
     uint64_t addr = address;
     uint64_t pc;
     size_t count, len;
+    mmio_cbs* new_cb;
     bool remove_exec = false;

     UC_INIT(uc);
@@ -1400,18 +1407,36 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size,
     while (count < size) {
         mr = memory_mapping(uc, addr);
         len = (size_t)MIN(size - count, mr->end - addr);
-        if (!split_region(uc, mr, addr, len, false)) {
-            return UC_ERR_NOMEM;
-        }
+        if (mr->ram) {
+            if (!split_region(uc, mr, addr, len, false)) {
+                return UC_ERR_NOMEM;
+            }

-        mr = memory_mapping(uc, addr);
-        // will this remove EXEC permission?
-        if (((mr->perms & UC_PROT_EXEC) != 0) &&
-            ((perms & UC_PROT_EXEC) == 0)) {
-            remove_exec = true;
+            mr = memory_mapping(uc, addr);
+            // will this remove EXEC permission?
+            if (((mr->perms & UC_PROT_EXEC) != 0) &&
+                ((perms & UC_PROT_EXEC) == 0)) {
+                remove_exec = true;
+            }
+            mr->perms = perms;
+            uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);
+
+        } else {
+            if(!split_mmio_region(uc, mr, addr, len, false)) {
+                return UC_ERR_NOMEM;
+            }
+
+            mr = memory_mapping(uc, addr);
+            new_cb = (mmio_cbs*)mr->opaque;
+
+            if (!(perms & UC_PROT_READ)) {
+                new_cb->read = NULL;
+            }
+
+            if (!(perms & UC_PROT_WRITE)) {
+                new_cb->write = NULL;
+            }
         }
-        mr->perms = perms;
-        uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);

         count += len;
         addr += len;
@@ -1471,7 +1496,7 @@ uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size)
         mr = memory_mapping(uc, addr);
         len = (size_t)MIN(size - count, mr->end - addr);
         if (!mr->ram) {
-            if (!split_mmio_region(uc, mr, addr, len)) {
+            if (!split_mmio_region(uc, mr, addr, len, true)) {
                 return UC_ERR_NOMEM;
             }
         } else {