JIT: Fixed crash when using Blizzard accelerators and x86-64 JIT

This commit is contained in:
Frode Solheim 2015-10-12 20:50:54 +02:00
parent 83095ed674
commit 12567d307d
3 changed files with 87 additions and 58 deletions

View File

@ -28,6 +28,7 @@
#include "flashrom.h"
#include "uae.h"
#include "uae/ppc.h"
#include "uae/vm.h"
#include "idecontrollers.h"
#include "scsi.h"
#include "cpummu030.h"
@ -1402,7 +1403,13 @@ void cpuboard_cleanup(void)
if (blizzard_jit) {
mapped_free(&blizzardram_bank);
} else {
xfree(blizzardram_nojit_bank.baseaddr);
if (blizzardram_nojit_bank.baseaddr) {
#ifdef CPU_64_BIT
uae_vm_free(blizzardram_nojit_bank.baseaddr, blizzardram_nojit_bank.allocated);
#else
xfree(blizzardram_nojit_bank.baseaddr);
#endif
}
}
if (blizzardmaprom_bank_mapped)
mapped_free(&blizzardmaprom_bank);
@ -1546,8 +1553,18 @@ void cpuboard_init(void)
mapped_malloc(&blizzardram_bank);
}
} else {
if (cpuboard_size)
if (cpuboard_size) {
#ifdef CPU_64_BIT
blizzardram_bank.baseaddr = (uae_u8 *) uae_vm_alloc(
blizzardram_bank.allocated, UAE_VM_32BIT, UAE_VM_READ_WRITE);
#else
blizzardram_bank.baseaddr = xmalloc(uae_u8, blizzardram_bank.allocated);
#endif
write_log("MMAN: Allocated %d bytes (%d MB) for blizzardram_bank at %p\n",
blizzardram_bank.allocated,
blizzardram_bank.allocated / (1024 * 1024),
blizzardram_bank.baseaddr);
}
}
blizzardram_nojit_bank.baseaddr = blizzardram_bank.baseaddr;

View File

@ -440,6 +440,7 @@ static int doinit_shm (void)
addr = expansion_startaddress(addr, z3rtgallocsize);
if (gfxboard_get_configtype(changed_prefs.rtgmem_type) == 3) {
p96base_offset = addr;
write_log("NATMEM: p96base_offset = 0x%x\n", p96base_offset);
// adjust p96mem_offset to beginning of natmem
// by subtracting start of original p96mem_offset from natmem_offset
if (p96base_offset >= 0x10000000) {
@ -457,7 +458,7 @@ static int doinit_shm (void)
natmem_offset, (uae_u8*)natmem_offset + natmemsize,
natmemsize, natmemsize / (1024 * 1024));
if (changed_prefs.rtgmem_size)
write_log (_T("NATMEM: P96 special area: 0x%p-0x%p (%08x %dM)\n"),
write_log (_T("NATMEM: P96 special area: %p-%p (0x%08x %dM)\n"),
p96mem_offset, (uae_u8*)p96mem_offset + changed_prefs.rtgmem_size,
changed_prefs.rtgmem_size, changed_prefs.rtgmem_size >> 20);
canbang = jit_direct_compatible_memory ? 1 : 0;

121
vm.cpp
View File

@ -23,15 +23,13 @@
#include <sys/sysctl.h>
#endif
#ifdef LINUX
#if defined(LINUX) && defined(CPU_x86_64)
#define HAVE_MAP_32BIT 1
#endif
/* Committing memory on Windows zeroes the region. We do the same on other
 * platforms so the functions exhibit the same behavior. I.e. a decommit
 * followed by a commit results in zeroed memory. */
#define CLEAR_MEMORY_ON_COMMIT
// #define CLEAR_MEMORY_ON_COMMIT
// #define LOG_ALLOCATIONS
// #define TRACK_ALLOCATIONS
#ifdef TRACK_ALLOCATIONS
@ -136,66 +134,77 @@ int uae_vm_page_size(void)
static void *uae_vm_alloc_with_flags(uae_u32 size, int flags, int protect)
{
void *address = NULL;
static bool first_allocation = true;
if (first_allocation) {
/* FIXME: log contents of /proc/self/maps on Linux */
/* FIXME: use VirtualQuery function on Windows? */
first_allocation = false;
}
#ifdef LOG_ALLOCATIONS
uae_log("VM: Allocate 0x%-8x bytes [%d] (%s)\n",
size, flags, protect_description(protect));
#endif
#ifdef _WIN32
int va_type = MEM_COMMIT | MEM_RESERVE;
if (flags & UAE_VM_WRITE_WATCH) {
va_type |= MEM_WRITE_WATCH;
}
int va_protect = protect_to_native(protect);
#ifdef CPU_64_BIT
if (flags & UAE_VM_32BIT) {
/* Stupid algorithm to find available space, but should
* work well enough when there is not a lot of allocations. */
uae_u8 *p = (uae_u8 *) 0x50000000;
while (address == NULL) {
if (p >= (void*) 0x60000000) {
break;
}
address = VirtualAlloc(p, size, va_type, va_protect);
p += uae_vm_page_size();
}
}
#endif
if (!address) {
address = VirtualAlloc(NULL, size, va_type, va_protect);
}
#else
//size = size < uae_vm_page_size() ? uae_vm_page_size() : size;
int mmap_flags = MAP_PRIVATE | MAP_ANON;
int mmap_prot = protect_to_native(protect);
#ifdef CPU_64_BIT
#endif
#ifndef CPU_64_BIT
flags &= ~UAE_VM_32BIT;
#endif
if (flags & UAE_VM_32BIT) {
#ifdef HAVE_MAP_32BIT
mmap_flags |= MAP_32BIT;
#else
/* Stupid algorithm to find available space, but should
* work well enough when there is not a lot of allocations. */
uae_u8 *p = natmem_offset - 0x10000000;
uae_u8 *p_end = p + 0x10000000;
while (address == NULL) {
if (p >= p_end) {
break;
}
printf("%p\n", p);
address = mmap(p, size, mmap_prot, mmap_flags, -1, 0);
/* FIXME: check 32-bit result */
if (address == MAP_FAILED) {
address = NULL;
}
p += uae_vm_page_size();
int step = uae_vm_page_size();
uae_u8 *p = (uae_u8 *) 0x40000000;
uae_u8 *p_end = natmem_reserved - size;
if (size > 1024 * 1024) {
/* Reserve some space for smaller allocations */
p += 32 * 1024 * 1024;
step = 1024 * 1024;
}
#ifdef HAVE_MAP_32BIT
address = mmap(0, size, mmap_prot, mmap_flags | MAP_32BIT, -1, 0);
if (address == MAP_FAILED) {
address = NULL;
}
#endif
}
while (address == NULL) {
if (p > p_end) {
break;
}
#ifdef _WIN32
address = VirtualAlloc(p, size, va_type, va_protect);
#else
address = mmap(p, size, mmap_prot, mmap_flags, -1, 0);
// write_log("VM: trying %p step is 0x%x = %p\n", p, step, address);
if (address == MAP_FAILED) {
address = NULL;
} else if (((uintptr_t) address) + size > (uintptr_t) 0xffffffff) {
munmap(address, size);
address = NULL;
}
#endif
if (address == NULL) {
p += step;
}
} else {
#ifdef _WIN32
address = VirtualAlloc(NULL, size, va_type, va_protect);
#else
address = mmap(0, size, mmap_prot, mmap_flags, -1, 0);
if (address == MAP_FAILED) {
address = NULL;
}
}
#endif
}
if (address == NULL) {
uae_log("VM: uae_vm_alloc(%u, %d, %d) mmap failed (%d)\n",
size, flags, protect, errno);
@ -204,7 +213,9 @@ static void *uae_vm_alloc_with_flags(uae_u32 size, int flags, int protect)
#ifdef TRACK_ALLOCATIONS
add_allocation(address, size);
#endif
#ifdef LOG_ALLOCATIONS
uae_log("VM: %p\n", address);
#endif
return address;
}
@ -328,7 +339,7 @@ void *uae_vm_reserve(uae_u32 size, int flags)
#else
if (true) {
#endif
uintptr_t try_addr = 0x50000000;
uintptr_t try_addr = 0x80000000;
while (address == NULL) {
address = try_reserve(try_addr, size, flags);
if (address == NULL) {
@ -395,16 +406,16 @@ bool uae_vm_decommit(void *address, uae_u32 size)
#ifdef _WIN32
return VirtualFree (address, size, MEM_DECOMMIT) != 0;
#else
#if 0
/* FIXME: perhaps we can unmmap and mmap the memory region again to
* allow the operating system to throw away the (unused) pages. Of
* course, we have problem if re-mmaping it fails. If we do this,
* we may be able to remove the memory clear operation in
* uae_vm_commit. We might also be able to use mmap with MAP_FIXED
* to more "safely" overwrite the old mapping. */
munmap(address, size);
mmap(...);
#endif
return do_protect(address, size, UAE_VM_NO_ACCESS);
/* Re-map the memory so we get fresh unused pages (and the old ones can be
* released and physical memory reclaimed). We also assume that the new
* pages will be zero-initialized (tested on Linux and OS X). */
void *result = mmap(address, size, PROT_NONE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (result == MAP_FAILED) {
uae_log("VM: Warning - could not re-map with MAP_FIXED at %p\n",
address);
do_protect(address, size, UAE_VM_NO_ACCESS);
}
return result != MAP_FAILED;
#endif
}