#include "xenia/base/memory.h"

#include <fcntl.h>
#include <semaphore.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>
#include <mutex>
#include <vector>

#include "xenia/base/math.h"
#include "xenia/base/platform.h"
// Whether a page may be writable and executable at the same time. POSIX mmap
// imposes no W^X policy of its own on anonymous mappings, so this platform
// always supports it.
bool IsWritableExecutableMemorySupported() { return true; }
// A [region_begin, region_end) address range that is backed by a file view
// created through MapFileView. AllocFixed/DeallocFixed consult this list so
// they never mmap/munmap over a live file mapping (which would silently
// replace or destroy the file backing); they change page protection instead.
struct MappedFileRange {
  size_t region_begin;  // inclusive start address of the view
  size_t region_end;    // exclusive end address of the view
};

// All reads and writes of mapped_file_ranges must hold
// g_mapped_file_ranges_mutex.
std::vector<MappedFileRange> mapped_file_ranges;
std::mutex g_mapped_file_ranges_mutex;
8494void * AllocFixed (void * base_address, size_t length,
8595 AllocationType allocation_type, PageAccess access) {
8696 // mmap does not support reserve / commit, so ignore allocation_type.
8797 uint32_t prot = ToPosixProtectFlags (access);
88- void * result = mmap (base_address, length, prot,
89- MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1 , 0 );
98+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
99+
100+ if (base_address != nullptr ) {
101+ bool should_protect = allocation_type == AllocationType::kCommit ;
102+ if (should_protect) {
103+ if (Protect (base_address, length, access)){
104+ return base_address;
105+ } else {
106+ return nullptr ;
107+ }
108+ }
109+
110+ const size_t region_begin = (size_t )base_address;
111+ const size_t region_end = (size_t )base_address + length;
112+
113+ std::lock_guard<std::mutex> guard (g_mapped_file_ranges_mutex);
114+ for (const auto & mapped_range : mapped_file_ranges) {
115+ // Check if the allocation is within this range...
116+ if (region_begin >= mapped_range.region_begin &&
117+ region_end <= mapped_range.region_end ) {
118+ if (allocation_type == AllocationType::kReserveCommit ) {
119+ if (Protect (base_address, length, access)) {
120+ return base_address;
121+ } else {
122+ assert_always (" Error: Could not change protection of mapped range!" );
123+ }
124+ } else {
125+ return base_address;
126+ }
127+ }
128+ }
129+
130+ flags |= MAP_FIXED_NOREPLACE;
131+ }
132+
133+ void * result = mmap (base_address, length, prot, flags, -1 , 0 );
134+
90135 if (result == MAP_FAILED) {
91136 return nullptr ;
92137 } else {
@@ -96,6 +141,17 @@ void* AllocFixed(void* base_address, size_t length,
96141
97142bool DeallocFixed (void * base_address, size_t length,
98143 DeallocationType deallocation_type) {
144+ const size_t region_begin = (size_t )base_address;
145+ const size_t region_end = (size_t )base_address + length;
146+
147+ std::lock_guard<std::mutex> guard (g_mapped_file_ranges_mutex);
148+ for (const auto & mapped_range : mapped_file_ranges) {
149+ if (region_begin >= mapped_range.region_begin &&
150+ region_end <= mapped_range.region_end ) {
151+ return Protect (base_address, length, PageAccess::kNoAccess );
152+ }
153+ }
154+
99155 return munmap (base_address, length) == 0 ;
100156}
101157
@@ -178,12 +234,39 @@ void CloseFileMappingHandle(FileMappingHandle handle,
178234void * MapFileView (FileMappingHandle handle, void * base_address, size_t length,
179235 PageAccess access, size_t file_offset) {
180236 uint32_t prot = ToPosixProtectFlags (access);
181- return mmap64 (base_address, length, prot, MAP_PRIVATE | MAP_ANONYMOUS, handle,
237+
238+ int flags = MAP_SHARED;
239+ if (base_address != nullptr ) {
240+ flags |= MAP_FIXED_NOREPLACE;
241+ }
242+
243+ void * result = mmap (base_address, length, prot, flags, handle,
182244 file_offset);
245+
246+ if (result == MAP_FAILED) {
247+ return nullptr ;
248+ } else {
249+ std::lock_guard<std::mutex> guard (g_mapped_file_ranges_mutex);
250+ mapped_file_ranges.push_back ({(size_t )result, (size_t )result + length});
251+ return result;
252+ }
183253}
184254
185255bool UnmapFileView (FileMappingHandle handle, void * base_address,
186256 size_t length) {
257+ std::lock_guard<std::mutex> guard (g_mapped_file_ranges_mutex);
258+ for (auto mapped_range = mapped_file_ranges.begin ();
259+ mapped_range != mapped_file_ranges.end ();) {
260+ if (mapped_range->region_begin == (size_t )base_address &&
261+ mapped_range->region_end == (size_t )base_address + length) {
262+ mapped_file_ranges.erase (mapped_range);
263+ return munmap (base_address, length) == 0 ;
264+ } else {
265+ mapped_range++;
266+ }
267+ }
268+ // TODO: Implement partial file unmapping.
269+ assert_always (" Error: Partial unmapping of files not yet supported." );
187270 return munmap (base_address, length) == 0 ;
188271}
189272