 #endif
 #include <stdio.h>
 
+// Start searching for available memory region past PAGEZERO, which is
+// 4KB on 32-bit and 4GB on 64-bit.
+#define GAP_SEARCH_START_ADDRESS \
+  ((SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000)
+
 #include "sanitizer_common.h"
 #include "sanitizer_file.h"
 #include "sanitizer_flags.h"
@@ -58,6 +63,7 @@ extern char ***_NSGetArgv(void);
 #include <dlfcn.h>  // for dladdr()
 #include <errno.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <libkern/OSAtomic.h>
 #include <mach-o/dyld.h>
 #include <mach/mach.h>
@@ -1100,6 +1106,67 @@ static void StripEnv() {
 }
 #endif  // SANITIZER_GO
 
+// Prints out a consolidated memory map: contiguous regions
+// are merged together.
+static void PrintVmmap() {
+  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
+  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+  kern_return_t kr = KERN_SUCCESS;
+
+  Report("Memory map:\n");
+  mach_vm_address_t last = 0;
+  mach_vm_address_t lastsz = 0;
+
+  while (1) {
+    mach_vm_size_t vmsize = 0;
+    natural_t depth = 0;
+    vm_region_submap_short_info_data_64_t vminfo;
+    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
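+    // Query the mapped region containing 'address' (or the next one above
+    // it); on success the call sets 'address' to the region start and
+    // 'vmsize' to its length.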
+    kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+                                (vm_region_info_t)&vminfo, &count);
+
+    if (kr == KERN_DENIED) {
+      Report(
+          "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
+          "map.\n");
+      Report(
+          "HINT: Check whether mach_vm_region_recurse is allowed by "
+          "sandbox.\n");
+    }
+
+    if (kr == KERN_SUCCESS && address < max_vm_address) {
+      if (last + lastsz == address) {
+        // This region is contiguous with the last; merge together.
+        lastsz += vmsize;
+      } else {
+        if (lastsz)
+          Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", last,
+                 last + lastsz, lastsz);
+
+        last = address;
+        lastsz = vmsize;
+      }
+      address += vmsize;
+    } else {
+      // We've reached the end of the memory map. Print the last remaining
+      // region, if there is one.
+      if (lastsz)
+        Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", last,
+               last + lastsz, lastsz);
+
+      break;
+    }
+  }
+}
+
+static void ReportShadowAllocFail(uptr shadow_size_bytes, uptr alignment) {
+  Report(
+      "FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
+      "(alignment=%p).\n",
+      shadow_size_bytes, alignment);
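+  // Dump the consolidated memory map so the log shows which regions are
+  // already occupied.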
+  PrintVmmap();
+}
+
 char **GetArgv() {
   return *_NSGetArgv();
 }
@@ -1207,10 +1274,11 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
     if (new_max_vm < max_occupied_addr) {
       Report("Unable to find a memory range for dynamic shadow.\n");
       Report(
-          "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
-          "new_max_vm = %p\n",
-          (void *)space_size, (void *)largest_gap_found,
-          (void *)max_occupied_addr, (void *)new_max_vm);
+          "\tspace_size = %p\n\tlargest_gap_found = %p\n\tmax_occupied_addr "
+          "= %p\n\tnew_max_vm = %p\n",
+          (void *)space_size, (void *)largest_gap_found, (void *)max_occupied_addr,
+          (void *)new_max_vm);
+      ReportShadowAllocFail(shadow_size_bytes, alignment);
       CHECK(0 && "cannot place shadow");
     }
     RestrictMemoryToMaxAddress(new_max_vm);
@@ -1221,6 +1289,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                                             nullptr, nullptr);
     if (shadow_start == 0) {
       Report("Unable to find a memory range after restricting VM.\n");
+      ReportShadowAllocFail(shadow_size_bytes, alignment);
       CHECK(0 && "cannot place shadow after restricting vm");
     }
   }
@@ -1236,26 +1305,19 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
 }
 
 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
-                              uptr *largest_gap_found,
-                              uptr *max_occupied_addr) {
-  typedef vm_region_submap_short_info_data_64_t RegionInfo;
-  enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
-  // Start searching for available memory region past PAGEZERO, which is
-  // 4KB on 32-bit and 4GB on 64-bit.
-  mach_vm_address_t start_address =
-      (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
-
+                              uptr* largest_gap_found,
+                              uptr* max_occupied_addr) {
   const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
-  mach_vm_address_t address = start_address;
-  mach_vm_address_t free_begin = start_address;
+  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+  mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS;
   kern_return_t kr = KERN_SUCCESS;
   if (largest_gap_found) *largest_gap_found = 0;
   if (max_occupied_addr) *max_occupied_addr = 0;
   while (kr == KERN_SUCCESS) {
     mach_vm_size_t vmsize = 0;
     natural_t depth = 0;
-    RegionInfo vminfo;
-    mach_msg_type_number_t count = kRegionInfoSize;
+    vm_region_submap_short_info_data_64_t vminfo;
+    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
     kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                                 (vm_region_info_t)&vminfo, &count);
 