2222# endif
2323# include < stdio.h>
2424
25+ // Start searching for available memory region past PAGEZERO, which is
26+ // 4KB on 32-bit and 4GB on 64-bit.
27+ # define GAP_SEARCH_START_ADDRESS \
28+ ((SANITIZER_WORDSIZE == 32 ) ? 0x000000001000 : 0x000100000000 )
29+
2530# include " sanitizer_common.h"
2631# include " sanitizer_file.h"
2732# include " sanitizer_flags.h"
@@ -58,9 +63,11 @@ extern char ***_NSGetArgv(void);
5863# include < dlfcn.h> // for dladdr()
5964# include < errno.h>
6065# include < fcntl.h>
66+ # include < inttypes.h>
6167# include < libkern/OSAtomic.h>
6268# include < mach-o/dyld.h>
6369# include < mach/mach.h>
70+ # include < mach/mach_error.h>
6471# include < mach/mach_time.h>
6572# include < mach/vm_statistics.h>
6673# include < malloc/malloc.h>
@@ -1100,6 +1107,67 @@ static void StripEnv() {
11001107}
11011108#endif // SANITIZER_GO
11021109
// Prints one (possibly merged) memory region in the consolidated map.
static void PrintVmmapRegion(mach_vm_address_t start, mach_vm_address_t size) {
  Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void *)start,
         (void *)(start + size), size);
}

// Prints out a consolidated memory map: contiguous regions
// are merged together.
static void PrintVmmap() {
  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;

  Report("Memory map:\n");
  // Pending merged region is [last, last + lastsz); lastsz == 0 means none.
  mach_vm_address_t last = 0;
  mach_vm_address_t lastsz = 0;

  while (1) {
    mach_vm_size_t vmsize = 0;
    natural_t depth = 0;
    vm_region_submap_short_info_data_64_t vminfo;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
    // On success, `address` is advanced to the start of the region found at
    // or after the requested address.
    kern_return_t kr =
        mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                               (vm_region_info_t)&vminfo, &count);

    if (kr == KERN_DENIED) {
      Report(
          "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
          "map.\n");
      Report(
          "HINT: Check whether mach_vm_region_recurse is allowed by "
          "sandbox.\n");
    }

    if (kr != KERN_SUCCESS || address >= max_vm_address) {
      // We've reached the end of the memory map. Print the last remaining
      // region, if there is one.
      if (lastsz)
        PrintVmmapRegion(last, lastsz);
      break;
    }

    if (last + lastsz == address) {
      // This region is contiguous with the last; merge together.
      lastsz += vmsize;
    } else {
      // Flush the pending merged region before starting a new one.
      if (lastsz)
        PrintVmmapRegion(last, lastsz);
      last = address;
      lastsz = vmsize;
    }
    address += vmsize;
  }
}
1162+
1163+ static void ReportShadowAllocFail (uptr shadow_size_bytes, uptr alignment) {
1164+ Report (
1165+ " FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
1166+ " (alignment=%p).\n " ,
1167+ (void *)shadow_size_bytes, (void *)alignment);
1168+ PrintVmmap ();
1169+ }
1170+
11031171char **GetArgv () {
11041172 return *_NSGetArgv ();
11051173}
@@ -1207,10 +1275,11 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
12071275 if (new_max_vm < max_occupied_addr) {
12081276 Report (" Unable to find a memory range for dynamic shadow.\n " );
12091277 Report (
1210- " space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
1211- " new_max_vm = %p\n " ,
1212- (void *)space_size, (void *)largest_gap_found,
1213- (void *)max_occupied_addr, (void *)new_max_vm);
1278+ " \t space_size = %p\n\t largest_gap_found = %p\n\t max_occupied_addr "
1279+ " = %p\n\t new_max_vm = %p\n " ,
1280+ (void *)space_size, (void *)largest_gap_found, (void *)max_occupied_addr,
1281+ (void *)new_max_vm);
1282+ ReportShadowAllocFail (shadow_size_bytes, alignment);
12141283 CHECK (0 && " cannot place shadow" );
12151284 }
12161285 RestrictMemoryToMaxAddress (new_max_vm);
@@ -1221,6 +1290,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
12211290 nullptr , nullptr );
12221291 if (shadow_start == 0 ) {
12231292 Report (" Unable to find a memory range after restricting VM.\n " );
1293+ ReportShadowAllocFail (shadow_size_bytes, alignment);
12241294 CHECK (0 && " cannot place shadow after restricting vm" );
12251295 }
12261296 }
@@ -1236,40 +1306,51 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
12361306}
12371307
12381308uptr FindAvailableMemoryRange (uptr size, uptr alignment, uptr left_padding,
1239- uptr *largest_gap_found,
1240- uptr *max_occupied_addr) {
1241- typedef vm_region_submap_short_info_data_64_t RegionInfo;
1242- enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
1243- // Start searching for available memory region past PAGEZERO, which is
1244- // 4KB on 32-bit and 4GB on 64-bit.
1245- mach_vm_address_t start_address =
1246- (SANITIZER_WORDSIZE == 32 ) ? 0x000000001000 : 0x000100000000 ;
1247-
1309+ uptr* largest_gap_found,
1310+ uptr* max_occupied_addr) {
12481311 const mach_vm_address_t max_vm_address = GetMaxVirtualAddress () + 1 ;
1249- mach_vm_address_t address = start_address ;
1250- mach_vm_address_t free_begin = start_address ;
1312+ mach_vm_address_t address = GAP_SEARCH_START_ADDRESS ;
1313+ mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS ;
12511314 kern_return_t kr = KERN_SUCCESS;
12521315 if (largest_gap_found) *largest_gap_found = 0 ;
12531316 if (max_occupied_addr) *max_occupied_addr = 0 ;
12541317 while (kr == KERN_SUCCESS) {
12551318 mach_vm_size_t vmsize = 0 ;
12561319 natural_t depth = 0 ;
1257- RegionInfo vminfo;
1258- mach_msg_type_number_t count = kRegionInfoSize ;
1320+ vm_region_submap_short_info_data_64_t vminfo;
1321+ mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 ;
12591322 kr = mach_vm_region_recurse (mach_task_self (), &address, &vmsize, &depth,
12601323 (vm_region_info_t )&vminfo, &count);
12611324
1262- // There are cases where going beyond the processes' max vm does
1263- // not return KERN_INVALID_ADDRESS so we check for going beyond that
1264- // max address as well.
1265- if (kr == KERN_INVALID_ADDRESS || address > max_vm_address) {
1325+ if (kr == KERN_SUCCESS) {
1326+ // There are cases where going beyond the processes' max vm does
1327+ // not return KERN_INVALID_ADDRESS so we check for going beyond that
1328+ // max address as well.
1329+ if (address > max_vm_address) {
1330+ address = max_vm_address;
1331+ kr = -1 ; // break after this iteration.
1332+ }
1333+
1334+ if (max_occupied_addr)
1335+ *max_occupied_addr = address + vmsize;
1336+ } else if (kr == KERN_INVALID_ADDRESS) {
12661337 // No more regions beyond "address", consider the gap at the end of VM.
12671338 address = max_vm_address;
1268- vmsize = 0 ;
1269- kr = -1 ; // break after this iteration.
1339+
1340+ // We will break after this iteration anyway since kr != KERN_SUCCESS
1341+ } else if (kr == KERN_DENIED) {
1342+ Report (" ERROR: Unable to find a memory range for dynamic shadow.\n " );
1343+ Report (" HINT: Ensure mach_vm_region_recurse is allowed under sandbox.\n " );
1344+ Die ();
12701345 } else {
1271- if (max_occupied_addr) *max_occupied_addr = address + vmsize;
1346+ Report (
1347+ " WARNING: mach_vm_region_recurse returned unexpected code %d (%s)\n " ,
1348+ kr, mach_error_string (kr));
1349+ DCHECK (false && " mach_vm_region_recurse returned unexpected code" );
1350+ break ; // address is not valid unless KERN_SUCCESS, therefore we must not
1351+ // use it.
12721352 }
1353+
12731354 if (free_begin != address) {
12741355 // We found a free region [free_begin..address-1].
12751356 uptr gap_start = RoundUpTo ((uptr)free_begin + left_padding, alignment);
@@ -1292,6 +1373,29 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
12921373 return 0 ;
12931374}
12941375
// Returns true if the address is definitely mapped, and false if it is not
// mapped or could not be determined.
bool IsAddressInMappedRegion(uptr addr) {
  mach_vm_address_t region_start = addr;
  mach_vm_size_t region_size = 0;
  natural_t nesting_depth = 0;
  vm_region_submap_short_info_data_64_t region_info;
  mach_msg_type_number_t info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;

  // Query the region containing `addr`, or the next one after it.
  kern_return_t kr = mach_vm_region_recurse(
      mach_task_self(), &region_start, &region_size, &nesting_depth,
      (vm_region_info_t)&region_info, &info_count);

  if (kr == KERN_DENIED) {
    Report(
        "WARN: mach_vm_region_recurse returned KERN_DENIED when checking "
        "whether an address is mapped.\n");
    Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
  }

  if (kr != KERN_SUCCESS)
    return false;
  // The kernel may have advanced `region_start` past `addr`, meaning `addr`
  // itself was unmapped; the address is mapped only if it falls inside the
  // returned range.
  return addr >= region_start && addr < region_start + region_size;
}
1398+
// FIXME implement on this platform.
// No-op stub: memory profiling is not implemented on this platform, so the
// callback `cb` is never invoked and `stats` is left untouched.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
12971401
0 commit comments