@@ -250,7 +250,7 @@ pub fn codegen_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
     unsafe {
         let g = get_static(cx, def_id);
 
-        let v = match ::mir::codegen_static_initializer(cx, def_id) {
+        let (v, alloc) = match ::mir::codegen_static_initializer(cx, def_id) {
             Ok(v) => v,
             // Error has already been reported
             Err(_) => return,
@@ -309,6 +309,44 @@ pub fn codegen_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
 
         if attr::contains_name(attrs, "thread_local") {
             llvm::set_thread_local_mode(g, cx.tls_model);
+
+            // Do not allow LLVM to change the alignment of a TLS on macOS.
+            //
+            // By default a global's alignment can be freely increased.
+            // This allows LLVM to generate more performant instructions,
+            // e.g. using an aligned load into a SIMD register.
+            //
+            // However, on macOS 10.10 or below, the dynamic linker does not
+            // respect any alignment given on the TLS (radar 24221680).
+            // This violates the alignment assumption and causes segfaults at runtime.
+            //
+            // This bug is very easy to trigger. In `println!` and `panic!`,
+            // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+            // whose values are `mem::replace`d during initialization.
+            // The implementation of `mem::replace` will use SIMD
+            // whenever the size is 32 bytes or larger. LLVM notices SIMD is used
+            // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+            // which macOS's dyld disregards, causing crashes
+            // (see issues #51794, #51758, #50867, #48866 and #44056).
+            //
+            // To work around the bug, we trick LLVM into not increasing
+            // the global's alignment by explicitly assigning a section to it
+            // (equivalent to automatically generating a `#[link_section]` attribute).
+            // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+            // of `lib/IR/Globals.cpp` for why this works.
+            //
+            // When the alignment is not increased, the optimized `mem::replace`
+            // will use unaligned load instructions instead, thus avoiding the crash.
+            //
+            // We can remove this hack once we decide to drop macOS 10.10 support.
+            if cx.tcx.sess.target.target.options.is_like_osx {
+                let sect_name = if alloc.bytes.iter().all(|b| *b == 0) {
+                    CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
+                } else {
+                    CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
+                };
+                llvm::LLVMSetSection(g, sect_name.as_ptr());
+            }
         }
 
         base::set_link_section(cx, g, attrs);
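For readers who want to try the same trick outside of rustc, here is a minimal sketch using the `llvm-sys` crate. It builds a thread-local global and pins its Mach-O section so that LLVM's `GlobalValue::canIncreaseAlignment()` will refuse to raise its alignment, mirroring the change above. The helper name `pin_tls_section` and the demo module/global names are illustrative only and are not part of this commit.

```rust
// Sketch, assuming the `llvm-sys` crate: explicitly set the TLS section of a
// global so LLVM cannot increase its alignment later.
use std::ffi::CStr;
use std::os::raw::c_char;

use llvm_sys::core::*;
use llvm_sys::prelude::*;

unsafe fn pin_tls_section(g: LLVMValueRef, init_bytes: &[u8]) {
    // Same choice as in `codegen_static`: all-zero initializers go to
    // `__thread_bss`, everything else to `__thread_data`.
    let sect_name = if init_bytes.iter().all(|&b| b == 0) {
        CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
    } else {
        CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
    };
    LLVMSetSection(g, sect_name.as_ptr());
}

fn main() {
    unsafe {
        let ctx = LLVMContextCreate();
        let module = LLVMModuleCreateWithNameInContext(
            b"tls_demo\0".as_ptr() as *const c_char,
            ctx,
        );

        // A 32-byte thread-local global: large enough that an optimized
        // `mem::replace` would be vectorized and LLVM would otherwise want to
        // bump the alignment to 32 bytes.
        let i8_ty = LLVMInt8TypeInContext(ctx);
        let array_ty = LLVMArrayType(i8_ty, 32);
        let g = LLVMAddGlobal(module, array_ty, b"demo_tls\0".as_ptr() as *const c_char);
        LLVMSetThreadLocal(g, 1);
        LLVMSetInitializer(g, LLVMConstNull(array_ty));

        // Zero-initialized, so this lands in `__DATA,__thread_bss`.
        pin_tls_section(g, &[0u8; 32]);

        LLVMDumpModule(module);
        LLVMDisposeModule(module);
        LLVMContextDispose(ctx);
    }
}
```

Dumping the module should show the global carrying an explicit `section "__DATA,__thread_bss"` in the IR, which is the property `canIncreaseAlignment()` checks before allowing any alignment increase.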