@@ -530,14 +530,24 @@ fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
530530 config. instrument_coverage . then ( || c"default_%m_%p.profraw" . to_owned ( ) )
531531}
532532
/// Which stage of the automatic-differentiation pipeline a given
/// `llvm_optimize` invocation belongs to.
///
/// - `PreAD`: run the LLVM opt pipeline, but with size-increasing passes
///   (vectorization, loop unrolling) disabled, so Enzyme later differentiates
///   simpler IR.
/// - `DuringAD`: same restrictions as `PreAD`, but additionally run the
///   Enzyme opt and autodiff passes.
/// - `PostAD`: differentiation is done; run the full opt pipeline,
///   including the size-increasing passes.
#[derive(Debug, Eq, PartialEq)]
pub(crate) enum AutodiffStage {
    PreAD,
    DuringAD,
    PostAD,
}
542+
533543pub ( crate ) unsafe fn llvm_optimize (
534544 cgcx : & CodegenContext < LlvmCodegenBackend > ,
535545 dcx : DiagCtxtHandle < ' _ > ,
536546 module : & ModuleCodegen < ModuleLlvm > ,
537547 config : & ModuleConfig ,
538548 opt_level : config:: OptLevel ,
539549 opt_stage : llvm:: OptStage ,
540- skip_size_increasing_opts : bool ,
550+ autodiff_stage : AutodiffStage ,
541551) -> Result < ( ) , FatalError > {
542552 // Enzyme:
543553 // The whole point of compiler based AD is to differentiate optimized IR instead of unoptimized
@@ -550,13 +560,16 @@ pub(crate) unsafe fn llvm_optimize(
550560 let unroll_loops;
551561 let vectorize_slp;
552562 let vectorize_loop;
563+ let run_enzyme = cfg ! ( llvm_enzyme) && autodiff_stage == AutodiffStage :: DuringAD ;
553564
554- let run_enzyme = cfg ! ( llvm_enzyme) ;
555565 // When we build rustc with enzyme/autodiff support, we want to postpone size-increasing
556- // optimizations until after differentiation. FIXME(ZuseZ4): Before shipping on nightly,
566+ // optimizations until after differentiation. Our pipeline is thus: (opt + enzyme), (full opt).
567+ // We therefore have two calls to llvm_optimize, if autodiff is used.
568+ //
569+ // FIXME(ZuseZ4): Before shipping on nightly,
557570 // we should make this more granular, or at least check that the user has at least one autodiff
558571 // call in their code, to justify altering the compilation pipeline.
559- if skip_size_increasing_opts && run_enzyme {
572+ if cfg ! ( llvm_enzyme ) && autodiff_stage != AutodiffStage :: PostAD {
560573 unroll_loops = false ;
561574 vectorize_slp = false ;
562575 vectorize_loop = false ;
@@ -566,7 +579,7 @@ pub(crate) unsafe fn llvm_optimize(
566579 vectorize_slp = config. vectorize_slp ;
567580 vectorize_loop = config. vectorize_loop ;
568581 }
569- trace ! ( ?unroll_loops, ?vectorize_slp, ?vectorize_loop) ;
582+ trace ! ( ?unroll_loops, ?vectorize_slp, ?vectorize_loop, ?run_enzyme ) ;
570583 let using_thin_buffers = opt_stage == llvm:: OptStage :: PreLinkThinLTO || config. bitcode_needed ( ) ;
571584 let pgo_gen_path = get_pgo_gen_path ( config) ;
572585 let pgo_use_path = get_pgo_use_path ( config) ;
@@ -686,18 +699,14 @@ pub(crate) unsafe fn optimize(
686699 _ => llvm:: OptStage :: PreLinkNoLTO ,
687700 } ;
688701
689- // If we know that we will later run AD, then we disable vectorization and loop unrolling
690- let skip_size_increasing_opts = cfg ! ( llvm_enzyme) ;
702+ // If we know that we will later run AD, then we disable vectorization and loop unrolling.
703+ // Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD).
704+ // FIXME(ZuseZ4): Make this more granular, only set PreAD if we actually have autodiff
705+ // usages, not just if we build rustc with autodiff support.
706+ let autodiff_stage =
707+ if cfg ! ( llvm_enzyme) { AutodiffStage :: PreAD } else { AutodiffStage :: PostAD } ;
691708 return unsafe {
692- llvm_optimize (
693- cgcx,
694- dcx,
695- module,
696- config,
697- opt_level,
698- opt_stage,
699- skip_size_increasing_opts,
700- )
709+ llvm_optimize ( cgcx, dcx, module, config, opt_level, opt_stage, autodiff_stage)
701710 } ;
702711 }
703712 Ok ( ( ) )
0 commit comments