@@ -23,6 +23,12 @@ import LibProc
2323
2424import TestsUtils
2525
/// Async-aware nil-coalescing operator.
///
/// The standard library's `??` takes a synchronous `@autoclosure`, so its
/// right-hand side cannot contain `await`. This overload accepts an `async`
/// autoclosure, letting call sites write `await optional ?? asyncFallback()`.
///
/// - Parameters:
///   - x: The optional value; returned directly when non-nil.
///   - y: The fallback, evaluated (and awaited) only when `x` is nil.
/// - Returns: `x` when non-nil, otherwise the awaited result of `y`.
private func ?? <T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
  if let x { return x }
  return await y()
}
31+
2632struct MeasurementMetadata {
2733 // Note: maxRSS and pages subtract the RSS measured
2834 // after the benchmark driver setup has finished.
@@ -198,10 +204,16 @@ struct TestConfig {
198204 action = c. action ?? . run
199205 allowNondeterministicHashing = c. allowNondeterministicHashing ?? false
200206 jsonOutput = c. jsonOutput ?? false
207+
208+ var skipTags : Set < BenchmarkCategory >
209+ skipTags = c. tags ?? [ . unstable, . skip]
210+ #if DEBUG
211+ skipTags. insert ( . long)
212+ #endif
201213 tests = TestConfig . filterTests ( registeredBenchmarks,
202214 tests: c. tests ?? [ ] ,
203215 tags: c. tags ?? [ ] ,
204- skipTags: c . skipTags ?? [ . unstable , . skip ] )
216+ skipTags: skipTags)
205217
206218 if tests. count > 0 {
207219 testNameLength = tests. map { $0. info. name. count} . sorted ( ) . reversed ( ) . first!
@@ -481,13 +493,13 @@ final class TestRunner {
481493 }
482494
483495 /// Measure the `fn` and return the average sample time per iteration (μs).
484- func measure( _ name: String , fn: ( Int ) -> Void , numIters: Int ) -> Double {
496+ func measure( _ name: String , fn: ( Int ) async -> Void , numIters: Int ) async -> Double {
485497#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
486498 name. withCString { p in startTrackingObjects ( p) }
487499#endif
488500
489501 startMeasurement ( )
490- fn ( numIters)
502+ await fn ( numIters)
491503 stopMeasurement ( )
492504
493505#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
@@ -502,7 +514,7 @@ final class TestRunner {
502514 }
503515
504516 /// Run the benchmark and return the measured results.
505- func run( _ test: BenchmarkInfo ) -> BenchResults ? {
517+ func run( _ test: BenchmarkInfo ) async -> BenchResults ? {
506518 // Before we do anything, check that we actually have a function to
507519 // run. If we don't it is because the benchmark is not supported on
508520 // the platform and we should skip it.
@@ -528,8 +540,8 @@ final class TestRunner {
528540 }
529541
530542 // Determine number of iterations for testFn to run for desired time.
531- func iterationsPerSampleTime( ) -> ( numIters: Int , oneIter: Double ) {
532- let oneIter = measure ( test. name, fn: testFn, numIters: 1 )
543+ func iterationsPerSampleTime( ) async -> ( numIters: Int , oneIter: Double ) {
544+ let oneIter = await measure ( test. name, fn: testFn, numIters: 1 )
533545 if oneIter > 0 {
534546 let timePerSample = c. sampleTime * 1_000_000.0 // microseconds (μs)
535547 return ( max ( Int ( timePerSample / oneIter) , 1 ) , oneIter)
@@ -540,28 +552,28 @@ final class TestRunner {
540552
541553 // Determine the scale of measurements. Re-use the calibration result if
542554 // it is just one measurement.
543- func calibrateMeasurements( ) -> Int {
544- let ( numIters, oneIter) = iterationsPerSampleTime ( )
555+ func calibrateMeasurements( ) async -> Int {
556+ let ( numIters, oneIter) = await iterationsPerSampleTime ( )
545557 if numIters == 1 { addSample ( oneIter) }
546558 else { resetMeasurements ( ) } // for accurate yielding reports
547559 return numIters
548560 }
549561
550562 let numIters = min ( // Cap to prevent overflow on 32-bit systems when scaled
551563 Int . max / 10_000 , // by the inner loop multiplier inside the `testFn`.
552- c. numIters ?? calibrateMeasurements ( ) )
564+ await c. numIters ?? ( await calibrateMeasurements ( ) ) )
553565
554- let numSamples = c. numSamples ??
566+ let numSamples = await c. numSamples ??
555567 // Compute the number of samples to measure for `sample-time`,
556568 // clamped in (`min-samples`, 200) range, if the `num-iters` are fixed.
557- max ( c. minSamples ?? 1 , min ( 200 , c. numIters == nil ? 1 :
558- calibrateMeasurements ( ) ) )
569+ ( max ( await c. minSamples ?? 1 , min ( 200 , c. numIters == nil ? 1 :
570+ await calibrateMeasurements ( ) ) ) )
559571
560572 samples. reserveCapacity ( numSamples)
561573 logVerbose ( " Collecting \( numSamples) samples. " )
562574 logVerbose ( " Measuring with scale \( numIters) . " )
563575 for _ in samples. count..< numSamples {
564- addSample ( measure ( test. name, fn: testFn, numIters: numIters) )
576+ addSample ( await measure ( test. name, fn: testFn, numIters: numIters) )
565577 }
566578
567579 test. tearDownFunction ? ( )
@@ -681,16 +693,16 @@ final class TestRunner {
681693 }
682694
683695 /// Run each benchmark and emit the results in JSON
684- func runBenchmarks( ) {
696+ func runBenchmarks( ) async {
685697 var testCount = 0
686698 if !c. jsonOutput {
687699 printTextHeading ( )
688700 }
689701 for (index, info) in c. tests {
690702 if c. jsonOutput {
691- printJSON ( index: index, info: info, results: run ( info) )
703+ printJSON ( index: index, info: info, results: await run ( info) )
692704 } else {
693- printText ( index: index, info: info, results: run ( info) )
705+ printText ( index: index, info: info, results: await run ( info) )
694706 }
695707 testCount += 1
696708 }
@@ -712,7 +724,7 @@ extension Hasher {
712724 }
713725}
714726
715- public func main( ) {
727+ public func main( ) async {
716728 let config = TestConfig ( registeredBenchmarks)
717729 switch ( config. action) {
718730 case . listTests:
@@ -742,7 +754,7 @@ public func main() {
742754 the option '--allow-nondeterministic-hashing to the benchmarking executable.
743755 """ )
744756 }
745- TestRunner ( config) . runBenchmarks ( )
757+ await TestRunner ( config) . runBenchmarks ( )
746758 if let x = config. afterRunSleep {
747759 sleep ( x)
748760 }
0 commit comments