
Commit 7660be3

fix: setup and teardown functions when errors
1 parent c0dc397 commit 7660be3

8 files changed: +178 -8 lines

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@
 - Add two-way synchronization policy between `AGENTS.md` and `copilot-instructions.md` with automatic validation in task templates and PR checklist
 - Add tasks storage policy clarifying `.tasks/` (versioned) vs `.task/` (private scratch, git-ignored)
 - Include `set_test_title` helper in the single-file library
+- Capture and report errors from lifecycle hooks (`set_up_before_script`, `tear_down_after_script`)
 
 ## [0.24.0](https://github.com/TypedDevs/bashunit/compare/0.23.0...0.24.0) - 2025-09-14

docs/test-files.md

Lines changed: 4 additions & 0 deletions
@@ -85,6 +85,8 @@ function tear_down() {
 The `set_up_before_script` auxiliary function is called, if it is present in the test file, only once before all test functions in the test file begin.
 This is useful for global setup that applies to all test functions in the script, such as loading shared resources.
 
+If any command inside `set_up_before_script` fails, bashunit now halts the file immediately and reports the error (including the failing command and location) before any test functions run. This ensures misconfigured environments or missing dependencies surface clearly during setup.
+
 ::: code-group
 ```bash [Example]
 function set_up_before_script() {
@@ -99,6 +101,8 @@ The `tear_down_after_script` auxiliary function is called, if it is present in t
 This auxiliary function is similar to how `set_up_before_script` works but at the end of the tests.
 It provides a hook for any cleanup that should occur after all tests have run, such as deleting temporary files or releasing resources.
 
+Failures inside `tear_down_after_script` are also surfaced as dedicated errors after the final test output so cleanup problems (for example, missing tools or permissions) are visible in the run summary.
+
 ::: code-group
 ```bash [Example]
 function tear_down_after_script() {
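
For illustration, a minimal sketch of the behavior the updated docs describe. The file name, the docker check, and the temp directory below are made up for this example; only the bashunit hooks and `assert_same` are real:

```bash
#!/usr/bin/env bash
# sketch_lifecycle_hooks_test.sh — hypothetical test file, not part of this commit.

function set_up_before_script() {
  # If this check fails, bashunit now reports the error for this file and
  # skips every test function below instead of running them against a
  # half-configured environment.
  command -v docker >/dev/null || return 1
}

function tear_down_after_script() {
  # A failure here (for example, the directory is missing or not writable)
  # is reported after the last test, so it shows up in the run summary.
  rm -r ./tmp-shared-resources
}

function test_uses_shared_resources() {
  assert_same "foo" "foo"
}
```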

src/helpers.sh

Lines changed: 7 additions & 2 deletions
@@ -137,9 +137,14 @@ function helper::get_functions_to_run() {
 # @param $1 string Eg: "do_something"
 #
 function helper::execute_function_if_exists() {
-  if [[ "$(type -t "$1")" == "function" ]]; then
-    "$1" 2>/dev/null
+  local fn_name="$1"
+
+  if [[ "$(type -t "$fn_name")" == "function" ]]; then
+    "$fn_name"
+    return $?
   fi
+
+  return 0
 }
 
 #
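
The practical effect of this change is that a hook's stderr is no longer discarded (`2>/dev/null` is gone) and its exit status is returned explicitly, with a missing function treated as success. A standalone sketch, assuming `src/helpers.sh` can be sourced on its own; `my_hook` is a made-up name:

```bash
#!/usr/bin/env bash
source src/helpers.sh               # assumption: no other bashunit files are needed for this helper

function my_hook() {                # hypothetical hook
  echo "something went wrong" >&2   # no longer swallowed by 2>/dev/null
  return 7
}

helper::execute_function_if_exists 'my_hook'
echo "status: $?"                   # prints "status: 7" — the hook's exit code is passed back

helper::execute_function_if_exists 'not_defined'
echo "status: $?"                   # prints "status: 0" — a missing hook is not an error
```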

src/runner.sh

Lines changed: 76 additions & 6 deletions
@@ -17,13 +17,19 @@ function runner::load_test_files() {
     internal_log "Loading file" "$test_file"
     # shellcheck source=/dev/null
     source "$test_file"
-    runner::run_set_up_before_script
+    if ! runner::run_set_up_before_script "$test_file"; then
+      runner::clean_set_up_and_tear_down_after_script
+      if ! parallel::is_enabled; then
+        cleanup_script_temp_files
+      fi
+      continue
+    fi
     if parallel::is_enabled; then
       runner::call_test_functions "$test_file" "$filter" 2>/dev/null &
     else
       runner::call_test_functions "$test_file" "$filter"
     fi
-    runner::run_tear_down_after_script
+    runner::run_tear_down_after_script "$test_file"
     runner::clean_set_up_and_tear_down_after_script
     if ! parallel::is_enabled; then
       cleanup_script_temp_files
@@ -57,9 +63,13 @@ function runner::load_bench_files() {
     export BASHUNIT_CURRENT_SCRIPT_ID="$(helper::generate_id "${test_file}")"
     # shellcheck source=/dev/null
     source "$bench_file"
-    runner::run_set_up_before_script
+    if ! runner::run_set_up_before_script "$bench_file"; then
+      runner::clean_set_up_and_tear_down_after_script
+      cleanup_script_temp_files
+      continue
+    fi
     runner::call_bench_functions "$bench_file" "$filter"
-    runner::run_tear_down_after_script
+    runner::run_tear_down_after_script "$bench_file"
     runner::clean_set_up_and_tear_down_after_script
     cleanup_script_temp_files
   done
@@ -555,14 +565,73 @@ function runner::write_failure_result_output() {
   echo -e "$test_nr) $test_file:$line_number\n$error_msg" >> "$FAILURES_OUTPUT_PATH"
 }
 
+function runner::record_file_hook_failure() {
+  local hook_name="$1"
+  local test_file="$2"
+  local hook_output="$3"
+  local status="$4"
+  local render_header="${5:-false}"
+
+  if [[ "$render_header" == true ]]; then
+    runner::render_running_file_header "$test_file"
+  fi
+
+  if [[ -z "$hook_output" ]]; then
+    hook_output="Hook '$hook_name' failed with exit code $status"
+  fi
+
+  state::add_tests_failed
+  console_results::print_error_test "$hook_name" "$hook_output"
+  reports::add_test_failed "$test_file" "$(helper::normalize_test_function_name "$hook_name")" 0 0
+  runner::write_failure_result_output "$test_file" "$hook_name" "$hook_output"
+
+  return "$status"
+}
+
+function runner::execute_file_hook() {
+  local hook_name="$1"
+  local test_file="$2"
+  local render_header="${3:-false}"
+
+  if [[ "$(type -t "$hook_name")" != "function" ]]; then
+    return 0
+  fi
+
+  local hook_output=""
+  local status=0
+  local hook_output_file
+  hook_output_file=$(temp_file "${hook_name}_output")
+
+  {
+    "$hook_name"
+  } >"$hook_output_file" 2>&1 || status=$?
+
+  if [[ -f "$hook_output_file" ]]; then
+    hook_output=$(cat "$hook_output_file")
+    rm -f "$hook_output_file"
+  fi
+
+  if [[ $status -ne 0 ]]; then
+    runner::record_file_hook_failure "$hook_name" "$test_file" "$hook_output" "$status" "$render_header"
+    return $status
+  fi
+
+  if [[ -n "$hook_output" ]]; then
+    printf "%s\n" "$hook_output"
+  fi
+
+  return 0
+}
+
 function runner::run_set_up() {
   internal_log "run_set_up"
   helper::execute_function_if_exists 'set_up'
 }
 
 function runner::run_set_up_before_script() {
+  local test_file="$1"
   internal_log "run_set_up_before_script"
-  helper::execute_function_if_exists 'set_up_before_script'
+  runner::execute_file_hook 'set_up_before_script' "$test_file" true
 }
 
 function runner::run_tear_down() {
@@ -577,8 +646,9 @@ function runner::clear_mocks() {
 }
 
 function runner::run_tear_down_after_script() {
+  local test_file="$1"
   internal_log "run_tear_down_after_script"
-  helper::execute_function_if_exists 'tear_down_after_script'
+  runner::execute_file_hook 'tear_down_after_script' "$test_file"
 }
 
 function runner::clean_set_up_and_tear_down_after_script() {
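
To make the control flow easier to follow, here is a stripped-down, standalone sketch of the capture-and-report pattern that `runner::execute_file_hook` implements. It deliberately avoids bashunit's internals (`temp_file`, `state::*`, `console_results::*`, `reports::*`); `run_hook` and `broken_hook` are illustrative names only:

```bash
#!/usr/bin/env bash

function broken_hook() {
  some_missing_command   # fails with "command not found" (exit code 127)
}

function run_hook() {
  local hook_name="$1"

  # Nothing to do if the hook is not defined in the sourced test file.
  if [[ "$(type -t "$hook_name")" != "function" ]]; then
    return 0
  fi

  local status=0
  local output_file
  output_file=$(mktemp)

  # Run the hook with stdout and stderr captured; record a non-zero exit
  # code without letting it abort the runner.
  { "$hook_name"; } >"$output_file" 2>&1 || status=$?

  local output
  output=$(cat "$output_file")
  rm -f "$output_file"

  if [[ $status -ne 0 ]]; then
    # In bashunit this is where the failure is counted, printed, and written
    # to the failures report; here we just print it.
    echo "✗ Hook '$hook_name' failed (exit $status): $output" >&2
    return "$status"
  fi

  # On success, forward whatever the hook printed.
  [[ -n "$output" ]] && printf '%s\n' "$output"
  return 0
}

run_hook 'broken_hook' || echo "hook failed; the runner would skip this file's tests"
```

A hook that is not defined counts as success, matching the updated `helper::execute_function_if_exists`, and a failing hook returns its original exit code so the caller (`runner::load_test_files`) can skip the file and continue with the next one.
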
Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+function set_up_before_script() {
+  TEST_ENV_FILE="tests/acceptance/fixtures/.env.default"
+}
+
+function strip_ansi() {
+  sed -E 's/\x1B\[[0-9;]*[A-Za-z]//g'
+}
+
+function test_bashunit_when_set_up_before_script_errors() {
+  local test_file=./tests/acceptance/fixtures/test_bashunit_when_setup_before_script_errors.sh
+  local fixture=$test_file
+
+  local header_line="Running $fixture"
+  local error_line="✗ Error: Set up before script"
+  local message_line=" $fixture: line 4: invalid_function_name: command not found"
+  local tests_summary="Tests: 1 failed, 1 total"
+  local assertions_summary="Assertions: 0 failed, 0 total"
+
+  local actual_raw
+  set +e
+  actual_raw="$(./bashunit --no-parallel --detailed --env "$TEST_ENV_FILE" "$test_file")"
+  set -e
+
+  local actual
+  actual="$(printf "%s" "$actual_raw" | strip_ansi)"
+
+  assert_contains "$header_line" "$actual"
+  assert_contains "$error_line" "$actual"
+  assert_contains "$message_line" "$actual"
+  assert_contains "$tests_summary" "$actual"
+  assert_contains "$assertions_summary" "$actual"
+  assert_general_error "$(./bashunit --no-parallel --env "$TEST_ENV_FILE" "$test_file")"
+}
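
As a usage note, the same behavior can be reproduced by hand against the new fixture. The command mirrors the one this acceptance test runs; the commented output is approximate, reconstructed only from the strings the test asserts on (exact spacing, colors, and any extra lines are omitted):

```bash
./bashunit --no-parallel --detailed --env tests/acceptance/fixtures/.env.default \
  ./tests/acceptance/fixtures/test_bashunit_when_setup_before_script_errors.sh

# Running ./tests/acceptance/fixtures/test_bashunit_when_setup_before_script_errors.sh
# ✗ Error: Set up before script
#   ./tests/acceptance/fixtures/test_bashunit_when_setup_before_script_errors.sh: line 4: invalid_function_name: command not found
# Tests: 1 failed, 1 total
# Assertions: 0 failed, 0 total
```

The run also exits with a non-zero status, which is what `assert_general_error` checks.
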
Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+function set_up_before_script() {
+  TEST_ENV_FILE="tests/acceptance/fixtures/.env.default"
+}
+
+function strip_ansi() {
+  sed -E 's/\x1B\[[0-9;]*[A-Za-z]//g'
+}
+
+function test_bashunit_when_tear_down_after_script_errors() {
+  local test_file=./tests/acceptance/fixtures/test_bashunit_when_teardown_after_script_errors.sh
+  local fixture=$test_file
+
+  local header_line="Running $fixture"
+  local error_line="✗ Error: Tear down after script"
+  local message_line=" $fixture: line 4: missing_cleanup_command: command not found"
+  local tests_summary="Tests: 1 passed, 1 failed, 2 total"
+  local assertions_summary="Assertions: 0 passed, 0 failed, 0 total"
+
+  local actual_raw
+  set +e
+  actual_raw="$(./bashunit --no-parallel --detailed --env "$TEST_ENV_FILE" "$test_file")"
+  set -e
+
+  local actual
+  actual="$(printf "%s" "$actual_raw" | strip_ansi)"
+
+  assert_contains "$header_line" "$actual"
+  assert_contains "$error_line" "$actual"
+  assert_contains "$message_line" "$actual"
+  assert_contains "$tests_summary" "$actual"
+  assert_contains "$assertions_summary" "$actual"
+  assert_general_error "$(./bashunit --no-parallel --env "$TEST_ENV_FILE" "$test_file")"
+}

tests/acceptance/fixtures/test_bashunit_when_setup_before_script_errors.sh

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+function set_up_before_script() {
+  invalid_function_name arg1
+}
+
+function test_dummy() {
+  :
+}

tests/acceptance/fixtures/test_bashunit_when_teardown_after_script_errors.sh

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+function tear_down_after_script() {
+  missing_cleanup_command
+}
+
+function test_sample() {
+  :
+}
