From f5f2934c540066c37d43c52c077ae5580c0ebe76 Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Mon, 20 Oct 2025 11:18:23 -0400 Subject: [PATCH 01/10] Vale CQA scripts (AI generated - Gemini 2.5 Pro) --- scripts/vale-check.sh | 287 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 287 insertions(+) create mode 100755 scripts/vale-check.sh diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh new file mode 100755 index 000000000000..8b4a7266b6fe --- /dev/null +++ b/scripts/vale-check.sh @@ -0,0 +1,287 @@ +#!/bin/bash +# shellcheck shell=bash + +# A unified script to run Vale with different scopes and configurations. +# +# SCOPES: +# pr: Checks all .adoc files changed on the current branch. +# dir: Finds and checks all assemblies in a directory (grouped output). +# assembly: Checks a single assembly file AND its included modules. +# +# FLAGS: +# --cqa: Uses a temporary config with AsciiDocDITA rules enabled. +# --repo: (Default) Uses the standard .vale.ini in the repository. +# --help, -h: Shows this help message. +# +# REQUIREMENTS: +# - bash 3.2+ +# - git +# - vale + +# --- Version Check --- +# We check for Bash 3+ and rely on the read loop's +# compatibility back to Bash 3.2 (e.g., default macOS). +if [ "${BASH_VERSINFO[0]}" -lt 3 ]; then + echo "Error: This script requires bash 3.2 or higher." >&2 + echo "Current version: $BASH_VERSION" >&2 + exit 1 +fi + +# --- Global Configuration --- +set -u # Treat unset variables as errors +set +e # Don't exit script if vale finds errors + +# Use an array for Vale CLI arguments to avoid quoting/word-splitting pitfalls +VALE_ARGS=() +ORIGINAL_VALE_INI=".vale.ini" + +# --- Help Function --- +show_help() { + echo "Usage: $0 [path] [--cqa|--repo]" + echo "" + echo "A unified script to run Vale with different scopes and configurations." + echo "" + echo "SCOPES:" + echo " pr Checks all files changed on the current branch (no path needed)." 
+ echo " dir Finds and checks all assemblies in a directory (grouped output)." + echo " assembly Checks a single assembly file AND its included modules." + echo "" + echo "FLAGS:" + echo " --cqa Uses a temporary config with AsciiDocDITA rules enabled." + echo " --repo (Default) Uses the standard .vale.ini in the repository." + echo " --help, -h Shows this help message." + echo "" + echo "EXAMPLES:" + echo " $0 pr" + echo " $0 pr --cqa" + echo " $0 dir extensions/" + echo " $0 assembly extensions/my-assembly.adoc --cqa" +} + +# --- Config Function --- +# Sets VALE_ARGS based on the --cqa flag. +setup_vale_config() { + local use_cqa=$1 + if [ "$use_cqa" -eq 1 ]; then + if [ ! -f "$ORIGINAL_VALE_INI" ]; then + echo "Error: $ORIGINAL_VALE_INI not found in current directory." >&2 + exit 1 + fi + + local TEMP_VALE_INI + # Create the temp file in the CURRENT directory (./) so that + # relative paths in the config (like StylesPath) resolve correctly. + TEMP_VALE_INI=$(mktemp ./.vale.ini.temp.XXXXXX) + + if [ -z "$TEMP_VALE_INI" ]; then + echo "Error: Could not create temporary config file." >&2 + exit 1 + fi + + sed 's/^\(BasedOnStyles = .*\)$/\1, AsciiDocDITA/' "$ORIGINAL_VALE_INI" > "$TEMP_VALE_INI" + + # Capture the actual path now, so cleanup works even after local scope ends. + trap "rm -f '$TEMP_VALE_INI'" EXIT INT TERM + + VALE_ARGS=(--config "$TEMP_VALE_INI") + echo "Using temporary CQA config ($TEMP_VALE_INI)..." + else + VALE_ARGS=() + echo "Using default .vale.ini config..." + fi +} + +# --- Scope Function: pr --- +run_vale_pr() { + echo "Checking all changed files on this branch..." + + if ! git rev-parse --git-dir >/dev/null 2>&1; then + echo "Error: Not in a git repository." >&2 + exit 1 + fi + + local UPSTREAM_BRANCH + UPSTREAM_BRANCH=$(git rev-parse --abbrev-ref --symbolic-full-name '@{upstream}' 2>/dev/null) + + if [ -z "$UPSTREAM_BRANCH" ]; then + echo "Error: No upstream branch configured for the current branch." 
>&2 + echo "Please set one with: git branch --set-upstream-to=/" >&2 + exit 1 + fi + echo "Finding common ancestor with '$UPSTREAM_BRANCH'..." + + local MERGE_BASE + MERGE_BASE=$(git merge-base HEAD "$UPSTREAM_BRANCH") + if [ -z "$MERGE_BASE" ]; then + echo "Error: Could not find a common merge base with '$UPSTREAM_BRANCH'." >&2 + exit 1 + fi + + if git diff --quiet --diff-filter=d "$MERGE_BASE" HEAD; then + echo "No changed files found on this branch." + exit 0 + fi + + echo "Running Vale..." + + local output + output=$( ( + set -o pipefail + git diff -z --name-only --diff-filter=d "$MERGE_BASE" HEAD \ + | xargs -0 vale --output CLI --minAlertLevel=suggestion --no-exit "${VALE_ARGS[@]}" + ) ) + + if [ -z "$output" ]; then + echo "✅ No issues found in changed files." + else + echo "$output" + fi +} + +# --- Scope Function: assembly --- +run_vale_assembly() { + local assembly_file=$1 + if [ ! -f "$assembly_file" ]; then + echo "Error: Assembly file not found: $assembly_file" >&2 + exit 1 + fi + + echo "--- Linting Assembly: $assembly_file ---" + local assembly_output + # Use --output CLI for cleaner, grouped results + assembly_output=$(vale --output CLI "${VALE_ARGS[@]}" "$assembly_file") + + if [ -z "$assembly_output" ]; then + echo "✅ No issues found." + else + echo "$assembly_output" + fi + echo "" + + echo "--- Linting Modules from $assembly_file ---" + local MODULES + MODULES=$(awk -F'::|\\[|]' '/^include::/ { print $2 }' "$assembly_file") + + if [ -z "$MODULES" ]; then + echo "No modules found to lint." + return + fi + + local FILTERED_MODULES + FILTERED_MODULES=$(printf '%s\n' "$MODULES" | grep -v -E '(^|/)_' || true) + + if [ -z "$FILTERED_MODULES" ]; then + echo "All modules are partials (skipped)." 
+ return + fi + + local modules_output + # Use --output CLI for cleaner, grouped results + modules_output=$(printf '%s\n' "$FILTERED_MODULES" | xargs vale --output CLI "${VALE_ARGS[@]}") + + if [ -z "$modules_output" ]; then + echo "✅ No issues found in modules." + else + echo "$modules_output" + fi +} + +# --- Scope Function: dir (replaces 'assemblies') --- +run_vale_dir() { + local scan_dir=$1 + if [ ! -d "$scan_dir" ]; then + echo "Error: Directory not found: $scan_dir" >&2 + exit 1 + fi + + echo "Scanning $scan_dir for assemblies..." + + local -a ASSEMBLIES + # Use the universally compatible while-read loop + while IFS= read -r line; do + ASSEMBLIES+=("$line") + done < <(find "$scan_dir" \ + \( -type d -name "_*" -prune \) \ + -o \ + \( -type f -name "*.adoc" -exec grep -lq "^include::.*modules/" {} \; -print \)) + + if [ ${#ASSEMBLIES[@]} -eq 0 ]; then + echo "No assembly files (containing 'include::...modules/...') found in $scan_dir." + exit 0 + fi + + echo "Found assemblies. Running grouped Vale scan..." + for assembly in "${ASSEMBLIES[@]}"; do + echo "=====================================================================" + echo "Checking Assembly and its Modules: $assembly" + echo "=====================================================================" + run_vale_assembly "$assembly" + echo "" + done +} + +# --- Main Script Logic --- + +if ! command -v vale >/dev/null 2>&1; then + echo "Error: vale is not installed or not in PATH." >&2 + echo "Please install vale: https://vale.sh/docs/vale-cli/installation/" >&2 + exit 1 +fi + +USE_CQA=0 +SCOPE="" +ARGS=() + +while [[ "$#" -gt 0 ]]; do + case "$1" in + --cqa) USE_CQA=1; shift ;; + --repo) USE_CQA=0; shift ;; + --help|-h) show_help; exit 0 ;; + -*) echo "Error: Unknown flag $1"; show_help; exit 1 ;; + *) + if [ -z "$SCOPE" ]; then + SCOPE="$1" + else + ARGS+=("$1") + fi + shift + ;; + esac +done + +if [ -z "$SCOPE" ]; then + echo "Error: No scope (pr, dir, assembly) provided." 
>&2 + show_help + exit 1 +fi + +setup_vale_config "$USE_CQA" + +case "$SCOPE" in + pr) + if [ ${#ARGS[@]} -ne 0 ]; then + echo "Error: 'pr' scope takes no path arguments." >&2 + show_help; exit 1 + fi + run_vale_pr + ;; + dir) + if [ ${#ARGS[@]} -ne 1 ]; then + echo "Error: 'dir' scope requires one directory path." >&2 + show_help; exit 1 + fi + run_vale_dir "${ARGS[0]}" + ;; + assembly) + if [ ${#ARGS[@]} -ne 1 ]; then + echo "Error: 'assembly' scope requires one file path." >&2 + show_help; exit 1 + fi + run_vale_assembly "${ARGS[0]}" + ;; + *) + echo "Error: Unknown scope '$SCOPE'." >&2 + show_help + exit 1 + ;; +esac From 620dcd24dbd75ee30f1881b2958957e7e772b5ea Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Tue, 28 Oct 2025 12:18:23 -0400 Subject: [PATCH 02/10] Add flag to run a specific rule --- scripts/vale-check.sh | 107 +++++++++++++++++++++++------------------- 1 file changed, 60 insertions(+), 47 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index 8b4a7266b6fe..ce31eaaa239d 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -9,9 +9,10 @@ # assembly: Checks a single assembly file AND its included modules. # # FLAGS: -# --cqa: Uses a temporary config with AsciiDocDITA rules enabled. -# --repo: (Default) Uses the standard .vale.ini in the repository. -# --help, -h: Shows this help message. +# --cqa: Uses a temporary config with all AsciiDocDITA rules enabled. +# --rule : Checks for only one specific rule (e.g., AsciiDocDITA.RelatedLinks). +# --repo: (Default) Uses the standard .vale.ini in the repository. +# --help, -h: Shows this help message. # # REQUIREMENTS: # - bash 3.2+ @@ -19,8 +20,6 @@ # - vale # --- Version Check --- -# We check for Bash 3+ and rely on the read loop's -# compatibility back to Bash 3.2 (e.g., default macOS). if [ "${BASH_VERSINFO[0]}" -lt 3 ]; then echo "Error: This script requires bash 3.2 or higher." 
>&2 echo "Current version: $BASH_VERSION" >&2 @@ -31,13 +30,12 @@ fi set -u # Treat unset variables as errors set +e # Don't exit script if vale finds errors -# Use an array for Vale CLI arguments to avoid quoting/word-splitting pitfalls VALE_ARGS=() ORIGINAL_VALE_INI=".vale.ini" # --- Help Function --- show_help() { - echo "Usage: $0 [path] [--cqa|--repo]" + echo "Usage: $0 [path] [--cqa|--repo] [--rule ]" echo "" echo "A unified script to run Vale with different scopes and configurations." echo "" @@ -47,51 +45,55 @@ show_help() { echo " assembly Checks a single assembly file AND its included modules." echo "" echo "FLAGS:" - echo " --cqa Uses a temporary config with AsciiDocDITA rules enabled." - echo " --repo (Default) Uses the standard .vale.ini in the repository." - echo " --help, -h Shows this help message." + echo " --cqa Uses a temporary config with AsciiDocDITA rules enabled." + echo " --repo (Default) Uses the standard .vale.ini in the repository." + echo " --rule Checks for only one specific rule (e.g., AsciiDocDITA.RelatedLinks)." + echo " --help, -h Shows this help message." echo "" echo "EXAMPLES:" echo " $0 pr" echo " $0 pr --cqa" echo " $0 dir extensions/" echo " $0 assembly extensions/my-assembly.adoc --cqa" + echo " $0 dir extensions/ --rule AsciiDocDITA.RelatedLinks" } -# --- Config Function --- -# Sets VALE_ARGS based on the --cqa flag. -setup_vale_config() { +# --- Setup Vale config and rule filtering --- +setup_vale_config_and_rule() { local use_cqa=$1 - if [ "$use_cqa" -eq 1 ]; then - if [ ! -f "$ORIGINAL_VALE_INI" ]; then - echo "Error: $ORIGINAL_VALE_INI not found in current directory." >&2 - exit 1 - fi + local single_rule=$2 + local base_ini="$ORIGINAL_VALE_INI" + local style_to_add="" - local TEMP_VALE_INI - # Create the temp file in the CURRENT directory (./) so that - # relative paths in the config (like StylesPath) resolve correctly. 
- TEMP_VALE_INI=$(mktemp ./.vale.ini.temp.XXXXXX) - - if [ -z "$TEMP_VALE_INI" ]; then - echo "Error: Could not create temporary config file." >&2 - exit 1 - fi + if [ -n "$single_rule" ]; then + style_to_add="${single_rule%%.*}" # Extract style from rule + fi - sed 's/^\(BasedOnStyles = .*\)$/\1, AsciiDocDITA/' "$ORIGINAL_VALE_INI" > "$TEMP_VALE_INI" - - # Capture the actual path now, so cleanup works even after local scope ends. + if [ "$use_cqa" -eq 1 ]; then + TEMP_VALE_INI=$(mktemp ./.vale.ini.temp.XXXXXX) + sed 's/^\(BasedOnStyles = .*\)$/\1, AsciiDocDITA/' "$base_ini" > "$TEMP_VALE_INI" trap "rm -f '$TEMP_VALE_INI'" EXIT INT TERM - VALE_ARGS=(--config "$TEMP_VALE_INI") - echo "Using temporary CQA config ($TEMP_VALE_INI)..." + + elif [ -n "$style_to_add" ]; then + if ! grep -q "BasedOnStyles.*$style_to_add" "$base_ini"; then + TEMP_VALE_INI=$(mktemp ./.vale.ini.temp.XXXXXX) + sed "s/^\(BasedOnStyles = .*\)$/\1, $style_to_add/" "$base_ini" > "$TEMP_VALE_INI" + trap "rm -f '$TEMP_VALE_INI'" EXIT INT TERM + VALE_ARGS=(--config "$TEMP_VALE_INI") + else + VALE_ARGS=() + fi else VALE_ARGS=() - echo "Using default .vale.ini config..." + fi + + if [ -n "$single_rule" ]; then + VALE_ARGS+=(--filter=".Name=='$single_rule'") fi } -# --- Scope Function: pr --- +# --- Scope: pr --- run_vale_pr() { echo "Checking all changed files on this branch..." @@ -123,14 +125,14 @@ run_vale_pr() { fi echo "Running Vale..." - + local output output=$( ( set -o pipefail git diff -z --name-only --diff-filter=d "$MERGE_BASE" HEAD \ | xargs -0 vale --output CLI --minAlertLevel=suggestion --no-exit "${VALE_ARGS[@]}" ) ) - + if [ -z "$output" ]; then echo "✅ No issues found in changed files." else @@ -138,7 +140,7 @@ run_vale_pr() { fi } -# --- Scope Function: assembly --- +# --- Scope: assembly --- run_vale_assembly() { local assembly_file=$1 if [ ! 
-f "$assembly_file" ]; then @@ -148,9 +150,8 @@ run_vale_assembly() { echo "--- Linting Assembly: $assembly_file ---" local assembly_output - # Use --output CLI for cleaner, grouped results assembly_output=$(vale --output CLI "${VALE_ARGS[@]}" "$assembly_file") - + if [ -z "$assembly_output" ]; then echo "✅ No issues found." else @@ -169,16 +170,15 @@ run_vale_assembly() { local FILTERED_MODULES FILTERED_MODULES=$(printf '%s\n' "$MODULES" | grep -v -E '(^|/)_' || true) - + if [ -z "$FILTERED_MODULES" ]; then echo "All modules are partials (skipped)." return fi local modules_output - # Use --output CLI for cleaner, grouped results modules_output=$(printf '%s\n' "$FILTERED_MODULES" | xargs vale --output CLI "${VALE_ARGS[@]}") - + if [ -z "$modules_output" ]; then echo "✅ No issues found in modules." else @@ -186,7 +186,7 @@ run_vale_assembly() { fi } -# --- Scope Function: dir (replaces 'assemblies') --- +# --- Scope: dir --- run_vale_dir() { local scan_dir=$1 if [ ! -d "$scan_dir" ]; then @@ -197,7 +197,6 @@ run_vale_dir() { echo "Scanning $scan_dir for assemblies..." local -a ASSEMBLIES - # Use the universally compatible while-read loop while IFS= read -r line; do ASSEMBLIES+=("$line") done < <(find "$scan_dir" \ @@ -220,8 +219,7 @@ run_vale_dir() { done } -# --- Main Script Logic --- - +# --- Main --- if ! command -v vale >/dev/null 2>&1; then echo "Error: vale is not installed or not in PATH." >&2 echo "Please install vale: https://vale.sh/docs/vale-cli/installation/" >&2 @@ -229,6 +227,7 @@ if ! command -v vale >/dev/null 2>&1; then fi USE_CQA=0 +SINGLE_RULE="" SCOPE="" ARGS=() @@ -236,6 +235,20 @@ while [[ "$#" -gt 0 ]]; do case "$1" in --cqa) USE_CQA=1; shift ;; --repo) USE_CQA=0; shift ;; + --rule=*) + SINGLE_RULE="${1#*=}" + shift + ;; + --rule) + if [[ -n "$2" && "${2:0:1}" != "-" ]]; then + SINGLE_RULE="$2" + shift 2 + else + echo "Error: --rule requires a argument." 
>&2 + show_help + exit 1 + fi + ;; --help|-h) show_help; exit 0 ;; -*) echo "Error: Unknown flag $1"; show_help; exit 1 ;; *) @@ -255,7 +268,7 @@ if [ -z "$SCOPE" ]; then exit 1 fi -setup_vale_config "$USE_CQA" +setup_vale_config_and_rule "$USE_CQA" "$SINGLE_RULE" case "$SCOPE" in pr) From 4a2a142fe2a214611cc0af544d9c7085cbf178ff Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Tue, 28 Oct 2025 13:02:46 -0400 Subject: [PATCH 03/10] Change paths from relative to absolute and improve error handling - Change to absolute file paths - If modules are included but don't exist, throw an error and keep - processing (fail forward) --- scripts/vale-check.sh | 149 ++++++++++++++++++++++++------------------ 1 file changed, 84 insertions(+), 65 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index ce31eaaa239d..4289c06995d0 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -19,46 +19,39 @@ # - git # - vale -# --- Version Check --- if [ "${BASH_VERSINFO[0]}" -lt 3 ]; then echo "Error: This script requires bash 3.2 or higher." >&2 echo "Current version: $BASH_VERSION" >&2 exit 1 fi -# --- Global Configuration --- -set -u # Treat unset variables as errors -set +e # Don't exit script if vale finds errors +set -u +set +e VALE_ARGS=() ORIGINAL_VALE_INI=".vale.ini" -# --- Help Function --- show_help() { echo "Usage: $0 [path] [--cqa|--repo] [--rule ]" echo "" - echo "A unified script to run Vale with different scopes and configurations." - echo "" echo "SCOPES:" - echo " pr Checks all files changed on the current branch (no path needed)." - echo " dir Finds and checks all assemblies in a directory (grouped output)." - echo " assembly Checks a single assembly file AND its included modules." 
+ echo " pr Checks all changed .adoc files on current branch" + echo " dir Finds and checks assemblies in directory (grouped output)" + echo " assembly Checks one assembly file and its modules" echo "" echo "FLAGS:" - echo " --cqa Uses a temporary config with AsciiDocDITA rules enabled." - echo " --repo (Default) Uses the standard .vale.ini in the repository." - echo " --rule Checks for only one specific rule (e.g., AsciiDocDITA.RelatedLinks)." - echo " --help, -h Shows this help message." + echo " --cqa Use temporary config with AsciiDocDITA style enabled" + echo " --repo Use repository .vale.ini config (default)" + echo " --rule Run only one specific Vale rule" + echo " --help, -h Show this help message" echo "" echo "EXAMPLES:" echo " $0 pr" echo " $0 pr --cqa" echo " $0 dir extensions/" - echo " $0 assembly extensions/my-assembly.adoc --cqa" - echo " $0 dir extensions/ --rule AsciiDocDITA.RelatedLinks" + echo " $0 assembly path/to/file.adoc --rule AsciiDocDITA.RelatedLinks" } -# --- Setup Vale config and rule filtering --- setup_vale_config_and_rule() { local use_cqa=$1 local single_rule=$2 @@ -66,7 +59,7 @@ setup_vale_config_and_rule() { local style_to_add="" if [ -n "$single_rule" ]; then - style_to_add="${single_rule%%.*}" # Extract style from rule + style_to_add="${single_rule%%.*}" fi if [ "$use_cqa" -eq 1 ]; then @@ -93,54 +86,50 @@ setup_vale_config_and_rule() { fi } -# --- Scope: pr --- run_vale_pr() { - echo "Checking all changed files on this branch..." + echo "Checking changed files on this branch..." if ! git rev-parse --git-dir >/dev/null 2>&1; then - echo "Error: Not in a git repository." >&2 + echo "Error: Not a git repo." 
>&2 exit 1 fi - local UPSTREAM_BRANCH - UPSTREAM_BRANCH=$(git rev-parse --abbrev-ref --symbolic-full-name '@{upstream}' 2>/dev/null) + local upstream + upstream=$(git rev-parse --abbrev-ref --symbolic-full-name '@{upstream}' 2>/dev/null) - if [ -z "$UPSTREAM_BRANCH" ]; then - echo "Error: No upstream branch configured for the current branch." >&2 - echo "Please set one with: git branch --set-upstream-to=/" >&2 + if [ -z "$upstream" ]; then + echo "Error: No upstream set. Please set with git branch --set-upstream-to=/" >&2 exit 1 fi - echo "Finding common ancestor with '$UPSTREAM_BRANCH'..." - local MERGE_BASE - MERGE_BASE=$(git merge-base HEAD "$UPSTREAM_BRANCH") - if [ -z "$MERGE_BASE" ]; then - echo "Error: Could not find a common merge base with '$UPSTREAM_BRANCH'." >&2 + local base + base=$(git merge-base HEAD "$upstream") + if [ -z "$base" ]; then + echo "Error: Could not find merge base with $upstream." >&2 exit 1 fi - if git diff --quiet --diff-filter=d "$MERGE_BASE" HEAD; then - echo "No changed files found on this branch." + if git diff --quiet --diff-filter=d "$base" HEAD; then + echo "No changed files found." exit 0 fi echo "Running Vale..." local output - output=$( ( + output=$( set -o pipefail - git diff -z --name-only --diff-filter=d "$MERGE_BASE" HEAD \ + git diff -z --name-only --diff-filter=d "$base" HEAD \ | xargs -0 vale --output CLI --minAlertLevel=suggestion --no-exit "${VALE_ARGS[@]}" - ) ) + ) if [ -z "$output" ]; then - echo "✅ No issues found in changed files." + echo "✅ No issues found." else echo "$output" fi } -# --- Scope: assembly --- run_vale_assembly() { local assembly_file=$1 if [ ! 
-f "$assembly_file" ]; then @@ -160,24 +149,52 @@ run_vale_assembly() { echo "" echo "--- Linting Modules from $assembly_file ---" - local MODULES - MODULES=$(awk -F'::|\\[|]' '/^include::/ { print $2 }' "$assembly_file") + local modules + modules=$(awk -F'::|\\[|]' '/^include::/ { print $2 }' "$assembly_file") - if [ -z "$MODULES" ]; then - echo "No modules found to lint." + if [ -z "$modules" ]; then + echo "No modules found." return fi - local FILTERED_MODULES - FILTERED_MODULES=$(printf '%s\n' "$MODULES" | grep -v -E '(^|/)_' || true) + local filtered_modules + filtered_modules=$(printf '%s\n' "$modules" | grep -v -E '(^|/)_') - if [ -z "$FILTERED_MODULES" ]; then + if [ -z "$filtered_modules" ]; then echo "All modules are partials (skipped)." return fi + local repo_root + repo_root=$(git rev-parse --show-toplevel) + + local missing_files=() + local abs_modules=() + while IFS= read -r module_path; do + clean_path="${module_path#./}" + full_path="$repo_root/$clean_path" + if [ ! -f "$full_path" ]; then + missing_files+=("$full_path") + else + abs_modules+=("$full_path") + fi + done <<< "$filtered_modules" + + if [ ${#missing_files[@]} -gt 0 ]; then + echo "Warning: The following included module files do not exist:" >&2 + for missing_file in "${missing_files[@]}"; do + echo " $missing_file" >&2 + done + echo "Continuing linting, ignoring missing includes." >&2 + fi + + if [ ${#abs_modules[@]} -eq 0 ]; then + echo "No existing modules found to lint." + return + fi + local modules_output - modules_output=$(printf '%s\n' "$FILTERED_MODULES" | xargs vale --output CLI "${VALE_ARGS[@]}") + modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs vale --output CLI "${VALE_ARGS[@]}") if [ -z "$modules_output" ]; then echo "✅ No issues found in modules." @@ -186,7 +203,6 @@ run_vale_assembly() { fi } -# --- Scope: dir --- run_vale_dir() { local scan_dir=$1 if [ ! -d "$scan_dir" ]; then @@ -196,33 +212,33 @@ run_vale_dir() { echo "Scanning $scan_dir for assemblies..." 
- local -a ASSEMBLIES + local -a assemblies=() while IFS= read -r line; do - ASSEMBLIES+=("$line") + assemblies+=("$line") done < <(find "$scan_dir" \ \( -type d -name "_*" -prune \) \ -o \ \( -type f -name "*.adoc" -exec grep -lq "^include::.*modules/" {} \; -print \)) - if [ ${#ASSEMBLIES[@]} -eq 0 ]; then - echo "No assembly files (containing 'include::...modules/...') found in $scan_dir." + if [ ${#assemblies[@]} -eq 0 ]; then + echo "No assembly files found." exit 0 fi - echo "Found assemblies. Running grouped Vale scan..." - for assembly in "${ASSEMBLIES[@]}"; do + echo "Found assemblies. Running grouped scans..." + + for assembly in "${assemblies[@]}"; do echo "=====================================================================" - echo "Checking Assembly and its Modules: $assembly" + echo "Checking Assembly and Modules: $assembly" echo "=====================================================================" run_vale_assembly "$assembly" echo "" done } -# --- Main --- if ! command -v vale >/dev/null 2>&1; then echo "Error: vale is not installed or not in PATH." >&2 - echo "Please install vale: https://vale.sh/docs/vale-cli/installation/" >&2 + echo "Install: https://vale.sh/docs/vale-cli/installation/" >&2 exit 1 fi @@ -244,13 +260,13 @@ while [[ "$#" -gt 0 ]]; do SINGLE_RULE="$2" shift 2 else - echo "Error: --rule requires a argument." >&2 + echo "Error: --rule requires ." >&2 show_help exit 1 fi ;; --help|-h) show_help; exit 0 ;; - -*) echo "Error: Unknown flag $1"; show_help; exit 1 ;; + -*) echo "Error: Unknown flag $1" >&2; show_help; exit 1 ;; *) if [ -z "$SCOPE" ]; then SCOPE="$1" @@ -263,7 +279,7 @@ while [[ "$#" -gt 0 ]]; do done if [ -z "$SCOPE" ]; then - echo "Error: No scope (pr, dir, assembly) provided." >&2 + echo "Error: No scope provided." >&2 show_help exit 1 fi @@ -273,22 +289,25 @@ setup_vale_config_and_rule "$USE_CQA" "$SINGLE_RULE" case "$SCOPE" in pr) if [ ${#ARGS[@]} -ne 0 ]; then - echo "Error: 'pr' scope takes no path arguments." 
>&2 - show_help; exit 1 + echo "Error: 'pr' scope takes no args." >&2 + show_help + exit 1 fi run_vale_pr ;; dir) if [ ${#ARGS[@]} -ne 1 ]; then - echo "Error: 'dir' scope requires one directory path." >&2 - show_help; exit 1 + echo "Error: 'dir' scope requires one directory arg." >&2 + show_help + exit 1 fi run_vale_dir "${ARGS[0]}" ;; assembly) if [ ${#ARGS[@]} -ne 1 ]; then - echo "Error: 'assembly' scope requires one file path." >&2 - show_help; exit 1 + echo "Error: 'assembly' scope requires one file arg." >&2 + show_help + exit 1 fi run_vale_assembly "${ARGS[0]}" ;; From 27e7a335db028219e3f29bdf80b240c939a4897f Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Tue, 28 Oct 2025 19:37:10 -0400 Subject: [PATCH 04/10] Claude code suggestions --- scripts/vale-check.sh | 81 +++++++++++++++++++++++++++++++++---------- 1 file changed, 63 insertions(+), 18 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index 4289c06995d0..c8c912f6f893 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -30,6 +30,17 @@ set +e VALE_ARGS=() ORIGINAL_VALE_INI=".vale.ini" +TEMP_FILES=() + +# Cleanup function for temp files +cleanup_temp_files() { + for temp_file in "${TEMP_FILES[@]}"; do + [ -f "$temp_file" ] && rm -f "$temp_file" + done +} + +# Set trap once for all cleanup +trap cleanup_temp_files EXIT INT TERM show_help() { echo "Usage: $0 [path] [--cqa|--repo] [--rule ]" @@ -63,16 +74,18 @@ setup_vale_config_and_rule() { fi if [ "$use_cqa" -eq 1 ]; then - TEMP_VALE_INI=$(mktemp ./.vale.ini.temp.XXXXXX) + # Cross-platform temp file creation + TEMP_VALE_INI=$(mktemp "${TMPDIR:-.}/.vale.ini.temp.XXXXXX" 2>/dev/null || mktemp -t .vale.ini.temp) + TEMP_FILES+=("$TEMP_VALE_INI") sed 's/^\(BasedOnStyles = .*\)$/\1, AsciiDocDITA/' "$base_ini" > "$TEMP_VALE_INI" - trap "rm -f '$TEMP_VALE_INI'" EXIT INT TERM VALE_ARGS=(--config "$TEMP_VALE_INI") elif [ -n "$style_to_add" ]; then if ! 
grep -q "BasedOnStyles.*$style_to_add" "$base_ini"; then - TEMP_VALE_INI=$(mktemp ./.vale.ini.temp.XXXXXX) + # Cross-platform temp file creation + TEMP_VALE_INI=$(mktemp "${TMPDIR:-.}/.vale.ini.temp.XXXXXX" 2>/dev/null || mktemp -t .vale.ini.temp) + TEMP_FILES+=("$TEMP_VALE_INI") sed "s/^\(BasedOnStyles = .*\)$/\1, $style_to_add/" "$base_ini" > "$TEMP_VALE_INI" - trap "rm -f '$TEMP_VALE_INI'" EXIT INT TERM VALE_ARGS=(--config "$TEMP_VALE_INI") else VALE_ARGS=() @@ -117,11 +130,18 @@ run_vale_pr() { echo "Running Vale..." local output - output=$( - set -o pipefail - git diff -z --name-only --diff-filter=d "$base" HEAD \ - | xargs -0 vale --output CLI --minAlertLevel=suggestion --no-exit "${VALE_ARGS[@]}" - ) + local changed_files + changed_files=$(git diff -z --name-only --diff-filter=d "$base" HEAD) + + if [ -z "$changed_files" ]; then + output="" + else + output=$( + set -o pipefail + printf '%s' "$changed_files" \ + | xargs -0 vale --output CLI --minAlertLevel=suggestion --no-exit "${VALE_ARGS[@]}" 2>/dev/null || true + ) + fi if [ -z "$output" ]; then echo "✅ No issues found." @@ -171,8 +191,8 @@ run_vale_assembly() { local missing_files=() local abs_modules=() while IFS= read -r module_path; do - clean_path="${module_path#./}" - full_path="$repo_root/$clean_path" + local clean_path="${module_path#./}" + local full_path="$repo_root/$clean_path" if [ ! -f "$full_path" ]; then missing_files+=("$full_path") else @@ -194,7 +214,11 @@ run_vale_assembly() { fi local modules_output - modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs vale --output CLI "${VALE_ARGS[@]}") + if [ ${#abs_modules[@]} -gt 0 ]; then + modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs vale --output CLI "${VALE_ARGS[@]}" 2>/dev/null || true) + else + modules_output="" + fi if [ -z "$modules_output" ]; then echo "✅ No issues found in modules." @@ -213,12 +237,15 @@ run_vale_dir() { echo "Scanning $scan_dir for assemblies..." 
local -a assemblies=() - while IFS= read -r line; do - assemblies+=("$line") - done < <(find "$scan_dir" \ - \( -type d -name "_*" -prune \) \ - -o \ - \( -type f -name "*.adoc" -exec grep -lq "^include::.*modules/" {} \; -print \)) + # Find .adoc files, exclude directories starting with _, then grep for assemblies + local adoc_files + adoc_files=$(find "$scan_dir" -type f -name "*.adoc" ! -path "*/_*" -print0 2>/dev/null) + + if [ -n "$adoc_files" ]; then + while IFS= read -r line; do + [ -n "$line" ] && assemblies+=("$line") + done < <(printf '%s' "$adoc_files" | xargs -0 grep -l "^include::.*modules/" 2>/dev/null || true) + fi if [ ${#assemblies[@]} -eq 0 ]; then echo "No assembly files found." @@ -236,12 +263,30 @@ run_vale_dir() { done } +# Check required dependencies if ! command -v vale >/dev/null 2>&1; then echo "Error: vale is not installed or not in PATH." >&2 echo "Install: https://vale.sh/docs/vale-cli/installation/" >&2 exit 1 fi +if ! command -v git >/dev/null 2>&1; then + echo "Error: git is not installed or not in PATH." >&2 + exit 1 +fi + +if ! command -v awk >/dev/null 2>&1; then + echo "Error: awk is not installed or not in PATH." >&2 + exit 1 +fi + +# Validate .vale.ini exists +if [ ! -f "$ORIGINAL_VALE_INI" ]; then + echo "Error: Configuration file '$ORIGINAL_VALE_INI' not found." >&2 + echo "Please run this script from the repository root." >&2 + exit 1 +fi + USE_CQA=0 SINGLE_RULE="" SCOPE="" From c5d389b72d7d38a017cc76c6e6781e8c15bba542 Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Fri, 31 Oct 2025 09:55:25 -0400 Subject: [PATCH 05/10] Address feedback on temp file issues - Instead of copying the related rules into a temp dir, Claude suggested moving the location of the temp file to the project root. This should resolve the issues. 
:) --- scripts/vale-check.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index c8c912f6f893..e33d72f624cc 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -74,16 +74,16 @@ setup_vale_config_and_rule() { fi if [ "$use_cqa" -eq 1 ]; then - # Cross-platform temp file creation - TEMP_VALE_INI=$(mktemp "${TMPDIR:-.}/.vale.ini.temp.XXXXXX" 2>/dev/null || mktemp -t .vale.ini.temp) + # Create temp file in repo root so Vale can find styles/ and config/ directories + TEMP_VALE_INI=$(mktemp .vale.ini.temp.XXXXXX) TEMP_FILES+=("$TEMP_VALE_INI") sed 's/^\(BasedOnStyles = .*\)$/\1, AsciiDocDITA/' "$base_ini" > "$TEMP_VALE_INI" VALE_ARGS=(--config "$TEMP_VALE_INI") elif [ -n "$style_to_add" ]; then if ! grep -q "BasedOnStyles.*$style_to_add" "$base_ini"; then - # Cross-platform temp file creation - TEMP_VALE_INI=$(mktemp "${TMPDIR:-.}/.vale.ini.temp.XXXXXX" 2>/dev/null || mktemp -t .vale.ini.temp) + # Create temp file in repo root so Vale can find styles/ and config/ directories + TEMP_VALE_INI=$(mktemp .vale.ini.temp.XXXXXX) TEMP_FILES+=("$TEMP_VALE_INI") sed "s/^\(BasedOnStyles = .*\)$/\1, $style_to_add/" "$base_ini" > "$TEMP_VALE_INI" VALE_ARGS=(--config "$TEMP_VALE_INI") From 5c70e8f80c753b9aeed118db362db8fe5a8be433 Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Fri, 31 Oct 2025 15:25:56 -0400 Subject: [PATCH 06/10] Add flags for error level output and style --- scripts/vale-check.sh | 84 +++++++++++++++++++++++++++++++++---------- 1 file changed, 65 insertions(+), 19 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index e33d72f624cc..faad92d98ccf 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -10,7 +10,9 @@ # # FLAGS: # --cqa: Uses a temporary config with all AsciiDocDITA rules enabled. +# --style : Enables a specific style package (e.g., AsciiDocDITA, RedHat). 
# --rule : Checks for only one specific rule (e.g., AsciiDocDITA.RelatedLinks). +# --level : Filter output by severity: 'error', 'warning', or 'all' (default: all). # --repo: (Default) Uses the standard .vale.ini in the repository. # --help, -h: Shows this help message. # @@ -43,7 +45,7 @@ cleanup_temp_files() { trap cleanup_temp_files EXIT INT TERM show_help() { - echo "Usage: $0 [path] [--cqa|--repo] [--rule ]" + echo "Usage: $0 [path] [flags]" echo "" echo "SCOPES:" echo " pr Checks all changed .adoc files on current branch" @@ -51,36 +53,40 @@ show_help() { echo " assembly Checks one assembly file and its modules" echo "" echo "FLAGS:" - echo " --cqa Use temporary config with AsciiDocDITA style enabled" - echo " --repo Use repository .vale.ini config (default)" - echo " --rule Run only one specific Vale rule" - echo " --help, -h Show this help message" + echo " --cqa Use temporary config with AsciiDocDITA style enabled" + echo " --style Enable a specific style package (e.g., AsciiDocDITA, RedHat)" + echo " --rule Run only one specific Vale rule (e.g., AsciiDocDITA.RelatedLinks)" + echo " --level Filter output: 'error', 'warning', or 'all' (default: all)" + echo " --repo Use repository .vale.ini config (default)" + echo " --help, -h Show this help message" echo "" echo "EXAMPLES:" echo " $0 pr" echo " $0 pr --cqa" - echo " $0 dir extensions/" + echo " $0 pr --level error" + echo " $0 dir extensions/ --style RedHat" echo " $0 assembly path/to/file.adoc --rule AsciiDocDITA.RelatedLinks" + echo " $0 pr --style AsciiDocDITA --level warning" } setup_vale_config_and_rule() { local use_cqa=$1 local single_rule=$2 + local custom_style=$3 local base_ini="$ORIGINAL_VALE_INI" local style_to_add="" - if [ -n "$single_rule" ]; then + # Determine which style to add + if [ "$use_cqa" -eq 1 ]; then + style_to_add="AsciiDocDITA" + elif [ -n "$custom_style" ]; then + style_to_add="$custom_style" + elif [ -n "$single_rule" ]; then style_to_add="${single_rule%%.*}" fi - if [ 
"$use_cqa" -eq 1 ]; then - # Create temp file in repo root so Vale can find styles/ and config/ directories - TEMP_VALE_INI=$(mktemp .vale.ini.temp.XXXXXX) - TEMP_FILES+=("$TEMP_VALE_INI") - sed 's/^\(BasedOnStyles = .*\)$/\1, AsciiDocDITA/' "$base_ini" > "$TEMP_VALE_INI" - VALE_ARGS=(--config "$TEMP_VALE_INI") - - elif [ -n "$style_to_add" ]; then + # Create temp config if we need to add a style + if [ -n "$style_to_add" ]; then if ! grep -q "BasedOnStyles.*$style_to_add" "$base_ini"; then # Create temp file in repo root so Vale can find styles/ and config/ directories TEMP_VALE_INI=$(mktemp .vale.ini.temp.XXXXXX) @@ -94,6 +100,7 @@ setup_vale_config_and_rule() { VALE_ARGS=() fi + # Add rule filter if specified if [ -n "$single_rule" ]; then VALE_ARGS+=(--filter=".Name=='$single_rule'") fi @@ -139,7 +146,7 @@ run_vale_pr() { output=$( set -o pipefail printf '%s' "$changed_files" \ - | xargs -0 vale --output CLI --minAlertLevel=suggestion --no-exit "${VALE_ARGS[@]}" 2>/dev/null || true + | xargs -0 vale --output CLI --minAlertLevel="$ALERT_LEVEL" --no-exit "${VALE_ARGS[@]}" 2>/dev/null || true ) fi @@ -159,7 +166,7 @@ run_vale_assembly() { echo "--- Linting Assembly: $assembly_file ---" local assembly_output - assembly_output=$(vale --output CLI "${VALE_ARGS[@]}" "$assembly_file") + assembly_output=$(vale --output CLI --minAlertLevel="$ALERT_LEVEL" "${VALE_ARGS[@]}" "$assembly_file") if [ -z "$assembly_output" ]; then echo "✅ No issues found." 
@@ -215,7 +222,7 @@ run_vale_assembly() { local modules_output if [ ${#abs_modules[@]} -gt 0 ]; then - modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs vale --output CLI "${VALE_ARGS[@]}" 2>/dev/null || true) + modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs vale --output CLI --minAlertLevel="$ALERT_LEVEL" "${VALE_ARGS[@]}" 2>/dev/null || true) else modules_output="" fi @@ -289,6 +296,8 @@ fi USE_CQA=0 SINGLE_RULE="" +STYLE_TO_ADD="" +ALERT_LEVEL="suggestion" SCOPE="" ARGS=() @@ -310,6 +319,34 @@ while [[ "$#" -gt 0 ]]; do exit 1 fi ;; + --style=*) + STYLE_TO_ADD="${1#*=}" + shift + ;; + --style) + if [[ -n "$2" && "${2:0:1}" != "-" ]]; then + STYLE_TO_ADD="$2" + shift 2 + else + echo "Error: --style requires ." >&2 + show_help + exit 1 + fi + ;; + --level=*) + ALERT_LEVEL="${1#*=}" + shift + ;; + --level) + if [[ -n "$2" && "${2:0:1}" != "-" ]]; then + ALERT_LEVEL="$2" + shift 2 + else + echo "Error: --level requires error|warning|all." >&2 + show_help + exit 1 + fi + ;; --help|-h) show_help; exit 0 ;; -*) echo "Error: Unknown flag $1" >&2; show_help; exit 1 ;; *) @@ -329,7 +366,16 @@ if [ -z "$SCOPE" ]; then exit 1 fi -setup_vale_config_and_rule "$USE_CQA" "$SINGLE_RULE" +# Validate and normalize alert level +case "$ALERT_LEVEL" in + error) ALERT_LEVEL="error" ;; + warning) ALERT_LEVEL="warning" ;; + all) ALERT_LEVEL="suggestion" ;; + suggestion) ALERT_LEVEL="suggestion" ;; + *) echo "Error: --level must be 'error', 'warning', or 'all'." 
>&2; exit 1 ;; +esac + +setup_vale_config_and_rule "$USE_CQA" "$SINGLE_RULE" "$STYLE_TO_ADD" case "$SCOPE" in pr) From 28bb8dc156444f44e083532ccab33807795bb48c Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Fri, 31 Oct 2025 15:33:43 -0400 Subject: [PATCH 07/10] Refactor for organization, functional principles, comments --- scripts/vale-check.sh | 400 +++++++++++++++++++++++++++++++++--------- 1 file changed, 313 insertions(+), 87 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index faad92d98ccf..aeb8bd29887b 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -1,25 +1,36 @@ #!/bin/bash # shellcheck shell=bash -# A unified script to run Vale with different scopes and configurations. +################################################################################ +# vale-check.sh - A unified Vale linter script with multiple scopes +# +# This script runs Vale (a linting tool) on AsciiDoc documentation with +# different scopes and configurations. It supports checking changed files, +# directories, or individual assemblies with their modules. # # SCOPES: -# pr: Checks all .adoc files changed on the current branch. -# dir: Finds and checks all assemblies in a directory (grouped output). -# assembly: Checks a single assembly file AND its included modules. +# pr: Checks all .adoc files changed on the current branch +# dir: Finds and checks all assemblies in a directory (grouped output) +# assembly: Checks a single assembly file AND its included modules # # FLAGS: -# --cqa: Uses a temporary config with all AsciiDocDITA rules enabled. -# --style : Enables a specific style package (e.g., AsciiDocDITA, RedHat). -# --rule : Checks for only one specific rule (e.g., AsciiDocDITA.RelatedLinks). -# --level : Filter output by severity: 'error', 'warning', or 'all' (default: all). -# --repo: (Default) Uses the standard .vale.ini in the repository. -# --help, -h: Shows this help message. 
+# --cqa: Uses a temporary config with all AsciiDocDITA rules enabled +# --style : Enables a specific style package (e.g., AsciiDocDITA, RedHat) +# --rule : Checks for only one specific rule (e.g., AsciiDocDITA.RelatedLinks) +# --level : Filter output by severity: 'error', 'warning', or 'all' (default: all) +# --repo: (Default) Uses the standard .vale.ini in the repository +# --help, -h: Shows this help message # # REQUIREMENTS: # - bash 3.2+ # - git # - vale +# - awk +################################################################################ + +#=============================================================================== +# BASH VERSION CHECK +#=============================================================================== if [ "${BASH_VERSINFO[0]}" -lt 3 ]; then echo "Error: This script requires bash 3.2 or higher." >&2 @@ -27,23 +38,53 @@ if [ "${BASH_VERSINFO[0]}" -lt 3 ]; then exit 1 fi +# Exit on unset variables; don't exit on command failures (we handle errors) set -u set +e +#=============================================================================== +# GLOBAL VARIABLES +#=============================================================================== + +# Vale command-line arguments (built dynamically based on flags) VALE_ARGS=() + +# Path to the repository's Vale configuration file ORIGINAL_VALE_INI=".vale.ini" + +# Array to track temporary files for cleanup TEMP_FILES=() -# Cleanup function for temp files +# User-specified options (set during argument parsing) +USE_CQA=0 # Whether to use CQA (AsciiDocDITA) mode +SINGLE_RULE="" # Specific rule to check (e.g., "AsciiDocDITA.RelatedLinks") +STYLE_TO_ADD="" # Custom style to enable (e.g., "RedHat") +ALERT_LEVEL="suggestion" # Minimum alert level (error, warning, suggestion) +SCOPE="" # Scope of the check (pr, dir, assembly) +ARGS=() # Additional positional arguments + +#=============================================================================== +# CLEANUP AND ERROR HANDLING 
+#=============================================================================== + +# cleanup_temp_files - Removes all temporary files created during execution +# +# This function is called automatically on script exit (via trap). It removes +# any temporary .vale.ini files created when using --cqa, --style, or --rule. cleanup_temp_files() { for temp_file in "${TEMP_FILES[@]}"; do [ -f "$temp_file" ] && rm -f "$temp_file" done } -# Set trap once for all cleanup +# Register cleanup function to run on script exit, interrupt, or termination trap cleanup_temp_files EXIT INT TERM +#=============================================================================== +# HELP AND DOCUMENTATION +#=============================================================================== + +# show_help - Displays usage information and examples show_help() { echo "Usage: $0 [path] [flags]" echo "" @@ -69,6 +110,25 @@ show_help() { echo " $0 pr --style AsciiDocDITA --level warning" } +#=============================================================================== +# CONFIGURATION SETUP +#=============================================================================== + +# setup_vale_config_and_rule - Configures Vale arguments based on user flags +# +# This function determines which Vale configuration to use and builds the +# VALE_ARGS array. It handles: +# 1. Adding styles to BasedOnStyles (via temp config file) +# 2. 
Filtering to a single rule (via --filter) +# +# Args: +# $1 - use_cqa: 1 to enable AsciiDocDITA style, 0 otherwise +# $2 - single_rule: Specific rule name to filter (empty if not specified) +# $3 - custom_style: Custom style to enable (empty if not specified) +# +# Side effects: +# - Modifies global VALE_ARGS array +# - May create temporary .vale.ini file (tracked in TEMP_FILES) setup_vale_config_and_rule() { local use_cqa=$1 local single_rule=$2 @@ -76,44 +136,165 @@ setup_vale_config_and_rule() { local base_ini="$ORIGINAL_VALE_INI" local style_to_add="" - # Determine which style to add + # Determine which style needs to be added (priority order matters) if [ "$use_cqa" -eq 1 ]; then + # --cqa flag: Enable AsciiDocDITA style_to_add="AsciiDocDITA" elif [ -n "$custom_style" ]; then + # --style flag: Enable user-specified style style_to_add="$custom_style" elif [ -n "$single_rule" ]; then + # --rule flag: Extract and enable the style from the rule name + # (e.g., "AsciiDocDITA.RelatedLinks" -> "AsciiDocDITA") style_to_add="${single_rule%%.*}" fi - # Create temp config if we need to add a style + # Create a temporary config file if we need to add a style if [ -n "$style_to_add" ]; then + # Check if the style is already in the base config if ! 
grep -q "BasedOnStyles.*$style_to_add" "$base_ini"; then - # Create temp file in repo root so Vale can find styles/ and config/ directories + # Style not present: create temp config with style added + # Note: Created in repo root so Vale can find styles/ and config/ directories TEMP_VALE_INI=$(mktemp .vale.ini.temp.XXXXXX) TEMP_FILES+=("$TEMP_VALE_INI") + + # Use sed to append the style to the BasedOnStyles line sed "s/^\(BasedOnStyles = .*\)$/\1, $style_to_add/" "$base_ini" > "$TEMP_VALE_INI" VALE_ARGS=(--config "$TEMP_VALE_INI") else + # Style already present: use default config VALE_ARGS=() fi else + # No style to add: use default config VALE_ARGS=() fi - # Add rule filter if specified + # Add rule filter if a specific rule was requested if [ -n "$single_rule" ]; then + # Vale's --filter uses jq-style queries to filter alerts VALE_ARGS+=(--filter=".Name=='$single_rule'") fi } +#=============================================================================== +# VALE EXECUTION HELPERS +#=============================================================================== + +# run_vale - Executes Vale with consistent arguments and output handling +# +# This is a wrapper function that runs Vale with the configured arguments +# (from VALE_ARGS) and the user-specified alert level (from ALERT_LEVEL). +# +# Args: +# $@ - Files or paths to lint +# +# Returns: +# Vale output (may be empty if no issues found) +run_vale() { + vale --output CLI --minAlertLevel="$ALERT_LEVEL" "${VALE_ARGS[@]}" "$@" 2>/dev/null || true +} + +# display_output - Shows Vale results with consistent formatting +# +# Args: +# $1 - output: Vale output to display (may be empty) +# $2 - context: Description of what was checked (e.g., "Assembly", "modules") +display_output() { + local output="$1" + local context="${2:-files}" + + if [ -z "$output" ]; then + echo "✅ No issues found${context:+ in $context}." 
+ else + echo "$output" + fi +} + +#=============================================================================== +# MODULE EXTRACTION AND RESOLUTION +#=============================================================================== + +# extract_module_paths - Extracts module paths from an assembly file +# +# Parses an assembly file for include:: directives and extracts the module paths. +# Filters out partials (files/directories starting with underscore). +# +# Args: +# $1 - assembly_file: Path to the assembly file +# +# Returns: +# Newline-separated list of module paths (may be empty) +extract_module_paths() { + local assembly_file="$1" + + # Use awk to parse include:: directives + # Field separator handles: include::path/to/file.adoc[] or include::path[options] + local modules + modules=$(awk -F'::|\\[|]' '/^include::/ { print $2 }' "$assembly_file") + + # Filter out partials (paths containing /_ or starting with _) + if [ -n "$modules" ]; then + printf '%s\n' "$modules" | grep -v -E '(^|/)_' || true + fi +} + +# resolve_module_paths - Converts relative module paths to absolute paths +# +# Takes a list of relative module paths and converts them to absolute paths +# relative to the repository root. Separates existing files from missing files. 
+# +# Args: +# $1 - filtered_modules: Newline-separated list of module paths +# +# Outputs: +# Sets global arrays (via named references would be better, but bash 3.2 compatible): +# - abs_modules: Array of absolute paths to existing files +# - missing_files: Array of absolute paths to missing files +resolve_module_paths() { + local filtered_modules="$1" + local repo_root + repo_root=$(git rev-parse --show-toplevel) + + # Reset arrays + missing_files=() + abs_modules=() + + # Process each module path + while IFS= read -r module_path; do + [ -z "$module_path" ] && continue + + # Remove leading ./ if present + local clean_path="${module_path#./}" + local full_path="$repo_root/$clean_path" + + # Separate existing vs missing files + if [ ! -f "$full_path" ]; then + missing_files+=("$full_path") + else + abs_modules+=("$full_path") + fi + done <<< "$filtered_modules" +} + +#=============================================================================== +# SCOPE EXECUTION FUNCTIONS +#=============================================================================== + +# run_vale_pr - Checks all changed .adoc files on the current branch +# +# This scope compares the current branch against its upstream and runs Vale +# on all changed .adoc files. Useful for PR/MR validation. run_vale_pr() { echo "Checking changed files on this branch..." + # Verify we're in a git repository if ! git rev-parse --git-dir >/dev/null 2>&1; then echo "Error: Not a git repo." >&2 exit 1 fi + # Get the upstream branch for comparison local upstream upstream=$(git rev-parse --abbrev-ref --symbolic-full-name '@{upstream}' 2>/dev/null) @@ -122,6 +303,7 @@ run_vale_pr() { exit 1 fi + # Find the merge base (common ancestor) with upstream local base base=$(git merge-base HEAD "$upstream") if [ -z "$base" ]; then @@ -129,6 +311,7 @@ run_vale_pr() { exit 1 fi + # Check if there are any changed files if git diff --quiet --diff-filter=d "$base" HEAD; then echo "No changed files found." 
exit 0 @@ -136,77 +319,65 @@ run_vale_pr() { echo "Running Vale..." - local output + # Get changed files (null-separated for safety with special characters) local changed_files changed_files=$(git diff -z --name-only --diff-filter=d "$base" HEAD) + # Run Vale on changed files + local output if [ -z "$changed_files" ]; then output="" else output=$( set -o pipefail - printf '%s' "$changed_files" \ - | xargs -0 vale --output CLI --minAlertLevel="$ALERT_LEVEL" --no-exit "${VALE_ARGS[@]}" 2>/dev/null || true + printf '%s' "$changed_files" | xargs -0 run_vale --no-exit ) fi - if [ -z "$output" ]; then - echo "✅ No issues found." - else - echo "$output" - fi + display_output "$output" } +# run_vale_assembly - Checks an assembly file and all its included modules +# +# This scope lints both the assembly file itself and all modules it includes. +# It provides separate output for the assembly and modules, and warns about +# missing module files. +# +# Args: +# $1 - assembly_file: Path to the assembly file run_vale_assembly() { - local assembly_file=$1 + local assembly_file="$1" + + # Validate assembly file exists if [ ! -f "$assembly_file" ]; then echo "Error: Assembly file not found: $assembly_file" >&2 exit 1 fi + # --- Lint the assembly file itself --- echo "--- Linting Assembly: $assembly_file ---" local assembly_output - assembly_output=$(vale --output CLI --minAlertLevel="$ALERT_LEVEL" "${VALE_ARGS[@]}" "$assembly_file") - - if [ -z "$assembly_output" ]; then - echo "✅ No issues found." - else - echo "$assembly_output" - fi + assembly_output=$(run_vale "$assembly_file") + display_output "$assembly_output" echo "" + # --- Extract and lint included modules --- echo "--- Linting Modules from $assembly_file ---" - local modules - modules=$(awk -F'::|\\[|]' '/^include::/ { print $2 }' "$assembly_file") - - if [ -z "$modules" ]; then - echo "No modules found." 
- return - fi + # Extract module paths from include:: directives local filtered_modules - filtered_modules=$(printf '%s\n' "$modules" | grep -v -E '(^|/)_') + filtered_modules=$(extract_module_paths "$assembly_file") if [ -z "$filtered_modules" ]; then - echo "All modules are partials (skipped)." + echo "No modules found." return fi - local repo_root - repo_root=$(git rev-parse --show-toplevel) - - local missing_files=() - local abs_modules=() - while IFS= read -r module_path; do - local clean_path="${module_path#./}" - local full_path="$repo_root/$clean_path" - if [ ! -f "$full_path" ]; then - missing_files+=("$full_path") - else - abs_modules+=("$full_path") - fi - done <<< "$filtered_modules" + # Resolve module paths to absolute paths + local missing_files abs_modules + resolve_module_paths "$filtered_modules" + # Warn about missing module files if [ ${#missing_files[@]} -gt 0 ]; then echo "Warning: The following included module files do not exist:" >&2 for missing_file in "${missing_files[@]}"; do @@ -215,27 +386,29 @@ run_vale_assembly() { echo "Continuing linting, ignoring missing includes." >&2 fi + # Lint existing modules if [ ${#abs_modules[@]} -eq 0 ]; then echo "No existing modules found to lint." return fi local modules_output - if [ ${#abs_modules[@]} -gt 0 ]; then - modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs vale --output CLI --minAlertLevel="$ALERT_LEVEL" "${VALE_ARGS[@]}" 2>/dev/null || true) - else - modules_output="" - fi - - if [ -z "$modules_output" ]; then - echo "✅ No issues found in modules." - else - echo "$modules_output" - fi + modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs run_vale) + display_output "$modules_output" "modules" } +# run_vale_dir - Finds and checks all assemblies in a directory +# +# This scope recursively finds all assembly files in a directory and runs +# the assembly scope on each one. Assemblies are identified by the presence +# of "include::.*modules/" directives. 
+# +# Args: +# $1 - scan_dir: Directory to scan for assemblies run_vale_dir() { - local scan_dir=$1 + local scan_dir="$1" + + # Validate directory exists if [ ! -d "$scan_dir" ]; then echo "Error: Directory not found: $scan_dir" >&2 exit 1 @@ -243,24 +416,28 @@ run_vale_dir() { echo "Scanning $scan_dir for assemblies..." - local -a assemblies=() - # Find .adoc files, exclude directories starting with _, then grep for assemblies + # Find all .adoc files, excluding paths with underscore directories (partials) local adoc_files adoc_files=$(find "$scan_dir" -type f -name "*.adoc" ! -path "*/_*" -print0 2>/dev/null) + # Filter for assemblies (files containing "include::.*modules/") + local -a assemblies=() if [ -n "$adoc_files" ]; then while IFS= read -r line; do [ -n "$line" ] && assemblies+=("$line") done < <(printf '%s' "$adoc_files" | xargs -0 grep -l "^include::.*modules/" 2>/dev/null || true) fi + # Check if any assemblies were found if [ ${#assemblies[@]} -eq 0 ]; then echo "No assembly files found." exit 0 fi - echo "Found assemblies. Running grouped scans..." + echo "Found ${#assemblies[@]} assemblies. Running grouped scans..." + echo "" + # Run assembly scope on each found assembly for assembly in "${assemblies[@]}"; do echo "=====================================================================" echo "Checking Assembly and Modules: $assembly" @@ -270,7 +447,11 @@ run_vale_dir() { done } -# Check required dependencies +#=============================================================================== +# DEPENDENCY VALIDATION +#=============================================================================== + +# Check for required command-line tools if ! command -v vale >/dev/null 2>&1; then echo "Error: vale is not installed or not in PATH." >&2 echo "Install: https://vale.sh/docs/vale-cli/installation/" >&2 @@ -287,29 +468,37 @@ if ! 
command -v awk >/dev/null 2>&1; then exit 1 fi -# Validate .vale.ini exists +# Validate .vale.ini exists in current directory if [ ! -f "$ORIGINAL_VALE_INI" ]; then echo "Error: Configuration file '$ORIGINAL_VALE_INI' not found." >&2 echo "Please run this script from the repository root." >&2 exit 1 fi -USE_CQA=0 -SINGLE_RULE="" -STYLE_TO_ADD="" -ALERT_LEVEL="suggestion" -SCOPE="" -ARGS=() +#=============================================================================== +# ARGUMENT PARSING +#=============================================================================== +# Parse command-line arguments while [[ "$#" -gt 0 ]]; do case "$1" in - --cqa) USE_CQA=1; shift ;; - --repo) USE_CQA=0; shift ;; + --cqa) + # Enable CQA mode (AsciiDocDITA style) + USE_CQA=1 + shift + ;; + --repo) + # Explicitly use repository config (default behavior) + USE_CQA=0 + shift + ;; --rule=*) + # Parse --rule=RuleName format SINGLE_RULE="${1#*=}" shift ;; --rule) + # Parse --rule RuleName format if [[ -n "$2" && "${2:0:1}" != "-" ]]; then SINGLE_RULE="$2" shift 2 @@ -320,10 +509,12 @@ while [[ "$#" -gt 0 ]]; do fi ;; --style=*) + # Parse --style=StyleName format STYLE_TO_ADD="${1#*=}" shift ;; --style) + # Parse --style StyleName format if [[ -n "$2" && "${2:0:1}" != "-" ]]; then STYLE_TO_ADD="$2" shift 2 @@ -334,10 +525,12 @@ while [[ "$#" -gt 0 ]]; do fi ;; --level=*) + # Parse --level=error format ALERT_LEVEL="${1#*=}" shift ;; --level) + # Parse --level error format if [[ -n "$2" && "${2:0:1}" != "-" ]]; then ALERT_LEVEL="$2" shift 2 @@ -347,9 +540,18 @@ while [[ "$#" -gt 0 ]]; do exit 1 fi ;; - --help|-h) show_help; exit 0 ;; - -*) echo "Error: Unknown flag $1" >&2; show_help; exit 1 ;; + --help|-h) + show_help + exit 0 + ;; + -*) + # Unknown flag + echo "Error: Unknown flag $1" >&2 + show_help + exit 1 + ;; *) + # Positional arguments: first is scope, rest are args if [ -z "$SCOPE" ]; then SCOPE="$1" else @@ -360,6 +562,11 @@ while [[ "$#" -gt 0 ]]; do esac done 
+#=============================================================================== +# VALIDATION AND SETUP +#=============================================================================== + +# Ensure a scope was provided if [ -z "$SCOPE" ]; then echo "Error: No scope provided." >&2 show_help @@ -367,18 +574,34 @@ if [ -z "$SCOPE" ]; then fi # Validate and normalize alert level +# User specifies "all" but Vale uses "suggestion" for all levels case "$ALERT_LEVEL" in - error) ALERT_LEVEL="error" ;; - warning) ALERT_LEVEL="warning" ;; - all) ALERT_LEVEL="suggestion" ;; - suggestion) ALERT_LEVEL="suggestion" ;; - *) echo "Error: --level must be 'error', 'warning', or 'all'." >&2; exit 1 ;; + error) + ALERT_LEVEL="error" + ;; + warning) + ALERT_LEVEL="warning" + ;; + all|suggestion) + ALERT_LEVEL="suggestion" + ;; + *) + echo "Error: --level must be 'error', 'warning', or 'all'." >&2 + exit 1 + ;; esac +# Configure Vale arguments based on flags setup_vale_config_and_rule "$USE_CQA" "$SINGLE_RULE" "$STYLE_TO_ADD" +#=============================================================================== +# SCOPE EXECUTION +#=============================================================================== + +# Execute the appropriate scope function case "$SCOPE" in pr) + # PR scope: no additional arguments required if [ ${#ARGS[@]} -ne 0 ]; then echo "Error: 'pr' scope takes no args." >&2 show_help @@ -387,6 +610,7 @@ case "$SCOPE" in run_vale_pr ;; dir) + # Directory scope: requires one directory path if [ ${#ARGS[@]} -ne 1 ]; then echo "Error: 'dir' scope requires one directory arg." >&2 show_help @@ -395,6 +619,7 @@ case "$SCOPE" in run_vale_dir "${ARGS[0]}" ;; assembly) + # Assembly scope: requires one assembly file path if [ ${#ARGS[@]} -ne 1 ]; then echo "Error: 'assembly' scope requires one file arg." >&2 show_help @@ -403,6 +628,7 @@ case "$SCOPE" in run_vale_assembly "${ARGS[0]}" ;; *) + # Unknown scope echo "Error: Unknown scope '$SCOPE'." 
>&2 show_help exit 1 From b4ca039e9a25c634def9b0842b069fb063accbf8 Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Mon, 3 Nov 2025 10:07:40 -0500 Subject: [PATCH 08/10] Fix directory and assembly bug --- scripts/vale-check.sh | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/scripts/vale-check.sh b/scripts/vale-check.sh index aeb8bd29887b..c66045ae6b46 100755 --- a/scripts/vale-check.sh +++ b/scripts/vale-check.sh @@ -393,7 +393,7 @@ run_vale_assembly() { fi local modules_output - modules_output=$(printf '%s\n' "${abs_modules[@]}" | xargs run_vale) + modules_output=$(run_vale "${abs_modules[@]}") display_output "$modules_output" "modules" } @@ -417,16 +417,13 @@ run_vale_dir() { echo "Scanning $scan_dir for assemblies..." # Find all .adoc files, excluding paths with underscore directories (partials) - local adoc_files - adoc_files=$(find "$scan_dir" -type f -name "*.adoc" ! -path "*/_*" -print0 2>/dev/null) - - # Filter for assemblies (files containing "include::.*modules/") + # Then filter for assemblies (files containing "include::.*modules/") local -a assemblies=() - if [ -n "$adoc_files" ]; then - while IFS= read -r line; do - [ -n "$line" ] && assemblies+=("$line") - done < <(printf '%s' "$adoc_files" | xargs -0 grep -l "^include::.*modules/" 2>/dev/null || true) - fi + while IFS= read -r -d '' file; do + if grep -q "^include::.*modules/" "$file" 2>/dev/null; then + assemblies+=("$file") + fi + done < <(find "$scan_dir" -type f -name "*.adoc" ! 
-path "*/_*" -print0 2>/dev/null) # Check if any assemblies were found if [ ${#assemblies[@]} -eq 0 ]; then From dc90d5d375faf6b09dc02288a0292273728adc8a Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Tue, 11 Nov 2025 17:06:37 -0500 Subject: [PATCH 09/10] Testing a /refactor-callouts slash command --- modules/olmv1-creating-a-cluster-role.adoc | 14 ++++++++------ modules/olmv1-installing-an-operator.adoc | 22 ++++++++++++---------- modules/olmv1-updating-an-operator.adoc | 12 ++++++------ 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/modules/olmv1-creating-a-cluster-role.adoc b/modules/olmv1-creating-a-cluster-role.adoc index de0c403d1e82..9284014893db 100644 --- a/modules/olmv1-creating-a-cluster-role.adoc +++ b/modules/olmv1-creating-a-cluster-role.adoc @@ -66,9 +66,9 @@ rules: - update # Scoped to the name of the ClusterExtension resourceNames: - - # <1> + - ---- -<1> Specifies the value from the `metadata.name` field from the custom resource (CR) of the extension. +The `` specifies the value from the `metadata.name` field from the custom resource (CR) of the extension. . Search for the `clusterrole` and `clusterrolebindings` values in the `rules.resources` field in the extension's CSV file. @@ -89,7 +89,7 @@ rules: resources: - clusterroles verbs: - - create # <1> + - create - list - watch - apiGroups: @@ -101,7 +101,7 @@ rules: - update - patch - delete - resourceNames: # <2> + resourceNames: - "*" - apiGroups: - rbac.authorization.k8s.io @@ -124,8 +124,10 @@ rules: - "*" # ... ---- -<1> You cannot scope `create`, `list`, and `watch` permissions to specific resource names (the `resourceNames` field). You must scope these permissions to their resources (the `resources` field). -<2> Some resource names are generated by using the following format: `.`. After you install the extension, look up the resource names for the cluster roles and cluster role bindings for the controller of the extension. 
Replace the wildcard characters in this example with the generated names and follow the principle of least privilege. +where: + +`create`, `list`, and `watch`:: Specifies permissions that cannot be scoped to specific resource names (the `resourceNames` field). You must scope these permissions to their resources (the `resources` field). +`resourceNames`:: Specifies some resource names that are generated by using the following format: `.`. After you install the extension, look up the resource names for the cluster roles and cluster role bindings for the controller of the extension. Replace the wildcard characters in this example with the generated names and follow the principle of least privilege. . Search for the `customresourcedefinitions` value in the `rules.resources` field in the extension's CSV file. diff --git a/modules/olmv1-installing-an-operator.adoc b/modules/olmv1-installing-an-operator.adoc index 4e5f2f8d8f67..4cfee5ab5d37 100644 --- a/modules/olmv1-installing-an-operator.adoc +++ b/modules/olmv1-installing-an-operator.adoc @@ -26,23 +26,25 @@ apiVersion: olm.operatorframework.io/v1 metadata: name: spec: - namespace: <1> + namespace: serviceAccount: - name: <2> + name: source: sourceType: Catalog catalog: packageName: channels: - - <3> - version: <4> - upgradeConstraintPolicy: CatalogProvided <5> + - + version: + upgradeConstraintPolicy: CatalogProvided ---- -<1> Specifies the namespace where you want the bundle installed, such as `pipelines` or `my-extension`. Extensions are still cluster-scoped and might contain resources that are installed in different namespaces. -<2> Specifies the name of the service account you created to install, update, and manage your extension. -<3> Optional: Specifies channel names as an array, such as `pipelines-1.14` or `latest`. -<4> Optional: Specifies the version or version range, such as `1.14.0`, `1.14.x`, or `>=1.16`, of the package you want to install or update. 
For more information, see "Example custom resources (CRs) that specify a target version" and "Support for version ranges". -<5> Optional: Specifies the upgrade constraint policy. If unspecified, the default setting is `CatalogProvided`. The `CatalogProvided` setting only updates if the new version satisfies the upgrade constraints set by the package author. To force an update or rollback, set the field to `SelfCertified`. For more information, see "Forcing an update or rollback". +where: + +``:: Specifies the namespace where you want the bundle installed, such as `pipelines` or `my-extension`. Extensions are still cluster-scoped and might contain resources that are installed in different namespaces. +``:: Specifies the name of the service account you created to install, update, and manage your extension. +``:: Optional: Specifies channel names as an array, such as `pipelines-1.14` or `latest`. +``:: Optional: Specifies the version or version range, such as `1.14.0`, `1.14.x`, or `>=1.16`, of the package you want to install or update. For more information, see "Example custom resources (CRs) that specify a target version" and "Support for version ranges". +`CatalogProvided`:: Optional: Specifies the upgrade constraint policy. If unspecified, the default setting is `CatalogProvided`. The `CatalogProvided` setting only updates if the new version satisfies the upgrade constraints set by the package author. To force an update or rollback, set the field to `SelfCertified`. For more information, see "Forcing an update or rollback". 
.Example `pipelines-operator.yaml` CR [source,yaml] diff --git a/modules/olmv1-updating-an-operator.adoc b/modules/olmv1-updating-an-operator.adoc index aa293e47eb56..a6cdc5ed340a 100644 --- a/modules/olmv1-updating-an-operator.adoc +++ b/modules/olmv1-updating-an-operator.adoc @@ -199,9 +199,9 @@ spec: sourceType: Catalog catalog: packageName: openshift-pipelines-operator-rh - version: "1.15.0" <1> + version: "1.15.0" ---- -<1> Update the version from `1.14.x` to `1.15.0` +Update the version from `1.14.x` to `1.15.0`. ** If you want to define a range of acceptable update versions, edit your CR similar to the following example: + @@ -220,9 +220,9 @@ spec: sourceType: Catalog catalog: packageName: openshift-pipelines-operator-rh - version: ">1.15, <1.17" <1> + version: ">1.15, <1.17" ---- -<1> Specifies that the desired version range is greater than version `1.15` and less than `1.17`. For more information, see "Support for version ranges" and "Version comparison strings". +The version range specifies that the desired version is greater than version `1.15` and less than `1.17`. For more information, see "Support for version ranges" and "Version comparison strings". ** If you want to update to the latest version that can be resolved from a channel, edit your CR similar to the following example: + @@ -242,9 +242,9 @@ spec: catalog: packageName: openshift-pipelines-operator-rh channels: - - latest <1> + - latest ---- -<1> Installs the latest release that can be resolved from the specified channel. Updates to the channel are automatically installed. Enter values as an array. +The `latest` channel installs the latest release that can be resolved from the specified channel. Updates to the channel are automatically installed. Enter values as an array. 
** If you want to specify a channel and version or version range, edit your CR similar to the following example: + From cc7bf0eec15118a8ec5dfc8f6b898dd0d6594f45 Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Tue, 11 Nov 2025 17:28:50 -0500 Subject: [PATCH 10/10] Testing /refactor-catalog slash command on the `extension/` dir --- modules/olm-creating-fb-catalog-image.adoc | 61 +++++++++++-------- modules/olm-deprecations-schema.adoc | 25 +++++--- modules/olmv1-about-target-versions.adoc | 12 ++-- modules/olmv1-adding-a-catalog.adoc | 33 +++++----- modules/olmv1-catalog-selection-by-name.adoc | 4 +- .../olmv1-forcing-an-update-or-rollback.adoc | 22 ++++--- .../olmv1-granting-user-access-binding.adoc | 4 +- modules/olmv1-ocp-compat.adoc | 4 +- modules/olmv1-red-hat-catalogs.adoc | 8 +-- 9 files changed, 96 insertions(+), 77 deletions(-) diff --git a/modules/olm-creating-fb-catalog-image.adoc b/modules/olm-creating-fb-catalog-image.adoc index 4cc8b8e873ca..b70f14021e1e 100644 --- a/modules/olm-creating-fb-catalog-image.adoc +++ b/modules/olm-creating-fb-catalog-image.adoc @@ -42,11 +42,11 @@ $ opm generate dockerfile endif::[] ifndef::openshift-origin[] $ opm generate dockerfile \ - -i {registry-image} <1> + -i {registry-image} endif::[] ---- ifndef::openshift-origin[] -<1> Specify the official Red Hat base image by using the `-i` flag, otherwise the Dockerfile uses the default upstream image. +The `-i` flag specifies the official Red Hat base image, otherwise the Dockerfile uses the default upstream image. endif::[] -- + @@ -56,32 +56,37 @@ The Dockerfile must be in the same parent directory as the catalog directory tha .Example directory structure [source,terminal] ---- -. <1> -├── <2> -└── .Dockerfile <3> +. +├── +└── .Dockerfile ---- -<1> Parent directory -<2> Catalog directory -<3> Dockerfile generated by the `opm generate dockerfile` command ++ +* `.` is the parent directory. ++ +* `` is the catalog directory. 
++ +* `.Dockerfile` is the Dockerfile generated by the `opm generate dockerfile` command. -- .. Populate the catalog with the package definition for your Operator by running the `opm init` command: + [source,terminal] ---- -$ opm init \ <1> - --default-channel=preview \ <2> - --description=./README.md \ <3> - --icon=./operator-icon.svg \ <4> - --output yaml \ <5> - > /index.yaml <6> ----- -<1> Operator, or package, name -<2> Channel that subscriptions default to if unspecified -<3> Path to the Operator's `README.md` or other documentation -<4> Path to the Operator's icon -<5> Output format: JSON or YAML -<6> Path for creating the catalog configuration file +$ opm init \ + --default-channel=preview \ + --description=./README.md \ + --icon=./operator-icon.svg \ + --output yaml \ + > /index.yaml +---- +where: + +``:: Specifies the Operator, or package, name. +`--default-channel`:: Specifies the channel that subscriptions default to if unspecified. +`--description`:: Specifies the path to the Operator's `README.md` or other documentation. +`--icon`:: Specifies the path to the Operator's icon. +`--output`:: Specifies the output format: JSON or YAML. +`/index.yaml`:: Specifies the path for creating the catalog configuration file. + This command generates an `olm.package` declarative config blob in the specified catalog configuration file. @@ -89,12 +94,14 @@ This command generates an `olm.package` declarative config blob in the specified + [source,terminal] ---- -$ opm render //: \ <1> +$ opm render //: \ --output=yaml \ - >> /index.yaml <2> + >> /index.yaml ---- -<1> Pull spec for the bundle image -<2> Path to the catalog configuration file +where: + +`//:`:: Specifies the pull spec for the bundle image. +`/index.yaml`:: Specifies the path to the catalog configuration file. 
+ [NOTE] ==== @@ -111,9 +118,9 @@ schema: olm.channel package: name: preview entries: - - name: .v0.1.0 <1> + - name: .v0.1.0 ---- -<1> Ensure that you include the period (`.`) after `` but before the `v` in the version. Otherwise, the entry fails to pass the `opm validate` command. +Ensure that you include the period (`.`) after `` but before the `v` in the version. Otherwise, the entry fails to pass the `opm validate` command. . Validate the file-based catalog: diff --git a/modules/olm-deprecations-schema.adoc b/modules/olm-deprecations-schema.adoc index 6338eca0b455..d115f887c05c 100644 --- a/modules/olm-deprecations-schema.adoc +++ b/modules/olm-deprecations-schema.adoc @@ -38,31 +38,36 @@ Each `reference` type has their own requirements, as detailed in the following e [source,yaml] ---- schema: olm.deprecations -package: my-operator <1> +package: my-operator entries: - reference: - schema: olm.package <2> - message: | <3> + schema: olm.package + message: | The 'my-operator' package is end of life. Please use the 'my-operator-new' package for support. - reference: schema: olm.channel - name: alpha <4> + name: alpha message: | The 'alpha' channel is no longer supported. Please switch to the 'stable' channel. - reference: schema: olm.bundle - name: my-operator.v1.68.0 <5> + name: my-operator.v1.68.0 message: | my-operator.v1.68.0 is deprecated. Uninstall my-operator.v1.68.0 and install my-operator.v1.72.0 for support. ---- -<1> Each deprecation schema must have a `package` value, and that package reference must be unique across the catalog. There must not be an associated `name` field. -<2> The `olm.package` schema must not include a `name` field, because it is determined by the `package` field defined earlier in the schema. -<3> All `message` fields, for any `reference` type, must be a non-zero length and represented as an opaque text blob. -<4> The `name` field for the `olm.channel` schema is required. 
-<5> The `name` field for the `olm.bundle` schema is required. ++ +* Each deprecation schema must have a `package` value, and that package reference must be unique across the catalog. There must not be an associated `name` field. ++ +* The `olm.package` schema must not include a `name` field, because it is determined by the `package` field defined earlier in the schema. ++ +* All `message` fields, for any `reference` type, must be a non-zero length and represented as an opaque text blob. ++ +* The `name` field for the `olm.channel` schema is required. ++ +* The `name` field for the `olm.bundle` schema is required. ==== [NOTE] diff --git a/modules/olmv1-about-target-versions.adoc b/modules/olmv1-about-target-versions.adoc index aaf4bf5b5c0f..545164c138cb 100644 --- a/modules/olmv1-about-target-versions.adoc +++ b/modules/olmv1-about-target-versions.adoc @@ -35,9 +35,9 @@ apiVersion: olm.operatorframework.io/v1 catalog: packageName: channels: - - latest <1> + - latest ---- -<1> Optional: Installs the latest release that can be resolved from the specified channel. Updates to the channel are automatically installed. Specify the value of the `channels` parameter as an array. +The optional `channels` parameter is an array value that installs the latest release that can be resolved from the specified channel. Updates to the channel are automatically installed. If you specify the Operator or extension's target version in the CR, {olmv1} installs the specified version. When the target version is specified in the CR, {olmv1} does not change the target version when updates are published to the catalog. @@ -58,9 +58,9 @@ apiVersion: olm.operatorframework.io/v1 sourceType: Catalog catalog: packageName: - version: "1.11.1" <1> + version: "1.11.1" ---- -<1> Optional: Specifies the target version. If you want to update the version of the Operator or extension that is installed, you must manually update this field the CR to the desired target version. 
+The optional `version` parameter specifies the target version. If you want to update the version of the Operator or extension that is installed, you must manually update this field in the CR to the desired target version. If you want to define a range of acceptable versions for an Operator or extension, you can specify a version range by using a comparison string. When you specify a version range, {olmv1} installs the latest version of an Operator or extension that can be resolved by the Operator Controller. @@ -79,9 +79,9 @@ apiVersion: olm.operatorframework.io/v1 sourceType: Catalog catalog: packageName: - version: ">1.11.1" <1> + version: ">1.11.1" ---- -<1> Optional: Specifies that the desired version range is greater than version `1.11.1`. For more information, see "Support for version ranges". +The optional `version` parameter specifies that the desired version range is greater than version `1.11.1`. For more information, see "Support for version ranges". After you create or update a CR, apply the configuration file by running the following command: diff --git a/modules/olmv1-adding-a-catalog.adoc b/modules/olmv1-adding-a-catalog.adoc index cc7814ccb1f6..504ae60bcf5d 100644 --- a/modules/olmv1-adding-a-catalog.adoc +++ b/modules/olmv1-adding-a-catalog.adoc @@ -19,19 +19,21 @@ To add a catalog to a cluster for {olmv1-first} usage, create a `ClusterCatalog` apiVersion: olm.operatorframework.io/v1 kind: ClusterCatalog metadata: - name: my-redhat-operators <1> + name: my-redhat-operators spec: - priority: 1000 <2> + priority: 1000 source: image: - pollIntervalMinutes: 10 <3> - ref: registry.redhat.io/redhat/community-operator-index:v{product-version} <4> + pollIntervalMinutes: 10 + ref: registry.redhat.io/redhat/community-operator-index:v{product-version} type: Image ---- -<1> The catalog is automatically labeled with the value of the `metadata.name` field when it is applied to the cluster. 
For more information about labels and catalog selection, see "Catalog content resolution". -<2> Optional: Specify the priority of the catalog in relation to the other catalogs on the cluster. For more information, see "Catalog selection by priority". -<3> Specify the interval in minutes for polling the remote registry for newer image digests. To disable polling, do not set the field. -<4> Specify the catalog image in the `spec.source.image.ref` field. +where: + +`metadata.name`:: Specifies the catalog name. The catalog is automatically labeled with the value of the `metadata.name` field when it is applied to the cluster. For more information about labels and catalog selection, see "Catalog content resolution". +`priority`:: Optional: Specifies the priority of the catalog in relation to the other catalogs on the cluster. For more information, see "Catalog selection by priority". +`pollIntervalMinutes`:: Specifies the interval in minutes for polling the remote registry for newer image digests. To disable polling, do not set the field. +`spec.source.image.ref`:: Specifies the catalog image. . 
Add the catalog to your cluster by running the following command: + @@ -99,12 +101,12 @@ Spec: Poll Interval Minutes: 10 Ref: registry.redhat.io/redhat/community-operator-index:v{product-version} Type: Image -Status: <1> +Status: Conditions: Last Transition Time: 2025-02-18T20:29:00Z Message: Successfully unpacked and stored content from resolved source Observed Generation: 1 - Reason: Succeeded <2> + Reason: Succeeded Status: True Type: Progressing Last Transition Time: 2025-02-18T20:29:00Z @@ -116,12 +118,15 @@ Status: <1> Last Unpacked: 2025-02-18T20:28:59Z Resolved Source: Image: - Ref: registry.redhat.io/redhat/community-operator-index@sha256:11627ea6fdd06b8092df815076e03cae9b7cede8b353c0b461328842d02896c5 <3> + Ref: registry.redhat.io/redhat/community-operator-index@sha256:11627ea6fdd06b8092df815076e03cae9b7cede8b353c0b461328842d02896c5 Type: Image Urls: Base: https://catalogd-service.openshift-catalogd.svc/catalogs/my-redhat-operators Events: ---- -<1> Describes the status of the catalog. -<2> Displays the reason the catalog is in the current state. -<3> Displays the image reference of the catalog. ++ +* `Status` describes the status of the catalog. ++ +* `Reason` displays the reason the catalog is in the current state. ++ +* `Resolved Source.Image.Ref` displays the image reference of the catalog. diff --git a/modules/olmv1-catalog-selection-by-name.adoc b/modules/olmv1-catalog-selection-by-name.adoc index b3ecfe7e6f06..eff0b8fd53f3 100644 --- a/modules/olmv1-catalog-selection-by-name.adoc +++ b/modules/olmv1-catalog-selection-by-name.adoc @@ -16,10 +16,10 @@ kind: ClusterExtension metadata: name: labels: - olm.operatorframework.io/metadata.name: <1> + olm.operatorframework.io/metadata.name: ... ---- -<1> A label derived from the `metadata.name` field and automatically added when the catalog is applied. +The `olm.operatorframework.io/metadata.name` label is derived from the `metadata.name` field and automatically added when the catalog is applied. 
The following example resolves the `-operator` package from a catalog with the `openshift-redhat-operators` label: diff --git a/modules/olmv1-forcing-an-update-or-rollback.adoc b/modules/olmv1-forcing-an-update-or-rollback.adoc index 0682ac058358..1c98e78a6685 100644 --- a/modules/olmv1-forcing-an-update-or-rollback.adoc +++ b/modules/olmv1-forcing-an-update-or-rollback.adoc @@ -32,23 +32,25 @@ apiVersion: olm.operatorframework.io/v1 metadata: name: spec: - namespace: <1> + namespace: serviceAccount: - name: <2> + name: source: sourceType: Catalog catalog: packageName: channels: - - <3> - version: <4> - upgradeConstraintPolicy: SelfCertified <5> + - + version: + upgradeConstraintPolicy: SelfCertified ---- -<1> Specifies the namespace where you want the bundle installed, such as `pipelines` or `my-extension`. Extensions are still cluster-scoped and might contain resources that are installed in different namespaces. -<2> Specifies the name of the service account you created to install, update, and manage your extension. -<3> Optional: Specifies channel names as an array, such as `pipelines-1.14` or `latest`. -<4> Optional: Specifies the version or version range, such as `1.14.0`, `1.14.x`, or `>=1.16`, of the package you want to install or update. For more information, see "Example custom resources (CRs) that specify a target version" and "Support for version ranges". -<5> Optional: Specifies the upgrade constraint policy. To force an update or rollback, set the field to `SelfCertified`. If unspecified, the default setting is `CatalogProvided`. The `CatalogProvided` setting only updates if the new version satisfies the upgrade constraints set by the package author. +where: + +`namespace`:: Specifies the namespace where you want the bundle installed, such as `pipelines` or `my-extension`. Extensions are still cluster-scoped and might contain resources that are installed in different namespaces. 
+`serviceAccount.name`:: Specifies the name of the service account you created to install, update, and manage your extension. +`channels`:: Optional: Specifies channel names as an array, such as `pipelines-1.14` or `latest`. +`version`:: Optional: Specifies the version or version range, such as `1.14.0`, `1.14.x`, or `>=1.16`, of the package you want to install or update. For more information, see "Example custom resources (CRs) that specify a target version" and "Support for version ranges". +`upgradeConstraintPolicy`:: Optional: Specifies the upgrade constraint policy. To force an update or rollback, set the field to `SelfCertified`. If unspecified, the default setting is `CatalogProvided`. The `CatalogProvided` setting only updates if the new version satisfies the upgrade constraints set by the package author. . Apply the changes to your Operator or extensions CR by running the following command: + diff --git a/modules/olmv1-granting-user-access-binding.adoc b/modules/olmv1-granting-user-access-binding.adoc index 2fb7cb789f13..21e7fdbedd7c 100644 --- a/modules/olmv1-granting-user-access-binding.adoc +++ b/modules/olmv1-granting-user-access-binding.adoc @@ -75,9 +75,9 @@ rules: resources: - verbs: - - '*' <1> + - '*' ---- -<1> Setting a wildcard (`*`) in `verbs` allows all actions on the specified resources. +Setting a wildcard (`*`) in `verbs` allows all actions on the specified resources. .. 
Create the cluster roles by running the following command for any YAML files you created: + diff --git a/modules/olmv1-ocp-compat.adoc b/modules/olmv1-ocp-compat.adoc index e53b03bfcf8e..8faa474e1cdd 100644 --- a/modules/olmv1-ocp-compat.adoc +++ b/modules/olmv1-ocp-compat.adoc @@ -21,9 +21,9 @@ apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: annotations: - "olm.properties": '[{"type": "olm.maxOpenShiftVersion", "value": "<cluster_version>"}]' <1> + "olm.properties": '[{"type": "olm.maxOpenShiftVersion", "value": "<cluster_version>"}]' ---- -<1> Specifies the latest minor version of {product-title} (4.y) that an Operator is compatible with. For example, setting `value` to `{product-version}` prevents cluster updates to minor versions later than {product-version} when this bundle is installed on a cluster. +The `olm.maxOpenShiftVersion` property specifies the latest minor version of {product-title} (4.y) that an Operator is compatible with. For example, setting `value` to `{product-version}` prevents cluster updates to minor versions later than {product-version} when this bundle is installed on a cluster. + If the `olm.maxOpenShiftVersion` field is omitted, cluster updates are not blocked by this Operator. diff --git a/modules/olmv1-red-hat-catalogs.adoc b/modules/olmv1-red-hat-catalogs.adoc index ef498c1e8fd2..c4553db086ca 100644 --- a/modules/olmv1-red-hat-catalogs.adoc +++ b/modules/olmv1-red-hat-catalogs.adoc @@ -22,11 +22,11 @@ spec: priority: -100 source: image: - pollIntervalMinutes: <1> + pollIntervalMinutes: ref: registry.redhat.io/redhat/redhat-operator-index:v{product-version} type: Image ---- -<1> Specify the interval in minutes for polling the remote registry for newer image digests. To disable polling, do not set the field. +The `pollIntervalMinutes` parameter specifies the interval in minutes for polling the remote registry for newer image digests. To disable polling, do not set the field.
.Certified Operators catalog [source,yaml,subs="attributes+"] @@ -82,6 +82,6 @@ The following command adds a catalog to your cluster: .Command syntax [source,terminal] ---- -$ oc apply -f <catalog_name>.yaml <1> +$ oc apply -f <catalog_name>.yaml ---- -<1> Specifies the catalog CR, such as `my-catalog.yaml`. +The `<catalog_name>.yaml` parameter specifies the catalog CR, such as `my-catalog.yaml`.