diff --git a/images/tests/Dockerfile b/images/tests/Dockerfile index 743b8232..7b871a7a 100644 --- a/images/tests/Dockerfile +++ b/images/tests/Dockerfile @@ -35,12 +35,20 @@ RUN poetry install --no-root --with test && rm -rf $POETRY_CACHE_DIR # Runtime stage FROM python:3.12-slim -# Install system dependencies for runtime +# Install system dependencies for runtime including Chrome for Selenium tests RUN apt-get update && \ apt-get install -y --no-install-recommends \ curl \ ca-certificates \ - && rm -rf /var/lib/apt/lists/* + wget \ + gnupg \ + && wget -q -O /tmp/google-chrome-key.pub https://dl-ssl.google.com/linux/linux_signing_key.pub \ + && gpg --dearmor -o /usr/share/keyrings/google-chrome-keyring.gpg /tmp/google-chrome-key.pub \ + && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/google-chrome-keyring.gpg] http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google-chrome.list \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + google-chrome-stable \ + && rm -rf /var/lib/apt/lists/* /tmp/google-chrome-key.pub # Install OpenShift CLI (oc) RUN curl -L https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz | \ diff --git a/images/tests/run-tests.sh b/images/tests/run-tests.sh index 25bc0585..6648bb6a 100644 --- a/images/tests/run-tests.sh +++ b/images/tests/run-tests.sh @@ -387,6 +387,20 @@ cp "${TEMP_KUBECONFIG}" ~/.kube/config || { echo "Successfully logged in with TEST_USER" +# ============================================================================ +# Get RHOAI Dashboard URL for UI Tests +# ============================================================================ +echo "Retrieving RHOAI Dashboard URL..." +ODH_DASHBOARD_URL=$(oc get consolelink rhodslink -o jsonpath='{.spec.href}' 2>/dev/null) + +if [ -z "$ODH_DASHBOARD_URL" ]; then + echo "WARNING: Failed to retrieve Dashboard URL from consolelink rhodslink" + echo " UI tests will be skipped or may fail" +else + echo "Dashboard URL: $ODH_DASHBOARD_URL" + export ODH_DASHBOARD_URL +fi + # ============================================================================ # Run Tests # ============================================================================ diff --git a/poetry.lock b/poetry.lock index 0e2d4eac..6962efb3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -2571,6 +2571,21 @@ files = [ opentelemetry-api = "1.34.1" typing-extensions = ">=4.5.0" +[[package]] +name = "outcome" +version = "1.3.0.post0" +description = "Capture the outcome of Python function calls." +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"}, + {file = "outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8"}, +] + +[package.dependencies] +attrs = ">=19.2.0" + [[package]] name = "overrides" version = "7.7.0" @@ -3362,6 +3377,19 @@ cffi = ">=1.4.1" docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] +[[package]] +name = "pysocks" +version = "1.7.1" +description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["test"] +files = [ + {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"}, + {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, + {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"}, +] + [[package]] name = "pytest" version = "7.4.0" @@ -3436,6 +3464,21 @@ type = "legacy" url = "https://test.pypi.org/simple" reference = "testpypi" +[[package]] +name = "python-dotenv" +version = "1.2.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61"}, + {file = "python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + [[package]] name = "python-json-logger" version = "3.3.0" @@ -4021,6 +4064,26 @@ files = [ [package.dependencies] pyasn1 = ">=0.1.3" +[[package]] +name = "selenium" +version = "4.27.1" +description = "Official Python bindings for Selenium WebDriver" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "selenium-4.27.1-py3-none-any.whl", hash = "sha256:b89b1f62b5cfe8025868556fe82360d6b649d464f75d2655cb966c8f8447ea18"}, + {file = "selenium-4.27.1.tar.gz", hash = "sha256:5296c425a75ff1b44d0d5199042b36a6d1ef76c04fb775b97b40be739a9caae2"}, +] + +[package.dependencies] +certifi = ">=2021.10.8" +trio = ">=0.17,<1.0" +trio-websocket = ">=0.9,<1.0" +typing_extensions = ">=4.9,<5.0" +urllib3 = {version = ">=1.26,<3", extras = ["socks"]} +websocket-client = ">=1.8,<2.0" + [[package]] name = "send2trash" version = "1.8.3" @@ -4121,6 +4184,18 @@ files = [ {file = "snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895"}, ] +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +groups = ["test"] +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + [[package]] name = "soupsieve" version = "2.7" @@ -4402,6 +4477,43 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "trio" +version = "0.32.0" +description = "A friendly Python library for async concurrency and I/O" +optional = false +python-versions = ">=3.10" +groups = ["test"] +files = [ + {file = "trio-0.32.0-py3-none-any.whl", hash = "sha256:4ab65984ef8370b79a76659ec87aa3a30c5c7c83ff250b4de88c29a8ab6123c5"}, + {file = "trio-0.32.0.tar.gz", hash = "sha256:150f29ec923bcd51231e1d4c71c7006e65247d68759dd1c19af4ea815a25806b"}, +] + +[package.dependencies] +attrs = ">=23.2.0" +cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""} +idna = "*" +outcome = "*" +sniffio = ">=1.3.0" 
+sortedcontainers = "*" + +[[package]] +name = "trio-websocket" +version = "0.12.2" +description = "WebSocket library for Trio" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "trio_websocket-0.12.2-py3-none-any.whl", hash = "sha256:df605665f1db533f4a386c94525870851096a223adcb97f72a07e8b4beba45b6"}, + {file = "trio_websocket-0.12.2.tar.gz", hash = "sha256:22c72c436f3d1e264d0910a3951934798dcc5b00ae56fc4ee079d46c7cf20fae"}, +] + +[package.dependencies] +outcome = ">=1.2.0" +trio = ">=0.11" +wsproto = ">=0.14" + [[package]] name = "types-python-dateutil" version = "2.9.0.20250708" @@ -4480,6 +4592,9 @@ files = [ {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] +[package.dependencies] +pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""} + [package.extras] brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] @@ -4531,6 +4646,23 @@ files = [ {file = "webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6"}, ] +[[package]] +name = "webdriver-manager" +version = "4.0.2" +description = "Library provides the way to automatically manage drivers for different browsers" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "webdriver_manager-4.0.2-py2.py3-none-any.whl", hash = "sha256:75908d92ecc45ff2b9953614459c633db8f9aa1ff30181cefe8696e312908129"}, + {file = "webdriver_manager-4.0.2.tar.gz", hash = "sha256:efedf428f92fd6d5c924a0d054e6d1322dd77aab790e834ee767af392b35590f"}, +] + +[package.dependencies] +packaging = "*" +python-dotenv = "*" +requests = "*" + [[package]] name = "webencodings" version = "0.5.1" @@ -4661,6 +4793,21 @@ files = [ {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, ] +[[package]] +name = "wsproto" +version = "1.3.1" +description = "Pure-Python WebSocket protocol implementation" +optional = false +python-versions = ">=3.10" +groups = ["test"] +files = [ + {file = "wsproto-1.3.1-py3-none-any.whl", hash = "sha256:297ce79322989c0d286cc158681641cd18bc7632dfb38cf4054696a89179b993"}, + {file = "wsproto-1.3.1.tar.gz", hash = "sha256:81529992325c28f0d9b86ca66fc973da96eb80ab53410249ce2e502749c7723c"}, +] + +[package.dependencies] +h11 = ">=0.16.0,<1" + [[package]] name = "yarl" version = "1.20.1" @@ -4803,4 +4950,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "1a3968dbde8f4356b4d93b17f5bcf75f2bc38587553273742de05d9f0f6ee87c" +content-hash = "433244ef9a8db3bfa6b9d0c2bbd4058256b76064aa88101d684502a2342b0c39" diff --git a/pyproject.toml b/pyproject.toml index 15edb00c..be93f16b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,6 +66,8 @@ coverage = "7.6.4" pytest-mock = "3.11.1" pytest-timeout = "2.3.1" jupyterlab = "4.3.1" +selenium = "4.27.1" +webdriver-manager = "4.0.2" [tool.poetry.group.dev.dependencies] @@ -83,7 +85,8 @@ markers = [ "smoke: Smoke tests - quick validation tests", "tier1: Tier1 tests - standard test suite", "pre_upgrade: Tests to run before upgrade", - "post_upgrade: Tests to run after upgrade" + "post_upgrade: Tests to run after upgrade", + "ui: UI tests requiring browser automation" ] addopts = "--timeout=900 --ignore=src/codeflare_sdk/vendored" testpaths = ["src/codeflare_sdk"] diff --git 
a/tests/ui/README.md b/tests/ui/README.md new file mode 100644 index 00000000..3f7f7c1c --- /dev/null +++ b/tests/ui/README.md @@ -0,0 +1,226 @@ +# RHOAI Dashboard UI Tests + +This directory contains UI tests for the RHOAI Dashboard, specifically targeting the Distributed Workloads page to verify Ray cluster visibility and functionality. + +## Overview + +The UI tests use Selenium WebDriver with Chrome (headless) to automate browser interactions with the RHOAI Dashboard. They are designed to work in conjunction with the upgrade tests to verify that Ray clusters created before an upgrade remain visible and functional after the upgrade. + +## Test Structure + +``` +tests/ui/ +├── conftest.py # Pytest fixtures for Selenium setup +├── pages/ +│ └── distributed_workloads_page.py # Page Object Model for Distributed Workloads page +└── README.md # This file + +tests/upgrade/ +├── 01_raycluster_sdk_upgrade_test.py # Pre/post upgrade backend tests (runs first) +├── 02_dashboard_ui_upgrade_test.py # Pre/post upgrade UI tests (runs second) +└── conftest.py # Imports UI fixtures for upgrade tests +``` + +**Note**: Test files are prefixed with numbers (`01_`, `02_`) to ensure proper execution order: +1. First, the Ray cluster is created (`01_raycluster_sdk_upgrade_test.py`) +2. Then, the UI tests verify the cluster appears in the dashboard (`02_dashboard_ui_upgrade_test.py`) + +## Prerequisites + +### Python Dependencies + +The UI tests require the following dependencies (already added to `pyproject.toml`): + +- `selenium >= 4.27.1` - Browser automation framework +- `webdriver-manager >= 4.0.2` - Automatic ChromeDriver management + +Install dependencies: +```bash +poetry install --with test +``` + +### System Requirements + +- **Chrome or Chromium browser** (required for headless execution) + - The Docker image includes Google Chrome Stable + - If running locally, ensure Chrome is installed + - UI tests will be skipped if Chrome is not available +- OpenShift CLI (`oc`) installed and configured +- Access to RHOAI Dashboard + +### Environment Variables + +The tests require the following environment variables: + +- `TEST_USER_USERNAME` - Username for RHOAI Dashboard login +- `TEST_USER_PASSWORD` - Password for RHOAI Dashboard login +- `ODH_DASHBOARD_URL` (optional) - Dashboard URL (auto-detected via `oc get consolelink rhodslink` if not set) +- `OPENSHIFT_IDP_NAME` (optional) - OpenShift identity provider name (e.g., "ldap", "htpasswd"). If not set, the fixture will try to auto-detect based on username pattern + +## Running the Tests + +### Run Pre-Upgrade UI Tests + +```bash +# Run all pre-upgrade tests including UI tests +poetry run pytest tests/upgrade/ -m pre_upgrade -v + +# Run only pre-upgrade UI tests +poetry run pytest tests/upgrade/ -m "pre_upgrade and ui" -v +``` + +### Run Post-Upgrade UI Tests + +```bash +# Run all post-upgrade tests including UI tests +poetry run pytest tests/upgrade/ -m post_upgrade -v + +# Run only post-upgrade UI tests +poetry run pytest tests/upgrade/ -m "post_upgrade and ui" -v +``` + +### Run All Upgrade Tests (Pre and Post) + +```bash +poetry run pytest tests/upgrade/ -m "pre_upgrade or post_upgrade" -v +``` + +### Skip UI Tests + +If you want to run upgrade tests but skip UI tests (e.g., if browser is not available): + +```bash +poetry run pytest tests/upgrade/ -m "pre_upgrade and not ui" -v +``` + +## Test Flow + +### Pre-Upgrade (`TestDistributedWorkloadsUIPreUpgrade`) + +1. Login to RHOAI Dashboard +2. Navigate to Distributed Workloads page +3. 
Select the test namespace (`test-ns-rayupgrade`)
+4. Verify cluster is in "Running" state
+5. Check Project Metrics tab shows resource metrics
+6. Check Workload Status tab shows cluster with Running status
+
+### Post-Upgrade (`TestDistributedWorkloadsUIPostUpgrade`)
+
+1. Login to RHOAI Dashboard
+2. Navigate to Distributed Workloads page
+3. Select the test namespace (`test-ns-rayupgrade`)
+4. Verify cluster is still in "Running" state after upgrade
+5. Check Project Metrics tab still shows resource metrics
+6. Check Workload Status tab still shows cluster with Running status
+
+## Page Object Model
+
+The tests use the Page Object Model (POM) design pattern to separate test logic from page interactions. The `DistributedWorkloadsPage` class encapsulates all interactions with the Distributed Workloads page.
+
+### Key Methods
+
+- `navigate()` - Navigate to the Distributed Workloads page
+- `select_project(project_name)` - Select a project from the dropdown
+- `verify_cluster_running()` - Check if any cluster shows "Running" status
+- `click_project_metrics_tab()` - Switch to Project Metrics tab
+- `verify_metrics_visible()` - Verify resource metrics are displayed
+- `click_workload_status_tab()` - Switch to Workload Status tab
+- `verify_cluster_in_workload_list(cluster_name)` - Verify cluster appears in list with Running status
+
+## Debugging
+
+### Enable Screenshots on Failure
+
+Screenshots are automatically saved to `/tmp/login_failure.png` if login fails. To capture screenshots on other failures, you can add:
+
+```python
+try:
+    driver.save_screenshot("/tmp/test_failure.png")
+except:
+    pass
+```
+
+### Run in Non-Headless Mode
+
+To see the browser during test execution (useful for debugging), modify `tests/ui/conftest.py`:
+
+```python
+# Comment out this line:
+# chrome_options.add_argument("--headless")
+```
+
+### Verbose Logging
+
+All page interactions print status messages. Run tests with the `-s` flag to see them:
+
+```bash
+poetry run pytest tests/upgrade/02_dashboard_ui_upgrade_test.py -m pre_upgrade -v -s
+```
+
+## Troubleshooting
+
+### ChromeDriver Issues
+
+If you encounter ChromeDriver compatibility issues:
+
+```bash
+# Clear webdriver-manager cache
+rm -rf ~/.wdm/
+
+# Or manually pin the ChromeDriver version (webdriver-manager 4.x uses `driver_version`)
+# Edit conftest.py and modify:
+service = Service(ChromeDriverManager(driver_version="specific_version").install())
+```
+
+### Login Issues
+
+- Verify `TEST_USER_USERNAME` and `TEST_USER_PASSWORD` are set
+- Check that the user has access to the RHOAI Dashboard
+- Ensure the cluster's OAuth is properly configured
+
+**Identity Provider (IDP) Selection**:
+- The fixture automatically tries to select the correct IDP based on your username
+  - Usernames containing "ldap" → selects LDAP IDP
+  - Usernames containing "htpasswd" → selects htpasswd IDP
+- If auto-detection fails, set the `OPENSHIFT_IDP_NAME` environment variable:
+  ```bash
+  export OPENSHIFT_IDP_NAME="ldap" # or "htpasswd", "kube:admin", etc.
+ ``` +- Check screenshot at `/tmp/login_failure.png` to see available IDPs + +### Dashboard URL Not Found + +If `oc get consolelink rhodslink` fails: + +```bash +# Manually check available consolelinks +oc get consolelink + +# Set URL manually +export ODH_DASHBOARD_URL="https://your-dashboard-url" +``` + +### Timeout Issues + +If elements are not found within the default 30-second timeout, you can adjust it: + +```python +dw_page = DistributedWorkloadsPage(driver, timeout=60) +``` + +## Integration with CI/CD + +The UI tests are designed to run in the same container as the other tests. The `run-tests.sh` script automatically: + +1. Retrieves the Dashboard URL via `oc get consolelink rhodslink` +2. Uses the same `TEST_USER_USERNAME` and `TEST_USER_PASSWORD` credentials +3. Runs UI tests alongside other upgrade tests when appropriate markers are specified + +## Future Enhancements + +- Add video recording of test execution +- Implement retry logic for flaky UI interactions +- Add cross-browser testing (Firefox, Edge) +- Expand coverage to other RHOAI Dashboard pages +- Add performance metrics collection + diff --git a/tests/ui/__init__.py b/tests/ui/__init__.py new file mode 100644 index 00000000..125905e4 --- /dev/null +++ b/tests/ui/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/ui/conftest.py b/tests/ui/conftest.py new file mode 100644 index 00000000..6ae8fd37 --- /dev/null +++ b/tests/ui/conftest.py @@ -0,0 +1,349 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
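+
+"""Shared Selenium fixtures for the RHOAI Dashboard UI tests: a headless Chrome
+driver managed by webdriver-manager, dashboard URL discovery, test-user
+credentials, and an OpenShift OAuth login helper with IDP selection."""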
+ +import pytest +import os +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.chrome.options import Options +from selenium.webdriver.chrome.service import Service +from webdriver_manager.chrome import ChromeDriverManager + + +@pytest.fixture(scope="class") +def selenium_driver(request): + """Setup Selenium WebDriver for UI tests""" + chrome_options = Options() + chrome_options.add_argument("--headless") + chrome_options.add_argument("--no-sandbox") + chrome_options.add_argument("--disable-dev-shm-usage") + chrome_options.add_argument("--ignore-certificate-errors") + chrome_options.add_argument("--window-size=1920,1080") + chrome_options.add_argument("--disable-gpu") + + try: + # Use webdriver-manager to automatically manage chromedriver + service = Service(ChromeDriverManager().install()) + driver = webdriver.Chrome(service=service, options=chrome_options) + driver.implicitly_wait(10) + + # Make driver available to the test class + if request.cls is not None: + request.cls.driver = driver + + yield driver + + # Cleanup + driver.quit() + except Exception as e: + pytest.skip( + f"Chrome/ChromeDriver not available, skipping UI test: {e}\n" + "To run UI tests, ensure Chrome is installed in the container." + ) + + +@pytest.fixture(scope="class") +def dashboard_url(): + """Get RHOAI Dashboard URL from environment or oc command""" + # First check if URL is provided as environment variable + url = os.getenv("ODH_DASHBOARD_URL") + + if url: + print(f"Using Dashboard URL from environment: {url}") + return url + + # If not provided, try to get it from oc command + try: + import subprocess + + result = subprocess.run( + ["oc", "get", "consolelink", "rhodslink", "-o", "jsonpath='{.spec.href}'"], + capture_output=True, + text=True, + check=True, + ) + url = result.stdout.strip().strip("'") + print(f"Retrieved Dashboard URL from oc command: {url}") + return url + except subprocess.CalledProcessError as e: + print(f"Failed to get Dashboard URL from oc command: {e}") + raise RuntimeError( + "ODH_DASHBOARD_URL not set and failed to retrieve from oc command" + ) + except FileNotFoundError: + raise RuntimeError( + "oc command not found. 
Please ensure OpenShift CLI is installed or set ODH_DASHBOARD_URL environment variable" + ) + + +@pytest.fixture(scope="class") +def test_credentials(): + """Get test user credentials from environment""" + username = os.getenv("TEST_USER_USERNAME") + password = os.getenv("TEST_USER_PASSWORD") + + if not username or not password: + raise RuntimeError( + "TEST_USER_USERNAME and TEST_USER_PASSWORD must be set in environment" + ) + + return {"username": username, "password": password} + + +@pytest.fixture(scope="class") +def login_to_dashboard(selenium_driver, dashboard_url, test_credentials): + """Login to RHOAI Dashboard""" + driver = selenium_driver + wait = WebDriverWait(driver, 30) + + print(f"Navigating to dashboard at: {dashboard_url}") + driver.get(dashboard_url) + + # Give page time to load + import time + time.sleep(3) + + try: + print(f"Current URL after navigation: {driver.current_url}") + print(f"Page title: {driver.title}") + + # First, check if we're already on the dashboard (no login required) + try: + # Try multiple possible dashboard indicators + dashboard_indicators = [ + (By.XPATH, "//h1[contains(text(), 'Applications')]"), + (By.XPATH, "//*[contains(text(), 'Data Science')]"), + (By.XPATH, "//a[contains(@href, 'distributed-workloads')]"), + (By.CSS_SELECTOR, "[data-id='distributed-workloads']"), + # Red Hat OpenShift AI specific indicators + (By.XPATH, "//title[contains(text(), 'Red Hat OpenShift AI')]"), + (By.XPATH, "//a[contains(@href, 'applications')]"), + (By.CSS_SELECTOR, "nav[aria-label='Global navigation']"), + (By.CSS_SELECTOR, "[class*='odh-dashboard']"), + (By.CSS_SELECTOR, "[class*='app-launcher']"), + ] + + for locator in dashboard_indicators: + try: + element = driver.find_element(*locator) + if element.is_displayed(): + print(f"Already on dashboard, no login required (found: {locator})") + return driver + except: + continue + except: + pass + + # Not on dashboard, try to login + print("Dashboard not found, attempting login...") + + # First, check if we need to select an identity provider (OpenShift OAuth page) + # This page typically shows buttons like "htpasswd", "ldap", etc. 
+ try: + print("Checking for identity provider selection page...") + + # Try to find all available IDPs + idp_selectors = [ + (By.XPATH, "//a[contains(@href, 'oauth/authorize')]"), + (By.XPATH, "//div[@data-test-id='login']//a"), + (By.XPATH, "//div[contains(@class, 'login')]//a"), + ] + + all_idp_buttons = [] + for by, value in idp_selectors: + try: + elements = driver.find_elements(by, value) + for elem in elements: + elem_text = elem.text.lower() if elem.text else "" + elem_href = elem.get_attribute("href") or "" + # Check if it's an IDP link + if "authorize" in elem_href or any(keyword in elem_text for keyword in ["htpasswd", "ldap", "login", "log in", "sign in", "kube"]): + all_idp_buttons.append((elem, elem.text, elem_href)) + print(f"Found IDP option: text='{elem.text}', href='{elem_href[:100]}'") + except Exception as e: + continue + + if all_idp_buttons: + # Try to intelligently select the right IDP based on username + username = test_credentials["username"].lower() + selected_idp = None + + # Strategy 1: Match username pattern to IDP name + if "ldap" in username: + # Look for ldap IDP + for elem, text, href in all_idp_buttons: + if "ldap" in text.lower() or "ldap" in href.lower(): + selected_idp = (elem, text) + print(f"Selected LDAP IDP based on username pattern: {text}") + break + elif "htpasswd" in username or "admin" in username: + # Look for htpasswd IDP + for elem, text, href in all_idp_buttons: + if "htpasswd" in text.lower() or "htpasswd" in href.lower(): + selected_idp = (elem, text) + print(f"Selected htpasswd IDP based on username pattern: {text}") + break + + # Strategy 2: If no match, use environment variable if set + if not selected_idp: + idp_name = os.getenv("OPENSHIFT_IDP_NAME", "").lower() + if idp_name: + for elem, text, href in all_idp_buttons: + if idp_name in text.lower() or idp_name in href.lower(): + selected_idp = (elem, text) + print(f"Selected IDP from environment variable: {text}") + break + + # Strategy 3: If still no match and only one IDP, use it + if not selected_idp and len(all_idp_buttons) == 1: + selected_idp = (all_idp_buttons[0][0], all_idp_buttons[0][1]) + print(f"Only one IDP available, using: {selected_idp[1]}") + + # Strategy 4: If multiple IDPs and no match, skip IDP selection + # (some clusters may not require IDP selection if there's a default) + if not selected_idp: + print(f"Multiple IDPs found but couldn't determine which to use. 
Available: {[text for _, text, _ in all_idp_buttons]}") + print("Skipping IDP selection, will try direct login form") + else: + print(f"Clicking identity provider button: {selected_idp[1]}") + selected_idp[0].click() + time.sleep(3) # Wait for redirect to login form + print(f"After IDP click - URL: {driver.current_url}") + except Exception as e: + print(f"No identity provider selection needed or failed to handle: {e}") + + # Handle OpenShift OAuth login flow + # Wait for username field (various possible IDs depending on OAuth provider) + username_field = None + possible_username_selectors = [ + (By.ID, "inputUsername"), + (By.ID, "username"), + (By.ID, "login"), + (By.NAME, "username"), + (By.NAME, "login"), + (By.CSS_SELECTOR, "input[type='text'][name='username']"), + (By.CSS_SELECTOR, "input[type='text'][name='login']"), + ] + + for by, value in possible_username_selectors: + try: + username_field = WebDriverWait(driver, 5).until( + EC.presence_of_element_located((by, value)) + ) + print(f"Found username field using: {by}={value}") + break + except: + continue + + if not username_field: + print("ERROR: Could not find username field") + print(f"Page source preview (first 500 chars):\n{driver.page_source[:500]}") + raise RuntimeError( + "Could not find username field. " + f"Current URL: {driver.current_url}, " + f"Page title: {driver.title}" + ) + + username_field.send_keys(test_credentials["username"]) + print(f"Entered username: {test_credentials['username']}") + + # Find password field + password_field = None + possible_password_selectors = [ + (By.ID, "inputPassword"), + (By.ID, "password"), + (By.NAME, "password"), + (By.CSS_SELECTOR, "input[type='password']"), + ] + + for by, value in possible_password_selectors: + try: + password_field = driver.find_element(by, value) + print(f"Found password field using: {by}={value}") + break + except: + continue + + if not password_field: + raise RuntimeError("Could not find password field") + + password_field.send_keys(test_credentials["password"]) + print("Entered password") + + # Click login button + login_button = driver.find_element(By.CSS_SELECTOR, "button[type='submit']") + login_button.click() + print("Clicked login button") + + # Wait for dashboard to load + # Try multiple possible indicators that we're on the dashboard + print("Waiting for dashboard to load...") + dashboard_loaded = False + + for i in range(6): # Try for up to 30 seconds (6 * 5 seconds) + time.sleep(5) + print(f"Attempt {i+1}/6 - Current URL: {driver.current_url}") + print(f"Attempt {i+1}/6 - Page title: {driver.title}") + + # Check if page title indicates we're on the dashboard + if "Red Hat OpenShift AI" in driver.title or "OpenShift" in driver.title: + print(f"Dashboard loaded successfully (title: {driver.title})") + dashboard_loaded = True + break + + # Try finding dashboard elements + for locator in dashboard_indicators: + try: + element = driver.find_element(*locator) + if element.is_displayed(): + print(f"Dashboard loaded successfully (found: {locator})") + dashboard_loaded = True + break + except: + continue + + if dashboard_loaded: + break + + if not dashboard_loaded: + raise RuntimeError( + f"Dashboard did not load after login. 
" + f"Final URL: {driver.current_url}, " + f"Page title: {driver.title}" + ) + + print("Successfully logged in to RHOAI Dashboard") + + except Exception as e: + print(f"Login failed: {e}") + # Take screenshot for debugging + try: + screenshot_path = "/tmp/login_failure.png" + driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + + # Print page source for debugging (first 1000 chars) + try: + print(f"\nPage source preview:\n{driver.page_source[:1000]}") + except: + pass + + raise + + return driver + diff --git a/tests/ui/pages/__init__.py b/tests/ui/pages/__init__.py new file mode 100644 index 00000000..125905e4 --- /dev/null +++ b/tests/ui/pages/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/ui/pages/distributed_workloads_page.py b/tests/ui/pages/distributed_workloads_page.py new file mode 100644 index 00000000..2aefafe8 --- /dev/null +++ b/tests/ui/pages/distributed_workloads_page.py @@ -0,0 +1,646 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait +from selenium.webdriver.support import expected_conditions as EC + + +class DistributedWorkloadsPage: + """Page Object Model for RHOAI Workload Metrics page (formerly Distributed Workloads)""" + + # Locators - multiple options for better compatibility + # New structure: "Workload metrics" is nested under "Observe & monitor" + OBSERVE_MONITOR_NAV_OPTIONS = [ + (By.XPATH, "//nav//button[contains(., 'Observe & monitor')]"), + (By.XPATH, "//nav//a[contains(., 'Observe & monitor')]"), + (By.XPATH, "//button[contains(@class, 'pf-v5-c-nav__link') and contains(., 'Observe')]"), + ] + + WORKLOAD_METRICS_NAV_OPTIONS = [ + (By.XPATH, "//a[contains(., 'Workload metrics')]"), + (By.XPATH, "//nav//a[contains(@href, 'workload-metrics')]"), + (By.XPATH, "//nav//a[contains(@href, 'workloadmetrics')]"), + # Fallback to old naming + (By.XPATH, "//a[contains(@href, 'distributed-workloads')]"), + (By.XPATH, "//a[contains(@href, 'distributedworkloads')]"), + (By.XPATH, "//a[contains(text(), 'Distributed Workload')]"), + (By.XPATH, "//a[contains(text(), 'Distributed workload')]"), + ] + + PAGE_TITLE_OPTIONS = [ + (By.XPATH, "//h1[contains(text(), 'Workload metrics')]"), + (By.XPATH, "//h1[contains(text(), 'Distributed workload')]"), # Fallback to old name + ] + # Project selector - multiple options for compatibility (PatternFly v6) + PROJECT_SELECTOR_OPTIONS = [ + (By.ID, "project-selector"), # Direct ID match + (By.CSS_SELECTOR, "[data-testid='project-selector-toggle']"), # Data testid + (By.XPATH, "//button[@id='project-selector']"), + (By.XPATH, "//button[@data-testid='project-selector-toggle']"), + (By.XPATH, "//button[contains(@class, 'pf-v6-c-menu-toggle')]"), # PatternFly v6 + (By.XPATH, "//button[contains(@class, 'pf-v5-c-menu-toggle')]"), # PatternFly v5 fallback + (By.XPATH, "//button[contains(@aria-label, 'project')]"), + (By.XPATH, "//button[contains(@aria-label, 'Project')]"), + ] + # Status locators - support multiple states (Admitted, Running, etc.) 
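+    # "Running" and "Admitted" are both treated as healthy workload states by
+    # verify_cluster_running() and verify_cluster_in_workload_list() below.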
+ STATUS_LABEL_OPTIONS = [ + # PatternFly v6 + (By.XPATH, "//span[contains(@class, 'pf-v6-c-label__text')]"), + # PatternFly v5 + (By.XPATH, "//span[contains(@class, 'pf-v5-c-label__text')]"), + # Generic + (By.XPATH, "//span[contains(@class, 'pf-c-label__text')]"), + ] + + # Workload metrics table + WORKLOAD_METRICS_TABLE = (By.CSS_SELECTOR, "[data-testid='workload-resource-metrics-table']") + + # Tab locators - support both PatternFly v5 and v6 + PROJECT_METRICS_TAB_OPTIONS = [ + (By.XPATH, "//button[contains(@class, 'pf-v6-c-tabs__link') and .//span[text()='Project metrics']]"), + (By.XPATH, "//button[contains(@class, 'pf-v5-c-tabs__link') and .//span[text()='Project metrics']]"), + (By.XPATH, "//button[@role='tab' and .//span[text()='Project metrics']]"), + ] + + WORKLOAD_STATUS_TAB_OPTIONS = [ + (By.XPATH, "//button[contains(@class, 'pf-v6-c-tabs__link') and .//span[text()='Distributed workload status']]"), + (By.XPATH, "//button[contains(@class, 'pf-v5-c-tabs__link') and .//span[text()='Workload status']]"), + (By.XPATH, "//button[@role='tab' and contains(.//span/text(), 'workload status')]"), + ] + + RESOURCE_METRICS_TITLE_OPTIONS = [ + (By.XPATH, "//*[contains(text(), 'Requested resources')]"), + (By.XPATH, "//h1[contains(text(), 'Project metrics')]"), + (By.XPATH, "//h2[contains(text(), 'Project metrics')]"), + (By.XPATH, "//*[contains(text(), 'Resource metrics')]"), + (By.CSS_SELECTOR, "[data-testid='dw-requested-resources']"), + (By.CSS_SELECTOR, "[data-testid='dw-workload-resource-metrics']"), + ] + + def __init__(self, driver, timeout=30): + self.driver = driver + self.wait = WebDriverWait(driver, timeout) + + def navigate(self): + """Navigate to Workload Metrics page (nested under Observe & monitor)""" + import time + + # Give React app time to fully render (increased from 3 to 10 seconds) + print("Waiting for dashboard React app to fully render...") + for i in range(10): + time.sleep(1) + # Check if body has content + try: + body = self.driver.find_element(By.TAG_NAME, "body") + if len(body.text) > 100: # Body has substantial content + print(f"Dashboard rendered after {i+1} seconds") + break + except: + pass + + time.sleep(2) # Extra wait for animations/transitions + + try: + # Step 1: Find and click "Observe & monitor" in the side nav + print("Searching for 'Observe & monitor' navigation item...") + observe_monitor_element = None + + for by, value in self.OBSERVE_MONITOR_NAV_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + observe_monitor_element = element + print(f"Found 'Observe & monitor' using: {by}={value}") + break + except Exception as e: + print(f"Locator {by}={value} not found: {str(e)[:100]}") + continue + + if observe_monitor_element: + # Check if it's expandable (has nested items) + print("Clicking 'Observe & monitor' to expand submenu...") + observe_monitor_element.click() + time.sleep(1) # Wait for submenu to expand + else: + print("Warning: Could not find 'Observe & monitor' navigation item") + + # Step 2: Find and click "Workload metrics" link + print("Searching for 'Workload metrics' navigation link...") + workload_metrics_link = None + + for by, value in self.WORKLOAD_METRICS_NAV_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + workload_metrics_link = element + print(f"Found 'Workload metrics' link using: {by}={value}") + break + except Exception as e: + print(f"Locator {by}={value} 
not found: {str(e)[:100]}") + continue + + if not workload_metrics_link: + print("\nCould not find navigation link, attempting direct URL navigation...") + # Try direct navigation to workload metrics page + current_url = self.driver.current_url + base_url = current_url.rstrip('/') + + # Try different possible URL patterns + possible_urls = [ + f"{base_url}/workloadMetrics", + f"{base_url}/workload-metrics", + f"{base_url}/workloadmetrics", + f"{base_url}/distributedWorkloads", + f"{base_url}/distributed-workloads", + f"{base_url}/workloads", + f"{base_url}/distributedworkloads", + ] + + navigation_successful = False + for url in possible_urls: + try: + print(f"Trying direct navigation to: {url}") + self.driver.get(url) + time.sleep(3) + + # Check if we got to a valid page (not 404) + if "404" not in self.driver.page_source and "not found" not in self.driver.page_source.lower(): + print(f"Successfully navigated to: {url}") + navigation_successful = True + break + except Exception as e: + print(f"Direct navigation to {url} failed: {str(e)[:100]}") + continue + + if not navigation_successful: + # Take screenshot for debugging + try: + screenshot_path = "/tmp/workload_metrics_nav_failure.png" + self.driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + + # Print more page source for debugging + page_source = self.driver.page_source + print(f"\nPage source (chars 1000-3000):\n{page_source[1000:3000]}") + + # Try to find any navigation links + print("\nSearching for any navigation links...") + try: + nav_links = self.driver.find_elements(By.XPATH, "//nav//a") + print(f"Found {len(nav_links)} navigation links:") + for link in nav_links[:20]: # Print first 20 + try: + print(f" - text: '{link.text}', href: '{link.get_attribute('href')}'") + except: + pass + except Exception as e: + print(f"Could not enumerate nav links: {e}") + + raise RuntimeError( + f"Could not find or navigate to Workload Metrics page. 
" + f"Current URL: {self.driver.current_url}, " + f"Page title: {self.driver.title}" + ) + else: + # Click the link + print("Clicking 'Workload metrics' link...") + workload_metrics_link.click() + + # Wait for page to load + print("Waiting for Workload Metrics page to load...") + time.sleep(3) + + # Verify we're on the right page + print(f"Final URL: {self.driver.current_url}") + if "distributed" in self.driver.current_url.lower() or "workload" in self.driver.current_url.lower(): + print("Successfully navigated to Workload Metrics page (URL indicates success)") + else: + print(f"Warning: URL might not be Workload Metrics page: {self.driver.current_url}") + + except Exception as e: + print(f"Failed to navigate to Workload Metrics page: {e}") + # Take screenshot on any failure + try: + screenshot_path = "/tmp/workload_metrics_nav_failure.png" + self.driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + raise + + def select_project(self, project_name): + """Select a project from the dropdown or by URL navigation""" + import time + import re + + try: + # First, wait a bit for the page to fully load + time.sleep(2) + + # Check current URL to see if we're already on the right project + current_url = self.driver.current_url + print(f"Current URL: {current_url}") + + # Try to find the project selector dropdown using multiple locators + print(f"Searching for project selector dropdown...") + project_dropdown = None + + for by, value in self.PROJECT_SELECTOR_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + # Check if the project is already selected + current_text = element.text + if project_name in current_text: + print(f"Project '{project_name}' is already selected") + return + + project_dropdown = element + print(f"Found project selector using: {by}={value}") + break + except Exception as e: + print(f"Locator {by}={value} not found: {str(e)[:100]}") + continue + + if not project_dropdown: + print("Could not find project selector dropdown, will try URL navigation fallback") + self._select_project_by_url(project_name) + return + + # Try to click the dropdown and select the project + try: + print(f"Clicking project selector dropdown...") + project_dropdown.click() + time.sleep(1) # Wait for dropdown to expand + + # Try different locators for the project option + print(f"Searching for project option: {project_name}") + project_option_locators = [ + (By.XPATH, f"//button[contains(text(), '{project_name}')]"), + (By.XPATH, f"//li[contains(text(), '{project_name}')]"), + (By.XPATH, f"//a[contains(text(), '{project_name}')]"), + (By.XPATH, f"//*[@role='option' and contains(text(), '{project_name}')]"), + (By.XPATH, f"//*[@role='menuitem' and contains(text(), '{project_name}')]"), + ] + + project_option = None + for by, value in project_option_locators: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + project_option = element + print(f"Found project option using: {by}={value}") + break + except Exception as e: + print(f"Locator {by}={value} not found: {str(e)[:100]}") + continue + + if not project_option: + print("Could not find project in dropdown, falling back to URL navigation") + # Close dropdown if it's open + try: + project_dropdown.click() + time.sleep(0.5) + except: + pass + self._select_project_by_url(project_name) + return + + project_option.click() + time.sleep(1) # Wait for 
project to be selected + print(f"Successfully selected project: {project_name}") + + except Exception as e: + print(f"Error during dropdown selection: {e}") + print("Falling back to URL navigation") + self._select_project_by_url(project_name) + + except Exception as e: + print(f"Failed to select project {project_name}: {e}") + # Take screenshot on any failure + try: + screenshot_path = "/tmp/select_project_failure.png" + self.driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + raise + + def _select_project_by_url(self, project_name): + """Fallback: Select project by navigating to the URL with the project name""" + import time + import re + + current_url = self.driver.current_url + print(f"Attempting to select project via URL navigation") + print(f"Current URL: {current_url}") + + # URL pattern: .../observe-monitor/workload-metrics/workload-status/{project_name} + # Replace the last path segment with the project name + if '/workload-metrics/' in current_url or '/distributedWorkloads/' in current_url or '/distributed-workloads/' in current_url: + # Find the last path segment and replace it + url_parts = current_url.rstrip('/').split('/') + + # If URL ends with a project name, replace it + if len(url_parts) >= 2: + # Replace last segment with project_name + url_parts[-1] = project_name + new_url = '/'.join(url_parts) + else: + # Append project name to URL + new_url = f"{current_url.rstrip('/')}/{project_name}" + + print(f"Navigating to: {new_url}") + self.driver.get(new_url) + time.sleep(3) # Wait for page to load + + print(f"New URL after navigation: {self.driver.current_url}") + print(f"Successfully navigated to project: {project_name}") + else: + raise RuntimeError( + f"Cannot determine correct URL pattern to select project. 
" + f"Current URL: {current_url}" + ) + + def verify_cluster_running(self): + """Verify that a cluster is in Running or Admitted state""" + import time + + try: + # Wait a bit for the page to load + time.sleep(2) + + # Try to find status labels using multiple locators + print("Searching for cluster status...") + status_found = False + + for by, value in self.STATUS_LABEL_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + status_elements = self.driver.find_elements(by, value) + + if status_elements: + print(f"Found {len(status_elements)} status label(s)") + for elem in status_elements: + if elem.is_displayed(): + status_text = elem.text + print(f"Status text: {status_text}") + # Accept both "Running" and "Admitted" as valid states + if status_text in ["Running", "Admitted"]: + print(f"✓ Cluster is in {status_text} state") + status_found = True + break + + if status_found: + break + + except Exception as e: + print(f"Locator {by}={value} error: {str(e)[:100]}") + continue + + if status_found: + return True + + # Fallback: Try to find the workload metrics table as indication of success + print("Status label not found, checking for workload metrics table...") + try: + table = self.driver.find_element(*self.WORKLOAD_METRICS_TABLE) + if table.is_displayed(): + print("✓ Workload metrics table is visible (cluster exists)") + return True + except: + pass + + print("✗ Could not verify cluster status") + return False + + except Exception as e: + print(f"Failed to verify cluster status: {e}") + import traceback + traceback.print_exc() + return False + + def click_project_metrics_tab(self): + """Click on the Project Metrics tab""" + import time + + try: + # Try to find the tab using multiple locators + print("Searching for Project Metrics tab...") + tab = None + + for by, value in self.PROJECT_METRICS_TAB_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + tab = element + print(f"Found Project Metrics tab using: {by}={value}") + break + except Exception as e: + print(f"Locator {by}={value} not found: {str(e)[:100]}") + continue + + if not tab: + # Take screenshot for debugging + try: + screenshot_path = "/tmp/project_metrics_tab_not_found.png" + self.driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + raise RuntimeError("Could not find Project Metrics tab") + + tab.click() + time.sleep(2) # Wait for tab content to load + print("Successfully clicked Project Metrics tab") + except Exception as e: + print(f"Failed to click Project Metrics tab: {e}") + raise + + def verify_metrics_visible(self): + """Verify that resource metrics are visible""" + import time + + try: + # Wait a bit for content to load + time.sleep(2) + + # Try to find metrics using multiple locators + print("Searching for resource metrics indicators...") + + for by, value in self.RESOURCE_METRICS_TITLE_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + print(f"Found resource metrics using: {by}={value}") + return True + except Exception as e: + print(f"Locator {by}={value} not found: {str(e)[:100]}") + continue + + # If no specific metrics title found, check if the tab content area exists + try: + # Look for the project-metrics tab content section + tab_content = self.driver.find_element( + By.XPATH, + "//section[@id='project-metrics-tab-content' or contains(@aria-labelledby, 'project-metrics')]" + ) + if 
tab_content.is_displayed(): + print("Project metrics tab content is visible") + return True + except: + pass + + # Take screenshot for debugging + try: + screenshot_path = "/tmp/metrics_not_visible.png" + self.driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + + print("Resource metrics not visible") + return False + except Exception as e: + print(f"Failed to verify metrics visibility: {e}") + return False + + def click_workload_status_tab(self): + """Click on the Workload Status tab""" + import time + + try: + # Try to find the tab using multiple locators + print("Searching for Workload Status tab...") + tab = None + + for by, value in self.WORKLOAD_STATUS_TAB_OPTIONS: + try: + print(f"Trying locator: {by}={value}") + element = self.driver.find_element(by, value) + if element.is_displayed(): + tab = element + print(f"Found Workload Status tab using: {by}={value}") + break + except Exception as e: + print(f"Locator {by}={value} not found: {str(e)[:100]}") + continue + + if not tab: + # Take screenshot for debugging + try: + screenshot_path = "/tmp/workload_status_tab_not_found.png" + self.driver.save_screenshot(screenshot_path) + print(f"Screenshot saved to: {screenshot_path}") + except: + pass + raise RuntimeError("Could not find Workload Status tab") + + tab.click() + time.sleep(2) # Wait for tab content to load + print("Successfully clicked Workload Status tab") + except Exception as e: + print(f"Failed to click Workload Status tab: {e}") + raise + + def verify_cluster_in_workload_list(self, cluster_name): + """Verify that a cluster appears in the workload list with Running or Admitted status""" + import time + + try: + # Wait for table to load + time.sleep(2) + + # Look for the cluster name in the table + cluster_cell = self.wait.until( + EC.presence_of_element_located( + (By.XPATH, f"//td[contains(text(), '{cluster_name}')]") + ) + ) + is_visible = cluster_cell.is_displayed() + print(f"Cluster {cluster_name} found in workload list: {is_visible}") + + if not is_visible: + return False + + # Find the parent row + cluster_row = cluster_cell.find_element(By.XPATH, "./ancestor::tr") + + # Find the status cell within the row (PatternFly v6 label structure) + # Try multiple approaches to find the status + status_found = False + status_text = None + + # Approach 1: Look for pf-v6-c-label__text within the row + try: + status_label = cluster_row.find_element( + By.XPATH, + ".//td[@data-label='Status']//span[contains(@class, 'pf-v6-c-label__text')]" + ) + status_text = status_label.text + print(f"Found status (v6 label): {status_text}") + status_found = True + except: + pass + + # Approach 2: Try PatternFly v5 + if not status_found: + try: + status_label = cluster_row.find_element( + By.XPATH, + ".//td[@data-label='Status']//span[contains(@class, 'pf-v5-c-label__text')]" + ) + status_text = status_label.text + print(f"Found status (v5 label): {status_text}") + status_found = True + except: + pass + + # Approach 3: Generic approach - find any text in status cell + if not status_found: + try: + status_cell = cluster_row.find_element(By.XPATH, ".//td[@data-label='Status']") + status_text = status_cell.text + print(f"Found status (generic): {status_text}") + status_found = True + except: + pass + + if not status_found: + print("Could not find status cell") + return False + + # Check if status is Running or Admitted + if status_text in ["Running", "Admitted"]: + print(f"✓ Cluster {cluster_name} status is {status_text}") + return True + else: + 
print(f"✗ Cluster {cluster_name} has unexpected status: {status_text}") + return False + + except Exception as e: + print(f"Failed to verify cluster {cluster_name} in workload list: {e}") + import traceback + traceback.print_exc() + return False + diff --git a/tests/upgrade/raycluster_sdk_upgrade_test.py b/tests/upgrade/01_raycluster_sdk_upgrade_test.py similarity index 91% rename from tests/upgrade/raycluster_sdk_upgrade_test.py rename to tests/upgrade/01_raycluster_sdk_upgrade_test.py index 7a917750..3307eda8 100644 --- a/tests/upgrade/raycluster_sdk_upgrade_test.py +++ b/tests/upgrade/01_raycluster_sdk_upgrade_test.py @@ -32,6 +32,30 @@ def setup_method(self): delete_kueue_resources(self) return _kube_api_error_handling(e) + @pytest.fixture(autouse=True) + def cleanup_on_failure(self, request): + """Fixture to cleanup namespace and resources if pre-upgrade test fails""" + # This runs after the test + yield + + # Check if the test failed + test_failed = ( + request.node.rep_call.failed + if hasattr(request.node, "rep_call") + else False + ) + + if test_failed: + print( + f"\n=== Pre-upgrade test failed, cleaning up namespace: {namespace} ===" + ) + try: + delete_namespace(self) + delete_kueue_resources(self) + print(f"Successfully cleaned up namespace: {namespace}") + except Exception as e: + print(f"Warning: Failed to cleanup namespace {namespace}: {e}") + def test_mnist_ray_cluster_sdk_auth(self): self.run_mnist_raycluster_sdk_oauth() diff --git a/tests/upgrade/02_dashboard_ui_upgrade_test.py b/tests/upgrade/02_dashboard_ui_upgrade_test.py new file mode 100644 index 00000000..acd150c7 --- /dev/null +++ b/tests/upgrade/02_dashboard_ui_upgrade_test.py @@ -0,0 +1,172 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import sys +import os + +# Add tests/ui to path to import page objects +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "ui")) + +from pages.distributed_workloads_page import DistributedWorkloadsPage + +# Import cleanup functions +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "e2e")) +from support import delete_namespace, delete_kueue_resources, initialize_kubernetes_client + +# Fixtures are imported via conftest.py in this directory + +# Test configuration - should match the cluster created in raycluster_sdk_upgrade_test.py +NAMESPACE = "test-ns-rayupgrade" +CLUSTER_NAME = "mnist" + + +@pytest.mark.pre_upgrade +@pytest.mark.ui +class TestDistributedWorkloadsUIPreUpgrade: + """ + UI tests to verify Ray cluster appears in RHOAI Dashboard before upgrade. + These tests validate that the cluster created by TestMNISTRayClusterApply + is visible and properly displayed in the Workload Metrics UI (under Observe & monitor). 
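+    They are expected to run after 01_raycluster_sdk_upgrade_test.py has created
+    the Ray cluster identified by NAMESPACE and CLUSTER_NAME above.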
+ """ + + @pytest.fixture(autouse=True) + def cleanup_on_failure(self, request): + """Fixture to cleanup namespace and resources if pre-upgrade UI test fails""" + # Initialize kubernetes client for cleanup + initialize_kubernetes_client(self) + self.namespace = NAMESPACE + + # This runs after the test + yield + + # Check if the test failed + test_failed = ( + request.node.rep_call.failed + if hasattr(request.node, "rep_call") + else False + ) + + if test_failed: + print( + f"\n=== Pre-upgrade UI test failed, cleaning up namespace: {NAMESPACE} ===" + ) + try: + delete_namespace(self) + # Note: Kueue resources might have been already cleaned by TestMNISTRayClusterApply + # but we try to clean them again just in case + try: + delete_kueue_resources(self) + except: + pass # May have already been deleted + print(f"Successfully cleaned up namespace: {NAMESPACE}") + except Exception as e: + print(f"Warning: Failed to cleanup namespace {NAMESPACE}: {e}") + + def test_verify_cluster_in_distributed_workloads_ui( + self, selenium_driver, login_to_dashboard + ): + """ + Verify that the Ray cluster is visible in the Workload Metrics UI + and shows correct status and metrics before upgrade. + """ + driver = selenium_driver + dw_page = DistributedWorkloadsPage(driver) + + # Navigate to Workload Metrics page (under Observe & monitor) + print("\n=== Navigating to Workload Metrics page ===") + dw_page.navigate() + + # Select the project + print(f"\n=== Selecting project: {NAMESPACE} ===") + dw_page.select_project(NAMESPACE) + + # Verify cluster is Running or Admitted + print("\n=== Verifying cluster is in Running or Admitted state ===") + assert dw_page.verify_cluster_running(), ( + f"Cluster in {NAMESPACE} should be in Running or Admitted state before upgrade" + ) + + # Click Project Metrics tab and verify metrics are visible + print("\n=== Checking Project Metrics tab ===") + dw_page.click_project_metrics_tab() + assert dw_page.verify_metrics_visible(), ( + "Resource metrics should be visible on Project Metrics tab" + ) + + # Click Workload Status tab and verify cluster appears in the list + print("\n=== Checking Workload Status tab ===") + dw_page.click_workload_status_tab() + assert dw_page.verify_cluster_in_workload_list(CLUSTER_NAME), ( + f"Cluster '{CLUSTER_NAME}' should appear in workload list with Running or Admitted status" + ) + + print("\n=== Pre-upgrade UI verification completed successfully ===") + + +@pytest.mark.post_upgrade +@pytest.mark.ui +class TestDistributedWorkloadsUIPostUpgrade: + """ + UI tests to verify Ray cluster persists in RHOAI Dashboard after upgrade. + These tests validate that the cluster created before the upgrade is still + visible and functional in the Workload Metrics UI (under Observe & monitor) after the upgrade completes. + """ + + def test_verify_cluster_persists_after_upgrade( + self, selenium_driver, login_to_dashboard + ): + """ + Verify that the Ray cluster is still visible in the Workload Metrics UI + and shows correct status and metrics after upgrade. + + This test performs the same verifications as the pre-upgrade test to ensure + the cluster survived the upgrade process. 
+ """ + driver = selenium_driver + dw_page = DistributedWorkloadsPage(driver) + + # Navigate to Workload Metrics page (under Observe & monitor) + print("\n=== Navigating to Workload Metrics page ===") + dw_page.navigate() + + # Select the project + print(f"\n=== Selecting project: {NAMESPACE} ===") + dw_page.select_project(NAMESPACE) + + # Verify cluster is still Running or Admitted after upgrade + print("\n=== Verifying cluster is still in Running or Admitted state after upgrade ===") + assert dw_page.verify_cluster_running(), ( + f"Cluster in {NAMESPACE} should still be in Running or Admitted state after upgrade" + ) + + # Click Project Metrics tab and verify metrics are still accessible + print("\n=== Checking Project Metrics tab ===") + dw_page.click_project_metrics_tab() + assert dw_page.verify_metrics_visible(), ( + "Resource metrics should still be visible on Project Metrics tab after upgrade" + ) + + # Click Workload Status tab and verify cluster still appears in the list + print("\n=== Checking Workload Status tab ===") + dw_page.click_workload_status_tab() + assert dw_page.verify_cluster_in_workload_list(CLUSTER_NAME), ( + f"Cluster '{CLUSTER_NAME}' should still appear in workload list with Running or Admitted status after upgrade" + ) + + print("\n=== Post-upgrade UI verification completed successfully ===") + print( + "The cluster has successfully persisted through the upgrade and remains functional." + ) + diff --git a/tests/upgrade/conftest.py b/tests/upgrade/conftest.py new file mode 100644 index 00000000..426f6197 --- /dev/null +++ b/tests/upgrade/conftest.py @@ -0,0 +1,49 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Conftest for upgrade tests - imports UI fixtures for dashboard tests +""" + +import sys +import os +import pytest + +# Add parent test directory to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +# Import all fixtures from ui/conftest.py +from ui.conftest import ( + selenium_driver, + dashboard_url, + test_credentials, + login_to_dashboard, +) + +__all__ = ["selenium_driver", "dashboard_url", "test_credentials", "login_to_dashboard"] + + +# Hook to capture test results for teardown methods +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + """ + Hook to capture test results and make them available to teardown methods. + This allows teardown_method to check if the test failed. + """ + outcome = yield + rep = outcome.get_result() + + # Store the result in the item so teardown can access it + setattr(item, f"rep_{rep.when}", rep) +