From f3e089d283dd81b09e0e567a6447fc82c0a2e91c Mon Sep 17 00:00:00 2001 From: Julio Pasinatto Date: Tue, 23 Sep 2025 09:21:52 -0300 Subject: [PATCH 1/4] Initial pytest test migration --- .github/workflows/e2e-py-check.yml | 31 + .gitignore | 4 + e2e-tests/conftest.py | 346 +++++++++ e2e-tests/finalizer/test_finalizer.py | 66 ++ e2e-tests/init-deploy/compare/find-1.json | 1 + e2e-tests/init-deploy/compare/find-2.json | 1 + e2e-tests/init-deploy/compare/find-3.json | 1 + .../statefulset_another-name-rs0-4-oc.yml | 2 +- .../statefulset_another-name-rs0-oc.yml | 2 +- .../compare/statefulset_another-name-rs0.yml | 2 +- .../init-deploy/conf/another-name-rs0.yml | 2 +- e2e-tests/init-deploy/test_init_deploy.py | 206 +++++ e2e-tests/tools.py | 718 ++++++++++++++++++ pyproject.toml | 31 + uv.lock | 299 ++++++++ 15 files changed, 1708 insertions(+), 4 deletions(-) create mode 100644 .github/workflows/e2e-py-check.yml create mode 100644 e2e-tests/conftest.py create mode 100644 e2e-tests/finalizer/test_finalizer.py create mode 100644 e2e-tests/init-deploy/compare/find-1.json create mode 100644 e2e-tests/init-deploy/compare/find-2.json create mode 100644 e2e-tests/init-deploy/compare/find-3.json create mode 100644 e2e-tests/init-deploy/test_init_deploy.py create mode 100644 e2e-tests/tools.py create mode 100644 pyproject.toml create mode 100644 uv.lock diff --git a/.github/workflows/e2e-py-check.yml b/.github/workflows/e2e-py-check.yml new file mode 100644 index 0000000000..f4b5ae9aa5 --- /dev/null +++ b/.github/workflows/e2e-py-check.yml @@ -0,0 +1,31 @@ +name: e2e-tests Python Quality Check + +on: + pull_request: + paths: + - 'e2e-tests/**/*.py' + +jobs: + quality-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version-file: "pyproject.toml" + + - name: Install dependencies + run: uv sync --locked + + - name: Run ruff check + run: uv run ruff check e2e-tests/ + + - name: Run mypy + run: uv run mypy e2e-tests/ \ No newline at end of file diff --git a/.gitignore b/.gitignore index 2c8d6a1d78..69c56fdfa3 100644 --- a/.gitignore +++ b/.gitignore @@ -189,3 +189,7 @@ bin/ projects/ installers/olm/operator_*.yaml installers/olm/bundles + +# Test Reports +e2e-tests/reports/ +e2e-tests/**/__pycache__/ \ No newline at end of file diff --git a/e2e-tests/conftest.py b/e2e-tests/conftest.py new file mode 100644 index 0000000000..f19d666a63 --- /dev/null +++ b/e2e-tests/conftest.py @@ -0,0 +1,346 @@ +import os +import pytest +import subprocess +import logging +import yaml +import json +import time +import random + +from pathlib import Path +from concurrent.futures import ThreadPoolExecutor + +import tools + +logging.getLogger("pytest_dependency").setLevel(logging.WARNING) +logger = logging.getLogger(__name__) + + +@pytest.fixture(scope="session", autouse=True) +def setup_env_vars(): + """Setup environment variables for the test session.""" + git_branch = tools.get_git_branch() + git_version, kube_version = tools.get_kubernetes_versions() + + os.environ.setdefault("KUBE_VERSION", kube_version) + os.environ.setdefault("EKS", "1" if "eks" in git_version else "0") + os.environ.setdefault("GKE", "1" if "gke" in git_version else "0") + os.environ.setdefault("OPENSHIFT", "0") + + os.environ.setdefault("API", "psmdb.percona.com/v1") + os.environ.setdefault("GIT_COMMIT", tools.get_git_commit()) + os.environ.setdefault("GIT_BRANCH", git_branch) + 
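+    # setdefault only fills in values that are not already exported, so any of
+    # these defaults can be overridden from the CI environment.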
os.environ.setdefault("OPERATOR_VERSION", tools.get_cr_version()) + os.environ.setdefault("IMAGE", f"perconalab/percona-server-mongodb-operator:{git_branch}") + os.environ.setdefault( + "IMAGE_MONGOD", "perconalab/percona-server-mongodb-operator:main-mongod7.0" + ) + os.environ.setdefault( + "IMAGE_MONGOD_CHAIN", + "perconalab/percona-server-mongodb-operator:main-mongod6.0\n" + "perconalab/percona-server-mongodb-operator:main-mongod7.0\n" + "perconalab/percona-server-mongodb-operator:main-mongod8.0", + ) + os.environ.setdefault("IMAGE_BACKUP", "perconalab/percona-server-mongodb-operator:main-backup") + os.environ.setdefault("IMAGE_PMM_CLIENT", "percona/pmm-client:2.44.1-1") + os.environ.setdefault("IMAGE_PMM_SERVER", "percona/pmm-server:2.44.1-1") + os.environ.setdefault("IMAGE_PMM3_CLIENT", "perconalab/pmm-client:3.1.0") + os.environ.setdefault("IMAGE_PMM3_SERVER", "perconalab/pmm-server:3.1.0") + + os.environ.setdefault("CERT_MANAGER_VER", "1.18.2") + os.environ.setdefault("CHAOS_MESH_VER", "2.7.1") + os.environ.setdefault("MINIO_VER", "5.4.0") + os.environ.setdefault("PMM_SERVER_VER", "9.9.9") + + os.environ.setdefault("CLEAN_NAMESPACE", "0") + os.environ.setdefault("DELETE_CRD_ON_START", "1") + os.environ.setdefault("SKIP_DELETE", "0") + os.environ.setdefault("SKIP_BACKUPS_TO_AWS_GCP_AZURE", "1") + os.environ.setdefault("UPDATE_COMPARE_FILES", "0") + + +@pytest.fixture(scope="class") +def test_paths(request): + """Fixture to provide paths relative to the test file.""" + test_file = Path(request.fspath) + test_dir = test_file.parent + conf_dir = test_dir.parent / "conf" + src_dir = test_dir.parent.parent + + return {"test_file": test_file, "test_dir": test_dir, "conf_dir": conf_dir, "src_dir": src_dir} + + +@pytest.fixture(scope="class") +def create_namespace(): + def _create_namespace(namespace): + """Create kubernetes namespace and clean up if exists.""" + operator_ns = os.environ.get("OPERATOR_NS") + + if int(os.environ.get("CLEAN_NAMESPACE")): + tools.clean_all_namespaces() + + if int(os.environ.get("OPENSHIFT")): + logger.info("Cleaning up all old namespaces from openshift") + + if operator_ns: + try: + result = subprocess.run( + ["oc", "get", "project", operator_ns, "-o", "json"], + capture_output=True, + text=True, + check=False, + ) + + if result.returncode == 0: + project_data = json.loads(result.stdout) + if project_data.get("metadata", {}).get("name"): + subprocess.run( + [ + "oc", + "delete", + "--grace-period=0", + "--force=true", + "project", + namespace, + ], + check=False, + ) + time.sleep(120) + else: + subprocess.run(["oc", "delete", "project", namespace], check=False) + time.sleep(40) + except Exception: + pass + + logger.info(f"Create namespace {namespace}") + subprocess.run(["oc", "new-project", namespace], check=True) + subprocess.run(["oc", "project", namespace], check=True) + subprocess.run( + ["oc", "adm", "policy", "add-scc-to-user", "hostaccess", "-z", "default"], + check=False, + ) + else: + logger.info("Cleaning up existing namespace") + + # Delete namespace if exists + try: + tools.kubectl_bin("delete", "namespace", namespace, "--ignore-not-found") + tools.kubectl_bin("wait", "--for=delete", f"namespace/{namespace}") + except subprocess.CalledProcessError: + pass + + logger.info(f"Create namespace {namespace}") + tools.kubectl_bin("create", "namespace", namespace) + tools.kubectl_bin("config", "set-context", "--current", f"--namespace={namespace}") + return namespace + + return _create_namespace + + +@pytest.fixture(scope="class") +def 
create_infra(test_paths, create_namespace): + def _create_infra(test_name): + """Create the necessary infrastructure for the tests.""" + logger.info("Creating test environment") + if os.environ.get("DELETE_CRD_ON_START") == "1": + tools.delete_crd_rbac(test_paths["src_dir"]) + tools.check_crd_for_deletion(f"{test_paths['src_dir']}/deploy/crd.yaml") + + if os.environ.get("OPERATOR_NS"): + create_namespace(os.environ.get("OPERATOR_NS")) + tools.deploy_operator(test_paths["test_dir"], test_paths["src_dir"]) + namespace = create_namespace(f"{test_name}-{random.randint(0, 32767)}") + else: + namespace = create_namespace(f"{test_name}-{random.randint(0, 32767)}") + tools.deploy_operator(test_paths["test_dir"], test_paths["src_dir"]) + + return namespace + + return _create_infra + + +@pytest.fixture(scope="class") +def destroy_infra(test_paths): + """Destroy the infrastructure created for the tests.""" + + def _destroy_infra(namespace): + if os.environ.get("SKIP_DELETE") == "1": + logger.info("SKIP_DELETE = 1. Skipping test environment cleanup") + return + + def run_cmd(cmd): + try: + tools.kubectl_bin(*cmd) + except (subprocess.CalledProcessError, FileNotFoundError, OSError) as e: + logger.debug(f"Command failed (continuing cleanup): {' '.join(cmd)}, error: {e}") + + def cleanup_crd(): + crd_file = f"{test_paths['src_dir']}/deploy/crd.yaml" + run_cmd(["delete", "-f", crd_file, "--ignore-not-found", "--wait=false"]) + + try: + with open(crd_file, "r") as f: + for doc in f.read().split("---"): + if not doc.strip(): + continue + crd_name = yaml.safe_load(doc)["metadata"]["name"] + run_cmd( + [ + "patch", + crd_name, + "--all-namespaces", + "--type=merge", + "-p", + '{"metadata":{"finalizers":[]}}', + ] + ) + run_cmd(["wait", "--for=delete", "crd", crd_name]) + except (FileNotFoundError, yaml.YAMLError, KeyError, TypeError) as e: + logger.debug(f"CRD cleanup failed (continuing): {e}") + + logger.info("Cleaning up test environment") + + commands = [ + ["delete", "psmdb-backup", "--all", "--ignore-not-found"], + [ + "delete", + "-f", + f"{test_paths['test_dir']}/../conf/container-rc.yaml", + "--ignore-not-found", + ], + [ + "delete", + "-f", + f"{test_paths['src_dir']}/deploy/{'cw-' if os.environ.get('OPERATOR_NS') else ''}rbac.yaml", + "--ignore-not-found", + ], + ] + + with ThreadPoolExecutor(max_workers=3) as executor: + futures = [executor.submit(run_cmd, cmd) for cmd in commands] + futures.append(executor.submit(cleanup_crd)) + + namespace_commands = [ + ["delete", "--grace-period=0", "--force", "namespace", namespace, "--ignore-not-found"] + ] + if os.environ.get("OPERATOR_NS"): + namespace_commands.append( + [ + "delete", + "--grace-period=0", + "--force", + "namespace", + os.environ.get("OPERATOR_NS"), + "--ignore-not-found", + ] + ) + + for cmd in namespace_commands: + run_cmd(cmd) + + return _destroy_infra + + +@pytest.fixture(scope="class") +def deploy_chaos_mesh(namespace): + """Deploy Chaos Mesh and clean up after tests.""" + try: + subprocess.run( + ["helm", "repo", "add", "chaos-mesh", "https://charts.chaos-mesh.org"], check=True + ) + subprocess.run(["helm", "repo", "update"], check=True) + subprocess.run( + [ + "helm", + "install", + "chaos-mesh", + "chaos-mesh/chaos-mesh", + "--namespace", + namespace, + "--version", + os.environ["CHAOS_MESH_VER"], + "--set", + "dashboard.create=false", + "--set", + "chaosDaemon.runtime=containerd", + "--set", + "chaosDaemon.socketPath=/run/containerd/containerd.sock", + "--wait", + ], + check=True, + ) + + except subprocess.CalledProcessError as e: 
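+        # Best-effort rollback: uninstall whatever was partially installed,
+        # then re-raise the original install error.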
+        try:
+            subprocess.run(
+                [
+                    "helm",
+                    "uninstall",
+                    "chaos-mesh",
+                    "--namespace",
+                    namespace,
+                    "--ignore-not-found",
+                    "--wait",
+                    "--timeout",
+                    "60s",
+                ],
+                check=True,
+            )
+        except (subprocess.CalledProcessError, FileNotFoundError, OSError) as cleanup_error:
+            logger.warning(f"Failed to cleanup chaos-mesh during error handling: {cleanup_error}")
+        raise e
+
+    yield
+
+    try:
+        subprocess.run(
+            [
+                "helm",
+                "uninstall",
+                "chaos-mesh",
+                "--namespace",
+                namespace,
+                "--wait",
+                "--timeout",
+                "60s",
+            ],
+            check=True,
+        )
+    except subprocess.CalledProcessError as e:
+        logger.error(f"Failed to cleanup chaos-mesh: {e}")
+
+
+@pytest.fixture(scope="class")
+def deploy_cert_manager():
+    """Deploy Cert Manager and clean up after tests."""
+    logger.info("Deploying cert-manager")
+    cert_manager_url = f"https://github.com/cert-manager/cert-manager/releases/download/v{os.environ.get('CERT_MANAGER_VER')}/cert-manager.yaml"
+    try:
+        tools.kubectl_bin("create", "namespace", "cert-manager")
+        tools.kubectl_bin(
+            "label", "namespace", "cert-manager", "certmanager.k8s.io/disable-validation=true"
+        )
+        tools.kubectl_bin("apply", "-f", cert_manager_url, "--validate=false")
+        tools.kubectl_bin(
+            "wait",
+            "pod",
+            "-l",
+            "app.kubernetes.io/instance=cert-manager",
+            "--for=condition=ready",
+            "-n",
+            "cert-manager",
+        )
+    except Exception as e:
+        try:
+            tools.kubectl_bin("delete", "-f", cert_manager_url, "--ignore-not-found")
+        except (subprocess.CalledProcessError, FileNotFoundError, OSError) as cleanup_error:
+            logger.warning(
+                f"Failed to cleanup cert-manager during error handling: {cleanup_error}"
+            )
+        raise e
+
+    yield
+
+    try:
+        tools.kubectl_bin("delete", "-f", cert_manager_url, "--ignore-not-found")
+    except Exception as e:
+        logger.error(f"Failed to cleanup cert-manager: {e}")
diff --git a/e2e-tests/finalizer/test_finalizer.py b/e2e-tests/finalizer/test_finalizer.py
new file mode 100644
index 0000000000..fbc20e1434
--- /dev/null
+++ b/e2e-tests/finalizer/test_finalizer.py
@@ -0,0 +1,66 @@
+import pytest
+import logging
+
+from types import SimpleNamespace
+
+import tools
+
+logger = logging.getLogger(__name__)
+
+
+class TestFinalizer:
+    """Test MongoDB cluster finalizers"""
+
+    @pytest.fixture(scope="class", autouse=True)
+    def env(self, create_infra, destroy_infra, test_paths):
+        """Setup test environment and cleanup after tests"""
+        namespace = None
+        try:
+            namespace = create_infra("finalizer")
+            tools.kubectl_bin(
+                "apply",
+                "-f",
+                f"{test_paths['conf_dir']}/secrets_with_tls.yml",
+                "-f",
+                f"{test_paths['conf_dir']}/client-70.yml",
+            )
+
+            yield SimpleNamespace(
+                test_dir=test_paths["test_dir"],
+                namespace=namespace,
+                cluster="some-name",
+            )
+        except Exception as e:
+            pytest.fail(f"Environment setup failed: {e}")
+        finally:
+            if namespace:
+                destroy_infra(namespace)
+
+    @pytest.mark.dependency()
+    def test_create_cluster(self, env):
+        tools.apply_cluster(f"{env.test_dir}/conf/{env.cluster}.yml")
+        tools.wait_for_running(f"{env.cluster}-rs0", 3)
+        tools.wait_for_running(f"{env.cluster}-cfg", 3)
+
+    @pytest.mark.dependency(depends=["TestFinalizer::test_create_cluster"])
+    def test_kill_primary_should_elect_new_one(self, env):
+        primary = tools.get_mongo_primary(
+            f"clusterAdmin:clusterAdmin123456@{env.cluster}-rs0.{env.namespace}", env.cluster
+        )
+        tools.kubectl_bin("delete", "pod", "--grace-period=0", "--force", primary)
+        tools.wait_for_running(f"{env.cluster}-rs0", 3)
+        new_primary = tools.get_mongo_primary(
f"clusterAdmin:clusterAdmin123456@{env.cluster}-rs0.{env.namespace}", env.cluster + ) + assert new_primary != primary, "Primary did not change after killing the pod" + + @pytest.mark.dependency(depends=["TestFinalizer::test_kill_primary_should_elect_new_one"]) + def test_delete_cluster(self, env): + tools.kubectl_bin("delete", "psmdb", env.cluster, "--wait=false") + tools.wait_for_delete(f"psmdb/{env.cluster}") + + tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-cfg-0") + tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-cfg-1") + tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-cfg-2") + tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-rs0-0") + tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-rs0-1") + tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-rs0-2") diff --git a/e2e-tests/init-deploy/compare/find-1.json b/e2e-tests/init-deploy/compare/find-1.json new file mode 100644 index 0000000000..2d84f1ffee --- /dev/null +++ b/e2e-tests/init-deploy/compare/find-1.json @@ -0,0 +1 @@ +[ { "x": 100500 } ] \ No newline at end of file diff --git a/e2e-tests/init-deploy/compare/find-2.json b/e2e-tests/init-deploy/compare/find-2.json new file mode 100644 index 0000000000..38993741e3 --- /dev/null +++ b/e2e-tests/init-deploy/compare/find-2.json @@ -0,0 +1 @@ +[ { "x": 100500 }, { "x" : 100501 } ] \ No newline at end of file diff --git a/e2e-tests/init-deploy/compare/find-3.json b/e2e-tests/init-deploy/compare/find-3.json new file mode 100644 index 0000000000..665e842fe7 --- /dev/null +++ b/e2e-tests/init-deploy/compare/find-3.json @@ -0,0 +1 @@ +[ { "x": 100502 } ] \ No newline at end of file diff --git a/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-4-oc.yml b/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-4-oc.yml index 69fb0febf2..1cf091e173 100644 --- a/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-4-oc.yml +++ b/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-4-oc.yml @@ -242,7 +242,7 @@ spec: securityContext: {} serviceAccount: default serviceAccountName: default - terminationGracePeriodSeconds: 300 + terminationGracePeriodSeconds: 30 volumes: - name: another-name-mongodb-keyfile secret: diff --git a/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-oc.yml b/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-oc.yml index d094842953..03189a2f5a 100644 --- a/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-oc.yml +++ b/e2e-tests/init-deploy/compare/statefulset_another-name-rs0-oc.yml @@ -241,7 +241,7 @@ spec: securityContext: {} serviceAccount: default serviceAccountName: default - terminationGracePeriodSeconds: 300 + terminationGracePeriodSeconds: 30 volumes: - name: another-name-mongodb-keyfile secret: diff --git a/e2e-tests/init-deploy/compare/statefulset_another-name-rs0.yml b/e2e-tests/init-deploy/compare/statefulset_another-name-rs0.yml index febc511905..ef3361edba 100644 --- a/e2e-tests/init-deploy/compare/statefulset_another-name-rs0.yml +++ b/e2e-tests/init-deploy/compare/statefulset_another-name-rs0.yml @@ -244,7 +244,7 @@ spec: fsGroup: 1001 serviceAccount: default serviceAccountName: default - terminationGracePeriodSeconds: 300 + terminationGracePeriodSeconds: 30 volumes: - name: another-name-mongodb-keyfile secret: diff --git a/e2e-tests/init-deploy/conf/another-name-rs0.yml b/e2e-tests/init-deploy/conf/another-name-rs0.yml index 9b538ef891..2db147c84d 100644 --- a/e2e-tests/init-deploy/conf/another-name-rs0.yml +++ b/e2e-tests/init-deploy/conf/another-name-rs0.yml @@ -17,7 
+17,7 @@ spec:
 #    tasks:
   replsets:
   - name: rs0
-    terminationGracePeriodSeconds: 300
+    terminationGracePeriodSeconds: 30
     configuration: |
       operationProfiling:
         mode: slowOp
diff --git a/e2e-tests/init-deploy/test_init_deploy.py b/e2e-tests/init-deploy/test_init_deploy.py
new file mode 100644
index 0000000000..dac4f1d8af
--- /dev/null
+++ b/e2e-tests/init-deploy/test_init_deploy.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python3
+
+import pytest
+import time
+import logging
+
+from types import SimpleNamespace
+
+import tools
+
+logger = logging.getLogger(__name__)
+
+
+class TestInitDeploy:
+    """Test MongoDB cluster deployment and operations"""
+
+    @pytest.fixture(scope="class", autouse=True)
+    def env(self, create_infra, destroy_infra, test_paths):
+        """Setup test environment and cleanup after tests"""
+        namespace = None
+        try:
+            namespace = create_infra("init-deploy")
+            tools.kubectl_bin(
+                "apply",
+                "-f",
+                f"{test_paths['test_dir']}/conf/secrets_with_tls.yml",
+                "-f",
+                f"{test_paths['test_dir']}/../conf/client-70.yml",
+            )
+            tools.apply_runtime_class(test_paths["test_dir"])
+
+            yield SimpleNamespace(
+                test_dir=test_paths["test_dir"],
+                conf_dir=test_paths["conf_dir"],
+                src_dir=test_paths["src_dir"],
+                namespace=namespace,
+                cluster="some-name-rs0",
+                cluster2="another-name-rs0",
+                max_conn=17,
+            )
+        except Exception as e:
+            pytest.fail(f"Environment setup failed: {e}")
+        finally:
+            if namespace:
+                destroy_infra(namespace)
+
+    @pytest.mark.dependency()
+    def test_create_first_cluster(self, env):
+        """Create first PSMDB cluster"""
+        tools.apply_cluster(f"{env.test_dir}/../conf/{env.cluster}.yml")
+        tools.wait_for_running(env.cluster, 3)
+
+        tools.compare_kubectl(env.test_dir, f"statefulset/{env.cluster}", env.namespace)
+        tools.compare_kubectl(env.test_dir, f"service/{env.cluster}", env.namespace)
+
+    @pytest.mark.dependency(depends=["TestInitDeploy::test_create_first_cluster"])
+    def test_verify_users_created(self, env):
+        """Check that users are created with the correct permissions"""
+        secret_name = "some-users"
+
+        # Test userAdmin user
+        user = tools.get_user_data(secret_name, "MONGODB_USER_ADMIN_USER")
+        password = tools.get_user_data(secret_name, "MONGODB_USER_ADMIN_PASSWORD")
+        tools.compare_mongo_user(
+            f"{user}:{password}@{env.cluster}.{env.namespace}", "userAdmin", env.test_dir
+        )
+
+        # Test backup user
+        user = tools.get_user_data(secret_name, "MONGODB_BACKUP_USER")
+        password = tools.get_user_data(secret_name, "MONGODB_BACKUP_PASSWORD")
+        tools.compare_mongo_user(
+            f"{user}:{password}@{env.cluster}.{env.namespace}", "backup", env.test_dir
+        )
+
+        # Test clusterAdmin user
+        user = tools.get_user_data(secret_name, "MONGODB_CLUSTER_ADMIN_USER")
+        password = tools.get_user_data(secret_name, "MONGODB_CLUSTER_ADMIN_PASSWORD")
+        tools.compare_mongo_user(
+            f"{user}:{password}@{env.cluster}.{env.namespace}", "clusterAdmin", env.test_dir
+        )
+
+        # Test clusterMonitor user
+        user = tools.get_user_data(secret_name, "MONGODB_CLUSTER_MONITOR_USER")
+        password = tools.get_user_data(secret_name, "MONGODB_CLUSTER_MONITOR_PASSWORD")
+        tools.compare_mongo_user(
+            f"{user}:{password}@{env.cluster}.{env.namespace}", "clusterMonitor", env.test_dir
+        )
+
+        # Test that an unauthorized user is rejected
+        result = tools.run_mongosh(
+            "db.runCommand({connectionStatus:1,showPrivileges:true})",
+            f"test:test@{env.cluster}.{env.namespace}",
+        )
+        assert "Authentication failed" in result
+
+    @pytest.mark.dependency(depends=["TestInitDeploy::test_verify_users_created"])
+    def test_write_and_read_data(self, env):
+        """Write data and read from all nodes"""
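+        # Create the application user first; the writes and reads below
+        # authenticate as myApp.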
+ + tools.run_mongosh( + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})', + f"userAdmin:userAdmin123456@{env.cluster}.{env.namespace}", + ) + + # Wait for user to be fully created + time.sleep(2) + + tools.run_mongosh( + "db.getSiblingDB('myApp').test.insertOne({ x: 100500 })", + f"myApp:myPass@{env.cluster}.{env.namespace}", + ) + + for i in range(3): + tools.compare_mongo_cmd( + "find({}, { _id: 0 }).toArray()", + f"myApp:myPass@{env.cluster}-{i}.{env.cluster}.{env.namespace}", + test_file=f"{env.test_dir}/compare/find-1.json", + ) + + @pytest.mark.dependency(depends=["TestInitDeploy::test_write_and_read_data"]) + def test_connection_count(self, env): + """Check number of connections doesn't exceed maximum""" + conn_count = int( + tools.run_mongosh( + "db.serverStatus().connections.current", + f"clusterAdmin:clusterAdmin123456@{env.cluster}.{env.namespace}", + ).strip() + ) + assert conn_count <= env.max_conn, ( + f"Connection count {conn_count} exceeds maximum {env.max_conn}" + ) + + @pytest.mark.dependency(depends=["TestInitDeploy::test_connection_count"]) + def test_primary_failover(self, env): + """Kill Primary Pod, check reelection, check data""" + initial_primary = tools.get_mongo_primary( + f"clusterAdmin:clusterAdmin123456@{env.cluster}.{env.namespace}", env.cluster + ) + assert initial_primary, "Failed to get initial primary" + + tools.kubectl_bin( + "delete", "pods", "--grace-period=0", "--force", initial_primary, "-n", env.namespace + ) + tools.wait_for_running(env.cluster, 3) + + changed_primary = tools.get_mongo_primary( + f"clusterAdmin:clusterAdmin123456@{env.cluster}.{env.namespace}", env.cluster + ) + assert initial_primary != changed_primary, "Primary didn't change after pod deletion" + + tools.run_mongosh( + "db.getSiblingDB('myApp').test.insertOne({ x: 100501 })", + f"myApp:myPass@{env.cluster}.{env.namespace}", + ) + + for i in range(3): + tools.compare_mongo_cmd( + "find({}, { _id: 0 }).toArray()", + f"myApp:myPass@{env.cluster}-{i}.{env.cluster}.{env.namespace}", + "-2nd", + test_file=f"{env.test_dir}/compare/find-2.json", + ) + + @pytest.mark.dependency(depends=["TestInitDeploy::test_primary_failover"]) + def test_create_second_cluster(self, env): + """Check if possible to create second cluster""" + tools.apply_cluster(f"{env.test_dir}/conf/{env.cluster2}.yml") + tools.wait_for_running(env.cluster2, 3) + tools.compare_kubectl(env.test_dir, f"statefulset/{env.cluster2}", env.namespace) + tools.compare_kubectl(env.test_dir, f"service/{env.cluster2}", env.namespace) + + @pytest.mark.dependency(depends=["TestInitDeploy::test_create_second_cluster"]) + def test_second_cluster_data_operations(self, env): + """Write data and read from all nodes in second cluster""" + # Create user + tools.run_mongosh( + 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})', + f"userAdmin:userAdmin123456@{env.cluster2}.{env.namespace}", + ) + + # Write data + tools.run_mongosh( + "db.getSiblingDB('myApp').test.insertOne({ x: 100502 })", + f"myApp:myPass@{env.cluster2}.{env.namespace}", + ) + + # Read from all nodes + for i in range(3): + tools.compare_mongo_cmd( + "find({}, { _id: 0 }).toArray()", + f"myApp:myPass@{env.cluster2}-{i}.{env.cluster2}.{env.namespace}", + "-3rd", + test_file=f"{env.test_dir}/compare/find-3.json", + ) + + @pytest.mark.dependency(depends=["TestInitDeploy::test_second_cluster_data_operations"]) + def test_log_files_exist(self, env): + """Check if mongod log files exist in pod""" + result = 
tools.kubectl_bin(
+            "exec", f"{env.cluster2}-0", "-c", "mongod", "--", "ls", "/data/db/logs"
+        )
+
+        assert "mongod.log" in result, "mongod.log not found"
+        assert "mongod.full.log" in result, "mongod.full.log not found"
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
diff --git a/e2e-tests/tools.py b/e2e-tests/tools.py
new file mode 100644
index 0000000000..399d9f4477
--- /dev/null
+++ b/e2e-tests/tools.py
@@ -0,0 +1,718 @@
+import os
+import re
+import time
+import yaml
+import json
+import logging
+import base64
+import urllib.parse
+import subprocess
+
+from deepdiff import DeepDiff
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+
+def kubectl_bin(*args, check: bool = True, input_data: str = "") -> str:
+    """Execute a kubectl command and return its output.
+
+    Note: if the command writes anything to stderr, stderr is returned instead
+    of stdout; callers that assert on error messages rely on this behavior.
+    """
+    cmd = ["kubectl"] + list(args)
+    logger.debug(" ".join(map(str, cmd)))
+    result = subprocess.run(cmd, check=check, capture_output=True, text=True, input=input_data)
+
+    if result.stderr:
+        logger.warning(f"kubectl error: {result.stderr}")
+        return result.stderr
+
+    return result.stdout
+
+
+def cat_config(config_file: str) -> str:
+    """Process a config file, mirroring the yq transformations of the bash suite"""
+    with open(config_file, "r") as f:
+        config = yaml.safe_load(f)
+
+    # Apply transformations similar to the original yq eval commands
+    if "spec" in config:
+        spec = config["spec"]
+
+        # Set mongod image if not present
+        if "image" not in spec or spec["image"] is None:
+            spec["image"] = os.environ.get("IMAGE_MONGOD")
+
+        # Set PMM client image
+        if "pmm" in spec:
+            spec["pmm"]["image"] = os.environ.get("IMAGE_PMM_CLIENT")
+
+        # Set init image
+        if "initImage" in spec:
+            spec["initImage"] = os.environ.get("IMAGE")
+
+        # Set backup image
+        if "backup" in spec:
+            spec["backup"]["image"] = os.environ.get("IMAGE_BACKUP")
+
+        # Set upgrade options
+        if "upgradeOptions" not in spec:
+            spec["upgradeOptions"] = {}
+        spec["upgradeOptions"]["apply"] = "Never"
+
+    return yaml.dump(config)
+
+
+def apply_cluster(config_file: str) -> None:
+    """Apply cluster configuration"""
+    logger.info("Creating PSMDB cluster")
+    config_yaml = cat_config(config_file)
+
+    if os.environ.get("SKIP_BACKUPS_TO_AWS_GCP_AZURE") == "1":
+        config = yaml.safe_load(config_yaml)
+
+        # Remove the cloud backup tasks
+        if "spec" in config and "backup" in config["spec"] and "tasks" in config["spec"]["backup"]:
+            tasks = config["spec"]["backup"]["tasks"]
+            # Delete the entry at index 1 three times to drop the scheduled cloud tasks
+            for _ in range(3):
+                if len(tasks) > 1:
+                    del tasks[1]
+
+        config_yaml = yaml.dump(config)
+
+    kubectl_bin("apply", "-f", "-", input_data=config_yaml)
+
+
+def delete_crd_rbac(src_dir: Path) -> None:
+    """Delete the operator CRDs, clearing finalizers on leftover custom resources."""
+    logger.info("Deleting old CRDs and RBACs")
+    crd_path = (src_dir / "deploy" / "crd.yaml").resolve()
+
+    docs = list(yaml.safe_load_all(crd_path.read_text()))
+    crd_names = []
+    resource_kinds = []
+    for doc in docs:
+        if doc and doc.get("kind") == "CustomResourceDefinition":
+            crd_names.append(doc["metadata"]["name"])
+            group = doc["spec"]["group"]
+            plural = doc["spec"]["names"]["plural"]
+            resource_kinds.append(f"{plural}.{group}")
+
+    kubectl_bin("delete", "-f", str(crd_path), "--ignore-not-found", "--wait=false", check=False)
+
+    for kind in resource_kinds:
+        try:
+            items_json = kubectl_bin("get", kind, "--all-namespaces", "-o", "json")
+            data = json.loads(items_json)
+            for item in data.get("items", []):
+                ns = item["metadata"]["namespace"]
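+                # Clear finalizers on each leftover custom resource so it cannot
+                # block deletion of the CRD itself.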
name = item["metadata"]["name"] + kubectl_bin( + "patch", + kind, + "-n", + ns, + name, + "--type=merge", + "-p", + '{"metadata":{"finalizers":[]}}', + ) + except subprocess.CalledProcessError: + # Kind may not exist or no instances exist; ignore + pass + + for name in crd_names: + kubectl_bin("wait", "--for=delete", "crd", name, check=False) + + +def check_crd_for_deletion(file_path: str) -> None: + """Check and remove finalizers from CRDs to allow deletion""" + with open(file_path, "r") as f: + yaml_content = f.read() + + for doc in yaml_content.split("---"): + if not doc.strip(): + continue + try: + parsed_doc = yaml.safe_load(doc) + if not parsed_doc or "metadata" not in parsed_doc: + continue + + crd_name = parsed_doc["metadata"]["name"] + + result = kubectl_bin( + "get", + f"crd/{crd_name}", + "-o", + "jsonpath={.status.conditions[-1].type}", + "--ignore-not-found", + ) + is_crd_terminating = result.strip() == "Terminating" + + if is_crd_terminating: + logger.info(f"Removing finalizers from CRD {crd_name} to allow deletion") + kubectl_bin( + "patch", + f"crd/{crd_name}", + "--type=merge", + "-p", + '{"metadata":{"finalizers":[]}}', + ) + try: + kubectl_bin( + "patch", + crd_name, + "--all-namespaces", + "--type=merge", + "-p", + '{"metadata":{"finalizers":[]}}', + ) + except Exception as patch_error: + logger.warning( + f"Could not patch {crd_name} instances (may not exist): {patch_error}" + ) + + except yaml.YAMLError as yaml_error: + logger.error(f"Error parsing YAML document: {yaml_error}") + except Exception as e: + logger.error(f"Error removing finalizers from CRD: {e}") + + +def deploy_operator(test_dir: str, src_dir: str) -> None: + """Deploy the operator with simplified logic.""" + logger.info("Start PSMDB operator") + operator_ns = os.environ.get("OPERATOR_NS") + + crd_file = f"{test_dir}/conf/crd.yaml" + if not os.path.isfile(crd_file): + crd_file = f"{src_dir}/deploy/crd.yaml" + + kubectl_bin("apply", "--server-side", "--force-conflicts", "-f", crd_file) + + rbac_type = "cw-rbac" if operator_ns else "rbac" + operator_file = f"{src_dir}/deploy/{'cw-' if operator_ns else ''}operator.yaml" + + apply_rbac(src_dir, rbac_type) + + with open(operator_file, "r") as f: + data = yaml.safe_load(f) + + for container in data["spec"]["template"]["spec"]["containers"]: + container["image"] = os.environ.get("IMAGE") + if "env" in container: + env_vars = {env["name"]: env for env in container["env"]} + if "DISABLE_TELEMETRY" in env_vars: + env_vars["DISABLE_TELEMETRY"]["value"] = "true" + if "LOG_LEVEL" in env_vars: + env_vars["LOG_LEVEL"]["value"] = "DEBUG" + + yaml_content = yaml.dump(data, default_flow_style=False) + kubectl_bin("apply", "-f", "-", input_data=yaml_content) + operator_pod = get_operator_pod() + wait_pod(operator_pod) + + logs = kubectl_bin("logs", operator_pod) + startup_logs = [line for line in logs.splitlines() if "Manager starting up" in line] + if startup_logs: + logger.info(f"Operator startup: {startup_logs[0]}") + else: + logger.warning("No 'Manager starting up' message found in logs") + + +def get_operator_pod() -> str: + """Get the operator pod name""" + args = [ + "get", + "pods", + "--selector=name=percona-server-mongodb-operator", + "-o", + "jsonpath={.items[].metadata.name}", + ] + operator_ns = os.environ.get("OPERATOR_NS") + if operator_ns: + args.extend(["-n", operator_ns]) + try: + out = kubectl_bin(*args) + names = [n for n in out.strip().split() if n] + if not names: + raise RuntimeError( + "No Running operator pod found. 
Ensure the operator deployment succeeded"
+            )
+        if len(names) > 1:
+            raise RuntimeError(f"Multiple operator pods found: {names}")
+        return names[0]
+    except Exception as e:
+        raise RuntimeError(f"Failed to get operator pod: {e}") from e
+
+
+def apply_rbac(src_dir: str, rbac: str = "rbac") -> None:
+    """Apply RBAC YAML with namespace substitution"""
+    operator_ns = os.getenv("OPERATOR_NS", "psmdb-operator")
+    path = Path(src_dir) / "deploy" / f"{rbac}.yaml"
+
+    yaml_content = path.read_text()
+    modified_yaml = re.sub(
+        r"^(\s*)namespace:\s*.*$", rf"\1namespace: {operator_ns}", yaml_content, flags=re.MULTILINE
+    )
+
+    args = ["apply", "-f", "-"]
+    if os.getenv("OPERATOR_NS"):
+        args = ["apply", "-n", operator_ns, "-f", "-"]
+
+    kubectl_bin(*args, input_data=modified_yaml)
+
+
+def clean_all_namespaces() -> None:
+    """Delete all namespaces except system ones."""
+    try:
+        logger.info("Cleaning up all old namespaces")
+        result = kubectl_bin("get", "ns", "-o", "jsonpath={.items[*].metadata.name}")
+        excluded_prefixes = {
+            "kube-",
+            "default",
+            "Terminating",
+            "psmdb-operator",
+            "openshift",
+            "gke-",
+            "gmp-",
+        }
+
+        namespaces = [
+            ns
+            for ns in result.strip().split()
+            if not any(prefix in ns for prefix in excluded_prefixes)
+        ]
+
+        if namespaces:
+            subprocess.Popen(["kubectl", "delete", "ns"] + namespaces)
+    except subprocess.CalledProcessError:
+        logger.error("Failed to clean namespaces")
+
+
+def destroy(namespace: str) -> None:
+    """Destroy test infrastructure"""
+    try:
+        kubectl_bin("delete", "namespace", namespace, "--ignore-not-found")
+    except subprocess.CalledProcessError:
+        pass
+
+
+def wait_pod(pod_name: str, timeout: str = "360") -> None:
+    """Wait for pod to be ready."""
+    logger.info(f"Waiting for pod/{pod_name} to be ready...")
+    time.sleep(2)
+    try:
+        kubectl_bin("wait", f"pod/{pod_name}", "--for=condition=ready", f"--timeout={timeout}s")
+        logger.info(f"Pod {pod_name} is ready")
+    except subprocess.CalledProcessError as e:
+        raise TimeoutError(f"Pod {pod_name} did not become ready within {timeout}s") from e
+
+
+def wait_for_running(
+    cluster_name: str, expected_pods: int, check_cluster_readiness: bool = True, timeout: int = 600
+) -> None:
+    """Wait for a replset's pods to run, then (optionally) for the cluster CR to be ready"""
+    last_pod = expected_pods - 1
+    rs_name = cluster_name.split("-")[-1]
+
+    # Wait for regular pods
+    for i in range(last_pod + 1):
+        if i == last_pod and get_jsonpath(cluster_name, rs_name, "arbiter.enabled") == "true":
+            wait_pod(f"{cluster_name}-arbiter-0")
+        else:
+            wait_pod(f"{cluster_name}-{i}")
+
+    # Wait for non-voting pods if enabled
+    if get_jsonpath(cluster_name, rs_name, "non_voting.enabled") == "true":
+        size = get_jsonpath(cluster_name, rs_name, "non_voting.size")
+        if size:
+            for i in range(int(size)):
+                wait_pod(f"{cluster_name}-nv-{i}")
+
+    # Wait for hidden pods if enabled
+    if get_jsonpath(cluster_name, rs_name, "hidden.enabled") == "true":
+        size = get_jsonpath(cluster_name, rs_name, "hidden.size")
+        if size:
+            for i in range(int(size)):
+                wait_pod(f"{cluster_name}-hidden-{i}")
+
+    cluster_name = cluster_name.replace(f"-{rs_name}", "")
+    if check_cluster_readiness:
+        start_time = time.time()
+        logger.info(f"Waiting for cluster {cluster_name} readiness")
+        while time.time() - start_time < timeout:
+            try:
+                result = kubectl_bin(
+                    "get", "psmdb", cluster_name, "-o", "jsonpath={.status.state}"
+                ).strip("'")
+                if result == "ready":
+                    logger.info(f"Cluster {cluster_name} is ready")
+                    return
+            except subprocess.CalledProcessError:
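+                # The CR status may be momentarily unreadable while the operator
+                # reconciles; log and keep polling until the timeout.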
logger.error(f"Error checking cluster {cluster_name} readiness") + pass + time.sleep(1) + raise TimeoutError(f"Timeout waiting for {cluster_name} to be ready") + + +def wait_for_delete(resource: str, timeout: int = 180) -> None: + """Wait for a specific resource to be deleted""" + logger.info(f"Waiting for {resource} to be deleted") + time.sleep(1) + try: + kubectl_bin("wait", "--for=delete", resource, f"--timeout={timeout}s") + except subprocess.CalledProcessError as e: + raise TimeoutError(f"Resource {resource} was not deleted within {timeout}s") from e + logger.info(f"{resource} was deleted") + + +def get_jsonpath(cluster_name: str, rs_name: str, path: str) -> str: + """Get value from PSMDB resource using JSONPath""" + jsonpath = f'{{.spec.replsets[?(@.name=="{rs_name}")].{path}}}' + try: + return kubectl_bin("get", "psmdb", cluster_name, "-o", f"jsonpath={jsonpath}") + except subprocess.CalledProcessError: + return "" + + +def compare_kubectl(test_dir: str, resource: str, namespace: str, postfix: str = "") -> None: + """Compare kubectl resource with expected output using yq filtering""" + expected_result = f"{test_dir}/compare/{resource.replace('/', '_')}{postfix}.yml" + + try: + actual_yaml = kubectl_bin("get", resource, "-o", "yaml") + with open(expected_result, "r") as f: + expected_yaml = f.read() + + filtered_actual = filter_yaml_with_yq(actual_yaml, namespace) + filtered_expected = filter_yaml_with_yq(expected_yaml, namespace) + + actual_data = yaml.safe_load(filtered_actual) + expected_data = yaml.safe_load(filtered_expected) + + diff = DeepDiff(expected_data, actual_data) + assert not diff, f"YAML files differ: {diff.pretty()}" + + except subprocess.CalledProcessError as e: + raise ValueError(f"Failed to process resource {resource}: {e}") + + +def get_mongo_primary(uri: str, cluster_name: str) -> str: + """Get current MongoDB primary node""" + primary_endpoint = run_mongosh("EJSON.stringify(db.hello().me)", uri) + + if cluster_name in primary_endpoint: + return primary_endpoint.split(".")[0].replace('"', "") + else: + endpoint_host = primary_endpoint.split(":")[0] + result = kubectl_bin("get", "service", "-o", "wide") + + for line in result.splitlines(): + if endpoint_host in line: + return line.split()[0].replace('"', "") + raise ValueError("Primary node not found in service list") + + +def compare_mongo_cmd( + command: str, + uri: str, + postfix: str = "", + suffix: str = "", + database: str = "myApp", + collection: str = "test", + sort: str = "", + test_file: str = "", +) -> None: + """Compare MongoDB command output""" + full_cmd = f"{collection}.{command}" + if sort: + full_cmd = f"{collection}.{command}.{sort}" + + logger.info(f"Running command: {full_cmd} on database: {database}") + + mongo_expr = f"EJSON.stringify(db.getSiblingDB('{database}').{full_cmd})" + result = json.loads(run_mongosh(mongo_expr, uri, "mongodb")) + + logger.info(f"MongoDB command output: {result}") + + with open(test_file) as file: + expected = json.load(file) + + diff = DeepDiff(expected, result) + assert not diff, f"MongoDB command output differs: {diff.pretty()}" + + +def compare_mongo_user(uri: str, expected_role: str, test_dir) -> None: + """Compare MongoDB user permissions""" + + def get_expected_file(test_dir, user): + """Get the appropriate expected file based on MongoDB version""" + base_path = Path(test_dir) / "compare" + base_file = base_path / f"{user}.json" + + # Check for version-specific files + image_mongod = os.environ.get("IMAGE_MONGOD", "") + version_mappings = [("8.0", "-80"), 
("7.0", "-70"), ("6.0", "-60")] + + for version, suffix in version_mappings: + if version in image_mongod: + version_file = base_path / f"{user}{suffix}.json" + if version_file.exists(): + logger.info(f"Using version-specific file: {version_file}") + with open(version_file) as f: + return json.load(f) + + # Fall back to base file + if base_file.exists(): + logger.info(f"Using base file: {base_file}") + with open(base_file) as f: + return json.load(f) + else: + raise FileNotFoundError(f"Expected file not found: {base_file}") + + def clean_mongo_json(data): + """Remove timestamps and metadata from MongoDB response""" + + def remove_timestamps(obj): + if isinstance(obj, dict): + return { + k: remove_timestamps(v) + for k, v in obj.items() + if k not in {"ok", "$clusterTime", "operationTime"} + } + elif isinstance(obj, list): + return [remove_timestamps(v) for v in obj] + elif isinstance(obj, str): + # Remove ISO timestamp patterns + return re.sub(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}[\+\-]\d{4}", "", obj) + else: + return obj + + return remove_timestamps(data) + + # TODO: consider a different approach to ignore order when comparing + def ordered(obj): + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj + + # Get actual MongoDB user permissions + try: + result = run_mongosh( + "EJSON.stringify(db.runCommand({connectionStatus:1,showPrivileges:true}))", + uri, + ) + actual_data = clean_mongo_json(json.loads(result)) + + except Exception as e: + raise RuntimeError(f"Failed to get MongoDB user permissions: {e}") + + expected_data = get_expected_file(test_dir, expected_role) + expected_data = ordered(expected_data) + actual_data = ordered(actual_data) + + diff = DeepDiff(expected_data, actual_data, ignore_order=True) + assert not diff, f"MongoDB user permissions differ: {diff.pretty()}" + + +def apply_runtime_class(test_dir: str) -> None: + """Apply runtime class configuration""" + + # from K8s 1.24 and later, runc is used + logger.info("Applying runc runtime class") + with open(f"{test_dir}/../conf/container-rc.yaml", "r") as f: + content = f.read() + if os.environ.get("EKS"): + content = content.replace("docker", "runc") + kubectl_bin("apply", "-f", "-", input_data=content) + + +def detect_k8s_provider(provider: str) -> str: + """Detect if the Kubernetes provider matches the given string""" + try: + output = kubectl_bin("version", "-o", "json") + git_version = json.loads(output)["serverVersion"]["gitVersion"] + return "1" if provider in git_version else "0" + except Exception as e: + logger.error(f"Failed to detect Kubernetes provider: {e}") + return "0" + + +def get_git_commit() -> str: + result = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True) + return result.stdout.strip() + + +def get_cr_version() -> str: + """Get CR version from cr.yaml""" + try: + with open( + os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "deploy", "cr.yaml")) + ) as f: + return next(line.split()[1] for line in f if "crVersion" in line) + except (StopIteration, Exception) as e: + logger.error(f"Failed to get CR version: {e}") + raise RuntimeError("CR version not found in cr.yaml") + + +def get_git_branch() -> str: + """Get current git branch or version from environment variable""" + if version := os.environ.get("VERSION"): + return version + + try: + result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + capture_output=True, + 
text=True, + check=True, + ) + branch = result.stdout.strip() + except (subprocess.CalledProcessError, FileNotFoundError): + return "unknown" + + return re.sub(r"[^a-zA-Z0-9-]", "-", branch.lower()) + + +def get_secret_data(secret_name: str, data_key: str) -> str: + """Get and decode secret data from Kubernetes""" + try: + result = kubectl_bin( + "get", f"secrets/{secret_name}", "-o", f"jsonpath={{.data.{data_key}}}" + ).strip() + decoded_data = base64.b64decode(result).decode("utf-8") + return decoded_data + except subprocess.CalledProcessError as e: + logger.error(f"Error: {e.stderr}") + return "" + + +def get_user_data(secret_name: str, data_key: str) -> str: + """Get and URL-encode secret data""" + secret_data = get_secret_data(secret_name, data_key) + return urllib.parse.quote(secret_data, safe="") + + +def filter_yaml_with_yq( + yaml_content: str, namespace: str, resource: str = "", skip_generation_check: bool = False +) -> str: + """Filter YAML content using yq command""" + + # TODO: consider using Python for filtering instead of yq + yq_filter = f""" + del(.metadata.ownerReferences[].apiVersion) | + del(.metadata.managedFields) | + del(.. | select(has("creationTimestamp")).creationTimestamp) | + del(.. | select(has("namespace")).namespace) | + del(.. | select(has("uid")).uid) | + del(.metadata.resourceVersion) | + del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | + del(.metadata.selfLink) | + del(.metadata.annotations."cloud.google.com/neg") | + del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | + del(.. | select(has("image")).image) | + del(.. | select(has("clusterIP")).clusterIP) | + del(.. | select(has("clusterIPs")).clusterIPs) | + del(.. | select(has("dataSource")).dataSource) | + del(.. | select(has("procMount")).procMount) | + del(.. | select(has("storageClassName")).storageClassName) | + del(.. | select(has("finalizers")).finalizers) | + del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | + del(.. | select(has("volumeName")).volumeName) | + del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | + del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | + del(.spec.volumeMode) | + del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | + del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | + del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | + del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | + del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | + del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | + del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | + del(.. | select(has("nodePort")).nodePort) | + del(.status) | + (.. | select(tag == "!!str")) |= sub("{namespace}"; "NAME_SPACE") | + del(.spec.volumeClaimTemplates[].apiVersion) | + del(.spec.volumeClaimTemplates[].kind) | + del(.spec.ipFamilies) | + del(.spec.ipFamilyPolicy) | + del(.spec.persistentVolumeClaimRetentionPolicy) | + del(.spec.internalTrafficPolicy) | + del(.spec.allocateLoadBalancerNodePorts) | + (.. | select(. == "extensions/v1beta1")) = "apps/v1" | + (.. | select(. 
== "batch/v1beta1")) = "batch/v1" + """ + + cmd = ["yq", "eval", yq_filter.strip(), "-"] + result = subprocess.run(cmd, input=yaml_content, text=True, capture_output=True, check=True) + filtered_yaml = result.stdout + + # Remove generation for cronjobs or if skip_generation_check is True + if "cronjob" in resource.lower() or skip_generation_check: + cmd = ["yq", "eval", "del(.metadata.generation)", "-"] + result = subprocess.run( + cmd, input=filtered_yaml, text=True, capture_output=True, check=True + ) + filtered_yaml = result.stdout + + return filtered_yaml + + +def run_mongosh( + command: str, + uri: str, + driver: str = "mongodb+srv", + suffix: str = ".svc.cluster.local", + mongo_flag: str = "", +) -> str: + """Execute mongosh command in PSMDB client container.""" + client_container = get_client_container() + + replica_set = "cfg" if "cfg" in uri else "rs0" + connection_string = f"{driver}://{uri}{suffix}/admin?ssl=false&replicaSet={replica_set}" + if mongo_flag: + connection_string += f" {mongo_flag}" + + result = kubectl_bin( + "exec", + client_container, + "--", + "mongosh", + f"{connection_string}", + "--eval", + command, + "--quiet", + check=False, + ) + return result + + +def get_kubernetes_versions() -> tuple[str, str]: + """Get Kubernetes git version and semantic version.""" + output = kubectl_bin("version", "-o", "json") + version_info = json.loads(output)["serverVersion"] + + git_version = version_info["gitVersion"] + major = version_info["major"] + minor = version_info["minor"].rstrip("+") + kube_version = f"{major}.{minor}" + + return git_version, kube_version + + +# TODO: Cache the client container name to avoid repeated kubectl calls. +def get_client_container(): + """Get the client container name once per test session.""" + result = kubectl_bin( + "get", "pods", "--selector=name=psmdb-client", "-o", "jsonpath={.items[].metadata.name}" + ) + return result.strip() + + +# TODO: implement this function +def check_passwords_leak(namespace=None): + """Check for password leaks in Kubernetes pod logs.""" + pass diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..17a91cc0b6 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,31 @@ +[project] +name = "psmdb-pytest" +version = "0.1.0" +description = "Tests for PSMDB Operator" +requires-python = ">=3.13" +dependencies = [ + "deepdiff>=8.5.0", + "mypy>=1.16.0", + "pytest>=8.4.0", + "pytest-dependency>=0.6.0", + "pytest-html>=4.1.1", + "pytest-json-report>=1.5.0", + "pyyaml>=6.0.2", + "ruff>=0.11.12", + "types-pyyaml>=6.0.12.20250915", +] + +[tool.pytest.ini_options] +log_cli = true +log_level = "INFO" +log_format = "%(levelname)s %(asctime)s %(message)s" +log_date_format = "%Y-%m-%dT%H:%M:%SZ" +addopts = "--html=e2e-tests/reports/report.html --self-contained-html --json-report --json-report-file=e2e-tests/reports/report.json --junitxml=e2e-tests/reports/report.xml" +render_collapsed = "all" + +[[tool.mypy.overrides]] +module = ["pytest_html.*"] +follow_untyped_imports = true + +[tool.ruff] +line-length = 99 diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000000..7d9281e01e --- /dev/null +++ b/uv.lock @@ -0,0 +1,299 @@ +version = 1 +revision = 1 +requires-python = ">=3.13" + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = 
"sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "deepdiff" +version = "8.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "orderly-set" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/0f/9cd2624f7dcd755cbf1fa21fb7234541f19a1be96a56f387ec9053ebe220/deepdiff-8.5.0.tar.gz", hash = "sha256:a4dd3529fa8d4cd5b9cbb6e3ea9c95997eaa919ba37dac3966c1b8f872dc1cd1", size = 538517 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/3b/2e0797200c51531a6d8c97a8e4c9fa6fb56de7e6e2a15c1c067b6b10a0b0/deepdiff-8.5.0-py3-none-any.whl", hash = "sha256:d4599db637f36a1c285f5fdfc2cd8d38bde8d8be8636b65ab5e425b67c54df26", size = 85112 }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = 
"https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = 
"https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, +] + +[[package]] +name = "mypy" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d4/38/13c2f1abae94d5ea0354e146b95a1be9b2137a0d506728e0da037c4276f6/mypy-1.16.0.tar.gz", hash = "sha256:84b94283f817e2aa6350a14b4a8fb2a35a53c286f97c9d30f53b63620e7af8ab", size = 3323139 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/9c/ca03bdbefbaa03b264b9318a98950a9c683e06472226b55472f96ebbc53d/mypy-1.16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a9e056237c89f1587a3be1a3a70a06a698d25e2479b9a2f57325ddaaffc3567b", size = 11059753 }, + { url = "https://files.pythonhosted.org/packages/36/92/79a969b8302cfe316027c88f7dc6fee70129490a370b3f6eb11d777749d0/mypy-1.16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b07e107affb9ee6ce1f342c07f51552d126c32cd62955f59a7db94a51ad12c0", size = 10073338 }, + { url = "https://files.pythonhosted.org/packages/14/9b/a943f09319167da0552d5cd722104096a9c99270719b1afeea60d11610aa/mypy-1.16.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c6fb60cbd85dc65d4d63d37cb5c86f4e3a301ec605f606ae3a9173e5cf34997b", size = 11827764 }, + { url = "https://files.pythonhosted.org/packages/ec/64/ff75e71c65a0cb6ee737287c7913ea155845a556c64144c65b811afdb9c7/mypy-1.16.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7e32297a437cc915599e0578fa6bc68ae6a8dc059c9e009c628e1c47f91495d", size = 12701356 }, + { url = "https://files.pythonhosted.org/packages/0a/ad/0e93c18987a1182c350f7a5fab70550852f9fabe30ecb63bfbe51b602074/mypy-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:afe420c9380ccec31e744e8baff0d406c846683681025db3531b32db56962d52", size = 12900745 }, + { url = "https://files.pythonhosted.org/packages/28/5d/036c278d7a013e97e33f08c047fe5583ab4f1fc47c9a49f985f1cdd2a2d7/mypy-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:55f9076c6ce55dd3f8cd0c6fff26a008ca8e5131b89d5ba6d86bd3f47e736eeb", size = 9572200 }, + { url = 
"https://files.pythonhosted.org/packages/99/a3/6ed10530dec8e0fdc890d81361260c9ef1f5e5c217ad8c9b21ecb2b8366b/mypy-1.16.0-py3-none-any.whl", hash = "sha256:29e1499864a3888bca5c1542f2d7232c6e586295183320caa95758fc84034031", size = 2265773 }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963 }, +] + +[[package]] +name = "orderly-set" +version = "5.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/4a/38030da31c13dcd5a531490006e63a0954083fb115113be9393179738e25/orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d", size = 20943 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/bc/e0dfb4db9210d92b44e49d6e61ba5caefbd411958357fa9d7ff489eeb835/orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83", size = 12339 }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, +] + +[[package]] +name = "psmdb-pytest" +version = "0.1.0" +source = { virtual = "." 
} +dependencies = [ + { name = "deepdiff" }, + { name = "mypy" }, + { name = "pytest" }, + { name = "pytest-dependency" }, + { name = "pytest-html" }, + { name = "pytest-json-report" }, + { name = "pyyaml" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "deepdiff", specifier = ">=8.5.0" }, + { name = "mypy", specifier = ">=1.16.0" }, + { name = "pytest", specifier = ">=8.4.0" }, + { name = "pytest-dependency", specifier = ">=0.6.0" }, + { name = "pytest-html", specifier = ">=4.1.1" }, + { name = "pytest-json-report", specifier = ">=1.5.0" }, + { name = "pyyaml", specifier = ">=6.0.2" }, + { name = "ruff", specifier = ">=0.11.12" }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pytest" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/aa/405082ce2749be5398045152251ac69c0f3578c7077efc53431303af97ce/pytest-8.4.0.tar.gz", hash = "sha256:14d920b48472ea0dbf68e45b96cd1ffda4705f33307dcc86c676c1b5104838a6", size = 1515232 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/de/afa024cbe022b1b318a3d224125aa24939e99b4ff6f22e0ba639a2eaee47/pytest-8.4.0-py3-none-any.whl", hash = "sha256:f40f825768ad76c0977cbacdf1fd37c6f7a468e460ea6a0636078f8972d4517e", size = 363797 }, +] + +[[package]] +name = "pytest-dependency" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7e/3b/317cc04e77d707d338540ca67b619df8f247f3f4c9f40e67bf5ea503ad94/pytest-dependency-0.6.0.tar.gz", hash = "sha256:934b0e6a39d95995062c193f7eaeed8a8ffa06ff1bcef4b62b0dc74a708bacc1", size = 19499 } + +[[package]] +name = "pytest-html" +version = "4.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "pytest" }, + { name = "pytest-metadata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/ab/4862dcb5a8a514bd87747e06b8d55483c0c9e987e1b66972336946e49b49/pytest_html-4.1.1.tar.gz", hash = "sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07", size = 150773 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/c7/c160021cbecd956cc1a6f79e5fe155f7868b2e5b848f1320dad0b3e3122f/pytest_html-4.1.1-py3-none-any.whl", hash = "sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71", size = 23491 }, +] + +[[package]] +name = "pytest-json-report" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "pytest-metadata" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/4f/d3/765dae9712fcd68d820338908c1337e077d5fdadccd5cacf95b9b0bea278/pytest-json-report-1.5.0.tar.gz", hash = "sha256:2dde3c647851a19b5f3700729e8310a6e66efb2077d674f27ddea3d34dc615de", size = 21241 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/35/d07400c715bf8a88aa0c1ee9c9eb6050ca7fe5b39981f0eea773feeb0681/pytest_json_report-1.5.0-py3-none-any.whl", hash = "sha256:9897b68c910b12a2e48dd849f9a284b2c79a732a8a9cb398452ddd23d3c8c325", size = 13222 }, +] + +[[package]] +name = "pytest-metadata" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/85/8c969f8bec4e559f8f2b958a15229a35495f5b4ce499f6b865eac54b878d/pytest_metadata-3.1.1.tar.gz", hash = "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8", size = 9952 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3e/43/7e7b2ec865caa92f67b8f0e9231a798d102724ca4c0e1f414316be1c1ef2/pytest_metadata-3.1.1-py3-none-any.whl", hash = "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b", size = 11428 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = 
"https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "ruff" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516 }, + { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083 }, + { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024 }, + { url = "https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324 }, + { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416 }, + { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197 }, + { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615 }, + { url = "https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080 }, + { url = "https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315 }, + { url = 
"https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640 }, + { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364 }, + { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462 }, + { url = "https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028 }, + { url = "https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992 }, + { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944 }, + { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669 }, + { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928 }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486 }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839 }, +] From fb96a0c21656f90ea932d4bc6a834c6a51f60609 Mon Sep 17 00:00:00 2001 From: Julio Pasinatto Date: Wed, 24 Sep 2025 13:16:58 -0300 Subject: [PATCH 2/4] 
Subject: [PATCH 2/4] Refactor to address comments

---
 e2e-tests/conftest.py                     | 169 +++++-----
 e2e-tests/finalizer/test_finalizer.py     |  84 +++--
 e2e-tests/init-deploy/test_init_deploy.py | 193 ++++++------
 e2e-tests/tools.py                        | 367 ++++++++++++----------
 uv.lock                                   |  11 +
 5 files changed, 439 insertions(+), 385 deletions(-)

diff --git a/e2e-tests/conftest.py b/e2e-tests/conftest.py
index f19d666a63..89149e392a 100644
--- a/e2e-tests/conftest.py
+++ b/e2e-tests/conftest.py
@@ -139,6 +139,8 @@ def _create_namespace(namespace):
 
 @pytest.fixture(scope="class")
 def create_infra(test_paths, create_namespace):
+    created_namespaces = []
+
     def _create_infra(test_name):
         """Create the necessary infrastructure for the tests."""
         logger.info("Creating test environment")
@@ -154,91 +156,90 @@ def _create_infra(test_name):
             namespace = create_namespace(f"{test_name}-{random.randint(0, 32767)}")
 
         tools.deploy_operator(test_paths["test_dir"], test_paths["src_dir"])
+        # Track created namespace for cleanup
+        created_namespaces.append(namespace)
         return namespace
 
-    return _create_infra
-
+    yield _create_infra
 
-@pytest.fixture(scope="class")
-def destroy_infra(test_paths):
-    """Destroy the infrastructure created for the tests."""
+    # Teardown code
+    if os.environ.get("SKIP_DELETE") == "1":
+        logger.info("SKIP_DELETE = 1. Skipping test environment cleanup")
+        return
 
-    def _destroy_infra(namespace):
-        if os.environ.get("SKIP_DELETE") == "1":
-            logger.info("SKIP_DELETE = 1. Skipping test environment cleanup")
-            return
+    def run_cmd(cmd):
+        try:
+            tools.kubectl_bin(*cmd)
+        except (subprocess.CalledProcessError, FileNotFoundError, OSError) as e:
+            logger.debug(f"Command failed (continuing cleanup): {' '.join(cmd)}, error: {e}")
 
-        def run_cmd(cmd):
-            try:
-                tools.kubectl_bin(*cmd)
-            except (subprocess.CalledProcessError, FileNotFoundError, OSError) as e:
-                logger.debug(f"Command failed (continuing cleanup): {' '.join(cmd)}, error: {e}")
+    def cleanup_crd():
+        crd_file = f"{test_paths['src_dir']}/deploy/crd.yaml"
+        run_cmd(["delete", "-f", crd_file, "--ignore-not-found", "--wait=false"])
 
-        def cleanup_crd():
-            crd_file = f"{test_paths['src_dir']}/deploy/crd.yaml"
-            run_cmd(["delete", "-f", crd_file, "--ignore-not-found", "--wait=false"])
+        try:
+            with open(crd_file, "r") as f:
+                for doc in f.read().split("---"):
+                    if not doc.strip():
+                        continue
+                    crd_name = yaml.safe_load(doc)["metadata"]["name"]
+                    run_cmd(
+                        [
+                            "patch",
+                            "crd",
+                            crd_name,
+                            "--type=merge",
+                            "-p",
+                            '{"metadata":{"finalizers":[]}}',
+                        ]
+                    )
+                    run_cmd(["wait", "--for=delete", "crd", crd_name, "--timeout=60s"])
+        except (FileNotFoundError, yaml.YAMLError, KeyError, TypeError) as e:
+            logger.debug(f"CRD cleanup failed (continuing): {e}")
+
+    logger.info("Cleaning up test environment")
+
+    commands = [
+        ["delete", "psmdb-backup", "--all", "--ignore-not-found"],
+        [
+            "delete",
+            "-f",
+            f"{test_paths['test_dir']}/../conf/container-rc.yaml",
+            "--ignore-not-found",
+        ],
+        [
+            "delete",
+            "-f",
+            f"{test_paths['src_dir']}/deploy/{'cw-' if os.environ.get('OPERATOR_NS') else ''}rbac.yaml",
+            "--ignore-not-found",
+        ],
+    ]
+
+    with ThreadPoolExecutor(max_workers=3) as executor:
+        futures = [executor.submit(run_cmd, cmd) for cmd in commands]
+        futures.append(executor.submit(cleanup_crd))
+
+    # Clean up all created namespaces
+    namespace_commands = []
+    for namespace in created_namespaces:
+        namespace_commands.append(
+            ["delete", "--grace-period=0", "--force", "namespace", namespace, "--ignore-not-found"]
+        )
 
-        try:
-            with open(crd_file, "r") as f:
-                for doc in f.read().split("---"):
-                    if not doc.strip():
-                        continue
-                    crd_name = yaml.safe_load(doc)["metadata"]["name"]
-                    run_cmd(
-                        [
-                            "patch",
-                            crd_name,
-                            "--all-namespaces",
-                            "--type=merge",
-                            "-p",
-                            '{"metadata":{"finalizers":[]}}',
-                        ]
-                    )
-                    run_cmd(["wait", "--for=delete", "crd", crd_name])
-        except (FileNotFoundError, yaml.YAMLError, KeyError, TypeError) as e:
-            logger.debug(f"CRD cleanup failed (continuing): {e}")
-
-        logger.info("Cleaning up test environment")
-
-        commands = [
-            ["delete", "psmdb-backup", "--all", "--ignore-not-found"],
+    if os.environ.get("OPERATOR_NS"):
+        namespace_commands.append(
             [
                 "delete",
-                "-f",
-                f"{test_paths['test_dir']}/../conf/container-rc.yaml",
+                "--grace-period=0",
+                "--force",
+                "namespace",
+                os.environ.get("OPERATOR_NS"),
                 "--ignore-not-found",
-            ],
-            [
-                "delete",
-                "-f",
-                f"{test_paths['src_dir']}/deploy/{'cw-' if os.environ.get('OPERATOR_NS') else ''}rbac.yaml",
-                "--ignore-not-found",
-            ],
-        ]
-
-        with ThreadPoolExecutor(max_workers=3) as executor:
-            futures = [executor.submit(run_cmd, cmd) for cmd in commands]
-            futures.append(executor.submit(cleanup_crd))
-
-        namespace_commands = [
-            ["delete", "--grace-period=0", "--force", "namespace", namespace, "--ignore-not-found"]
-        ]
-        if os.environ.get("OPERATOR_NS"):
-            namespace_commands.append(
-                [
-                    "delete",
-                    "--grace-period=0",
-                    "--force",
-                    "namespace",
-                    os.environ.get("OPERATOR_NS"),
-                    "--ignore-not-found",
-                ]
-            )
-
-        for cmd in namespace_commands:
-            run_cmd(cmd)
+            ]
+        )
 
-    return _destroy_infra
+    for cmd in namespace_commands:
+        run_cmd(cmd)
 
 
 @pytest.fixture(scope="class")
@@ -344,3 +345,23 @@ def deploy_cert_manager():
             tools.kubectl_bin("delete", "-f", cert_manager_url, "--ignore-not-found")
         except Exception as e:
             logger.error(f"Failed to cleanup cert-manager: {e}")
+
+
+@pytest.fixture(scope="class")
+def psmdb_client(test_paths):
+    """Deploy and get the client pod name."""
+    tools.kubectl_bin("apply", "-f", f"{test_paths['conf_dir']}/client-70.yml")
+
+    result = tools.retry(
+        lambda: tools.kubectl_bin(
+            "get",
+            "pods",
+            "--selector=name=psmdb-client",
+            "-o",
+            "jsonpath={.items[].metadata.name}",
+        ),
+        condition=lambda result: "container not found" not in result,
+    )
+
+    pod_name = result.strip()
+    return tools.MongoManager(pod_name)
diff --git a/e2e-tests/finalizer/test_finalizer.py b/e2e-tests/finalizer/test_finalizer.py
index fbc20e1434..e87bcd84ad 100644
--- a/e2e-tests/finalizer/test_finalizer.py
+++ b/e2e-tests/finalizer/test_finalizer.py
@@ -1,66 +1,58 @@
 import pytest
 import logging
 
-from types import SimpleNamespace
-
 import tools
 
 logger = logging.getLogger(__name__)
 
 
-class TestFinalizer:
-    """Test MongoDB cluster finalizers"""
+@pytest.fixture(scope="class", autouse=True)
+def config(create_infra):
+    """Configuration for tests"""
+    return {
+        "namespace": create_infra("finalizer"),
+        "cluster": "some-name",
+    }
+
 
-    @pytest.fixture(scope="class", autouse=True)
-    def env(self, create_infra, destroy_infra, test_paths):
-        """Setup test environment and cleanup after tests"""
-        try:
-            namespace = create_infra("finalizer")
-            tools.kubectl_bin(
-                "apply",
-                "-f",
-                f"{test_paths['conf_dir']}/secrets_with_tls.yml",
-                "-f",
-                f"{test_paths['conf_dir']}/client-70.yml",
-            )
+@pytest.fixture(scope="class", autouse=True)
+def setup_tests(test_paths):
+    """Setup test environment"""
+    tools.kubectl_bin("apply", "-f", f"{test_paths['conf_dir']}/secrets_with_tls.yml")
 
-            yield SimpleNamespace(
-                test_dir=test_paths["test_dir"],
-                namespace=namespace,
-                cluster="some-name",
-            )
-        except Exception as e:
pytest.fail(f"Environment setup failed: {e}") - finally: - destroy_infra(namespace) + +class TestFinalizer: + """Test MongoDB cluster finalizers""" @pytest.mark.dependency() - def test_create_cluster(self, env): - tools.apply_cluster(f"{env.test_dir}/conf/{env.cluster}.yml") - tools.wait_for_running(f"{env.cluster}-rs0", 3) - tools.wait_for_running(f"{env.cluster}-cfg", 3) + def test_create_cluster(self, config, test_paths): + tools.apply_cluster(f"{test_paths['test_dir']}/conf/{config['cluster']}.yml") + tools.wait_for_running(f"{config['cluster']}-rs0", 3, False) + tools.wait_for_running(f"{config['cluster']}-cfg", 3) @pytest.mark.dependency(depends=["TestFinalizer::test_create_cluster"]) - def test_kill_primary_should_elect_new_one(self, env): - primary = tools.get_mongo_primary( - f"clusterAdmin:clusterAdmin123456@{env.cluster}-rs0.{env.namespace}", env.cluster + def test_kill_primary_should_elect_new_one(self, config, psmdb_client): + primary = psmdb_client.get_mongo_primary( + f"clusterAdmin:clusterAdmin123456@{config['cluster']}-rs0.{config['namespace']}", + config["cluster"], ) - if primary == f"{env.cluster}-rs0-0": + if primary == f"{config['cluster']}-rs0-0": tools.kubectl_bin("delete", "pod", "--grace-period=0", "--force", primary) - tools.wait_for_running(f"{env.cluster}-rs0", 3) - new_primary = tools.get_mongo_primary( - f"clusterAdmin:clusterAdmin123456@{env.cluster}-rs0.{env.namespace}", env.cluster + tools.wait_for_running(f"{config['cluster']}-rs0", 3) + new_primary = psmdb_client.get_mongo_primary( + f"clusterAdmin:clusterAdmin123456@{config['cluster']}-rs0.{config['namespace']}", + config["cluster"], ) assert new_primary != primary, "Primary did not change after killing the pod" @pytest.mark.dependency(depends=["TestFinalizer::test_kill_primary_should_elect_new_one"]) - def test_delete_cluster(self, env): - tools.kubectl_bin("delete", "psmdb", env.cluster, "--wait=false") - tools.wait_for_delete(f"psmdb/{env.cluster}") - - tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-cfg-0") - tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-cfg-1") - tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-cfg-2") - tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-rs0-0") - tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-rs0-1") - tools.wait_for_delete(f"pvc/mongod-data-{env.cluster}-rs0-2") + def test_delete_cluster(self, config): + tools.kubectl_bin("delete", "psmdb", config["cluster"], "--wait=false") + tools.wait_for_delete(f"psmdb/{config['cluster']}") + + tools.wait_for_delete(f"pvc/mongod-data-{config['cluster']}-cfg-0") + tools.wait_for_delete(f"pvc/mongod-data-{config['cluster']}-cfg-1") + tools.wait_for_delete(f"pvc/mongod-data-{config['cluster']}-cfg-2") + tools.wait_for_delete(f"pvc/mongod-data-{config['cluster']}-rs0-0") + tools.wait_for_delete(f"pvc/mongod-data-{config['cluster']}-rs0-1") + tools.wait_for_delete(f"pvc/mongod-data-{config['cluster']}-rs0-2") diff --git a/e2e-tests/init-deploy/test_init_deploy.py b/e2e-tests/init-deploy/test_init_deploy.py index dac4f1d8af..a343a1b401 100644 --- a/e2e-tests/init-deploy/test_init_deploy.py +++ b/e2e-tests/init-deploy/test_init_deploy.py @@ -1,201 +1,212 @@ #!/usr/bin/env python3 import pytest -import time import logging -from types import SimpleNamespace - import tools logger = logging.getLogger(__name__) +@pytest.fixture(scope="class", autouse=True) +def config(create_infra): + """Configuration for tests""" + return { + "namespace": create_infra("init-deploy"), + "cluster": "some-name-rs0", + 
"cluster2": "another-name-rs0", + "max_conn": 17, + } + + +@pytest.fixture(scope="class", autouse=True) +def setup_tests(test_paths): + """Setup test environment""" + tools.kubectl_bin("apply", "-f", f"{test_paths['conf_dir']}/secrets_with_tls.yml") + tools.apply_runtime_class(test_paths["test_dir"]) + + class TestInitDeploy: """Test MongoDB cluster deployment and operations""" - @pytest.fixture(scope="class", autouse=True) - def env(self, create_infra, destroy_infra, test_paths): - """Setup test environment and cleanup after tests""" - try: - namespace = create_infra("init-deploy") - tools.kubectl_bin( - "apply", - "-f", - f"{test_paths['test_dir']}/conf/secrets_with_tls.yml", - "-f", - f"{test_paths['test_dir']}/../conf/client-70.yml", - ) - tools.apply_runtime_class(test_paths["test_dir"]) - - yield SimpleNamespace( - test_dir=test_paths["test_dir"], - conf_dir=test_paths["conf_dir"], - src_dir=test_paths["src_dir"], - namespace=namespace, - cluster="some-name-rs0", - cluster2="another-name-rs0", - max_conn=17, - ) - except Exception as e: - pytest.fail(f"Environment setup failed: {e}") - finally: - destroy_infra(namespace) - @pytest.mark.dependency() - def test_create_first_cluster(self, env): + def test_create_first_cluster(self, config, test_paths): """Create first PSMDB cluster""" - tools.apply_cluster(f"{env.test_dir}/../conf/{env.cluster}.yml") - tools.wait_for_running(env.cluster, 3) + tools.apply_cluster(f"{test_paths['test_dir']}/../conf/{config['cluster']}.yml") + tools.wait_for_running(config["cluster"], 3) - tools.compare_kubectl(env.test_dir, f"statefulset/{env.cluster}", env.namespace) - tools.compare_kubectl(env.test_dir, f"service/{env.cluster}", env.namespace) + tools.compare_kubectl( + test_paths["test_dir"], f"statefulset/{config['cluster']}", config["namespace"] + ) + tools.compare_kubectl( + test_paths["test_dir"], f"service/{config['cluster']}", config["namespace"] + ) @pytest.mark.dependency(depends=["TestInitDeploy::test_create_first_cluster"]) - def test_verify_users_created(self, env): + def test_verify_users_created(self, config, test_paths, psmdb_client): """Check if users created with correct permissions""" secret_name = "some-users" # Test userAdmin user user = tools.get_user_data(secret_name, "MONGODB_USER_ADMIN_USER") password = tools.get_user_data(secret_name, "MONGODB_USER_ADMIN_PASSWORD") - tools.compare_mongo_user( - f"{user}:{password}@{env.cluster}.{env.namespace}", "userAdmin", env.test_dir + psmdb_client.compare_mongo_user( + f"{user}:{password}@{config['cluster']}.{config['namespace']}", + "userAdmin", + test_paths["test_dir"], ) # Test backup user user = tools.get_user_data(secret_name, "MONGODB_BACKUP_USER") password = tools.get_user_data(secret_name, "MONGODB_BACKUP_PASSWORD") - tools.compare_mongo_user( - f"{user}:{password}@{env.cluster}.{env.namespace}", "backup", env.test_dir + psmdb_client.compare_mongo_user( + f"{user}:{password}@{config['cluster']}.{config['namespace']}", + "backup", + test_paths["test_dir"], ) # Test clusterAdmin user user = tools.get_user_data(secret_name, "MONGODB_CLUSTER_ADMIN_USER") password = tools.get_user_data(secret_name, "MONGODB_CLUSTER_ADMIN_PASSWORD") - tools.compare_mongo_user( - f"{user}:{password}@{env.cluster}.{env.namespace}", "clusterAdmin", env.test_dir + psmdb_client.compare_mongo_user( + f"{user}:{password}@{config['cluster']}.{config['namespace']}", + "clusterAdmin", + test_paths["test_dir"], ) # Test clusterMonitor user user = tools.get_user_data(secret_name, "MONGODB_CLUSTER_MONITOR_USER") 
password = tools.get_user_data(secret_name, "MONGODB_CLUSTER_MONITOR_PASSWORD") - tools.compare_mongo_user( - f"{user}:{password}@{env.cluster}.{env.namespace}", "clusterMonitor", env.test_dir + psmdb_client.compare_mongo_user( + f"{user}:{password}@{config['cluster']}.{config['namespace']}", + "clusterMonitor", + test_paths["test_dir"], ) # Test that unauthorized user is rejected - result = tools.run_mongosh( + result = psmdb_client.run_mongosh( "db.runCommand({connectionStatus:1,showPrivileges:true})", - f"test:test@{env.cluster}.{env.namespace}", + f"test:test@{config['cluster']}.{config['namespace']}", ) assert "Authentication failed" in result @pytest.mark.dependency(depends=["TestInitDeploy::test_verify_users_created"]) - def test_write_and_read_data(self, env): + def test_write_and_read_data(self, config, test_paths, psmdb_client): """Write data and read from all nodes""" - tools.run_mongosh( + psmdb_client.run_mongosh( 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})', - f"userAdmin:userAdmin123456@{env.cluster}.{env.namespace}", + f"userAdmin:userAdmin123456@{config['cluster']}.{config['namespace']}", ) - # Wait for user to be fully created - time.sleep(2) - - tools.run_mongosh( - "db.getSiblingDB('myApp').test.insertOne({ x: 100500 })", - f"myApp:myPass@{env.cluster}.{env.namespace}", + tools.retry( + lambda: psmdb_client.run_mongosh( + "db.getSiblingDB('myApp').test.insertOne({ x: 100500 })", + f"myApp:myPass@{config['cluster']}.{config['namespace']}", + ), + condition=lambda result: "acknowledged: true" in result, ) for i in range(3): - tools.compare_mongo_cmd( + psmdb_client.compare_mongo_cmd( "find({}, { _id: 0 }).toArray()", - f"myApp:myPass@{env.cluster}-{i}.{env.cluster}.{env.namespace}", - test_file=f"{env.test_dir}/compare/find-1.json", + f"myApp:myPass@{config['cluster']}-{i}.{config['cluster']}.{config['namespace']}", + test_file=f"{test_paths['test_dir']}/compare/find-1.json", ) @pytest.mark.dependency(depends=["TestInitDeploy::test_write_and_read_data"]) - def test_connection_count(self, env): + def test_connection_count(self, config, psmdb_client): """Check number of connections doesn't exceed maximum""" conn_count = int( - tools.run_mongosh( + psmdb_client.run_mongosh( "db.serverStatus().connections.current", - f"clusterAdmin:clusterAdmin123456@{env.cluster}.{env.namespace}", + f"clusterAdmin:clusterAdmin123456@{config['cluster']}.{config['namespace']}", ).strip() ) - assert conn_count <= env.max_conn, ( - f"Connection count {conn_count} exceeds maximum {env.max_conn}" + assert conn_count <= config["max_conn"], ( + f"Connection count {conn_count} exceeds maximum {config['max_conn']}" ) @pytest.mark.dependency(depends=["TestInitDeploy::test_connection_count"]) - def test_primary_failover(self, env): + def test_primary_failover(self, config, test_paths, psmdb_client): """Kill Primary Pod, check reelection, check data""" - initial_primary = tools.get_mongo_primary( - f"clusterAdmin:clusterAdmin123456@{env.cluster}.{env.namespace}", env.cluster + initial_primary = psmdb_client.get_mongo_primary( + f"clusterAdmin:clusterAdmin123456@{config['cluster']}.{config['namespace']}", + config["cluster"], ) assert initial_primary, "Failed to get initial primary" tools.kubectl_bin( - "delete", "pods", "--grace-period=0", "--force", initial_primary, "-n", env.namespace + "delete", + "pods", + "--grace-period=0", + "--force", + initial_primary, + "-n", + config["namespace"], ) - tools.wait_for_running(env.cluster, 3) + 
tools.wait_for_running(config["cluster"], 3) - changed_primary = tools.get_mongo_primary( - f"clusterAdmin:clusterAdmin123456@{env.cluster}.{env.namespace}", env.cluster + changed_primary = psmdb_client.get_mongo_primary( + f"clusterAdmin:clusterAdmin123456@{config['cluster']}.{config['namespace']}", + config["cluster"], ) assert initial_primary != changed_primary, "Primary didn't change after pod deletion" - tools.run_mongosh( + psmdb_client.run_mongosh( "db.getSiblingDB('myApp').test.insertOne({ x: 100501 })", - f"myApp:myPass@{env.cluster}.{env.namespace}", + f"myApp:myPass@{config['cluster']}.{config['namespace']}", ) for i in range(3): - tools.compare_mongo_cmd( + psmdb_client.compare_mongo_cmd( "find({}, { _id: 0 }).toArray()", - f"myApp:myPass@{env.cluster}-{i}.{env.cluster}.{env.namespace}", + f"myApp:myPass@{config['cluster']}-{i}.{config['cluster']}.{config['namespace']}", "-2nd", - test_file=f"{env.test_dir}/compare/find-2.json", + test_file=f"{test_paths['test_dir']}/compare/find-2.json", ) @pytest.mark.dependency(depends=["TestInitDeploy::test_primary_failover"]) - def test_create_second_cluster(self, env): + def test_create_second_cluster(self, config, test_paths): """Check if possible to create second cluster""" - tools.apply_cluster(f"{env.test_dir}/conf/{env.cluster2}.yml") - tools.wait_for_running(env.cluster2, 3) - tools.compare_kubectl(env.test_dir, f"statefulset/{env.cluster2}", env.namespace) - tools.compare_kubectl(env.test_dir, f"service/{env.cluster2}", env.namespace) + tools.apply_cluster(f"{test_paths['test_dir']}/conf/{config['cluster2']}.yml") + tools.wait_for_running(config["cluster2"], 3) + tools.compare_kubectl( + test_paths["test_dir"], f"statefulset/{config['cluster2']}", config["namespace"] + ) + tools.compare_kubectl( + test_paths["test_dir"], f"service/{config['cluster2']}", config["namespace"] + ) @pytest.mark.dependency(depends=["TestInitDeploy::test_create_second_cluster"]) - def test_second_cluster_data_operations(self, env): + def test_second_cluster_data_operations(self, config, test_paths, psmdb_client): """Write data and read from all nodes in second cluster""" # Create user - tools.run_mongosh( + psmdb_client.run_mongosh( 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})', - f"userAdmin:userAdmin123456@{env.cluster2}.{env.namespace}", + f"userAdmin:userAdmin123456@{config['cluster2']}.{config['namespace']}", ) # Write data - tools.run_mongosh( + psmdb_client.run_mongosh( "db.getSiblingDB('myApp').test.insertOne({ x: 100502 })", - f"myApp:myPass@{env.cluster2}.{env.namespace}", + f"myApp:myPass@{config['cluster2']}.{config['namespace']}", ) # Read from all nodes for i in range(3): - tools.compare_mongo_cmd( + psmdb_client.compare_mongo_cmd( "find({}, { _id: 0 }).toArray()", - f"myApp:myPass@{env.cluster2}-{i}.{env.cluster2}.{env.namespace}", + f"myApp:myPass@{config['cluster2']}-{i}.{config['cluster2']}.{config['namespace']}", "-3rd", - test_file=f"{env.test_dir}/compare/find-3.json", + test_file=f"{test_paths['test_dir']}/compare/find-3.json", ) @pytest.mark.dependency(depends=["TestInitDeploy::test_second_cluster_data_operations"]) - def test_log_files_exist(self, env): + def test_log_files_exist(self, config): """Check if mongod log files exist in pod""" result = tools.kubectl_bin( - "exec", f"{env.cluster2}-0", "-c", "mongod", "--", "ls", "/data/db/logs" + "exec", f"{config['cluster2']}-0", "-c", "mongod", "--", "ls", "/data/db/logs" ) assert "mongod.log" in result, "mongod.log not found" diff --git 
diff --git a/e2e-tests/tools.py b/e2e-tests/tools.py
index 399d9f4477..bb38efa7ac 100644
--- a/e2e-tests/tools.py
+++ b/e2e-tests/tools.py
@@ -10,9 +10,18 @@ from deepdiff import DeepDiff
 from pathlib import Path
+from typing import Callable, Optional, Any
 
 logger = logging.getLogger(__name__)
 
+RED = "\033[31m"
+GREEN = "\033[32m"
+YELLOW = "\033[33m"
+BLUE = "\033[34m"
+MAGENTA = "\033[35m"
+CYAN = "\033[36m"
+RESET = "\033[0m"
+
 
 def kubectl_bin(*args, check: bool = True, input_data: str = "") -> str:
     """Execute kubectl command"""
@@ -289,21 +298,13 @@ def clean_all_namespaces() -> None:
         logger.error("Failed to clean namespaces")
 
 
-def destroy(namespace: str) -> None:
-    """Destroy test infrastructure"""
-    try:
-        kubectl_bin("delete", "namespace", namespace, "--ignore-not-found")
-    except subprocess.CalledProcessError:
-        pass
-
-
 def wait_pod(pod_name: str, timeout: str = "360") -> None:
     """Wait for pod to be ready."""
-    logger.info(f"Waiting for pod/{pod_name} to be ready...")
-    time.sleep(2)
+    logger.info(f"Waiting for {CYAN}pod/{pod_name}{RESET} to be ready...")
+    time.sleep(4)
     try:
         kubectl_bin("wait", f"pod/{pod_name}", "--for=condition=ready", f"--timeout={timeout}s")
-        logger.info(f"Pod {pod_name} is ready")
+        logger.info(f"Pod {CYAN}{pod_name}{RESET} is ready")
     except subprocess.CalledProcessError as e:
         raise TimeoutError(f"Pod {pod_name} did not become ready within {timeout}s") from e
@@ -339,17 +340,17 @@ def wait_for_running(
         cluster_name = cluster_name.replace(f"-{rs_name}", "")
     if check_cluster_readyness:
         start_time = time.time()
-        logger.info(f"Waiting for Cluster {cluster_name} readiness")
+        logger.info(f"Waiting for cluster {CYAN}{cluster_name}{RESET} readiness")
         while time.time() - start_time < timeout:
             try:
                 result = kubectl_bin(
                     "get", "psmdb", cluster_name, "-o", "jsonpath={.status.state}"
                 ).strip("'")
                 if result == "ready":
-                    logger.info(f"Cluster {cluster_name} is ready")
+                    logger.info(f"Cluster {CYAN}{cluster_name}{RESET} is ready")
                     return
             except subprocess.CalledProcessError:
-                logger.error(f"Error checking cluster {cluster_name} readiness")
+                logger.error(f"Error checking cluster {CYAN}{cluster_name}{RESET} readiness")
                 pass
             time.sleep(1)
         raise TimeoutError(f"Timeout waiting for {cluster_name} to be ready")
@@ -357,7 +358,7 @@ def wait_for_running(
 
 def wait_for_delete(resource: str, timeout: int = 180) -> None:
     """Wait for a specific resource to be deleted"""
-    logger.info(f"Waiting for {resource} to be deleted")
+    logger.info(f"Waiting for {CYAN}{resource}{RESET} to be deleted")
     time.sleep(1)
     try:
         kubectl_bin("wait", "--for=delete", resource, f"--timeout={timeout}s")
@@ -397,127 +398,6 @@ def compare_kubectl(test_dir: str, resource: str, namespace: str, postfix: str =
         raise ValueError(f"Failed to process resource {resource}: {e}")
 
 
-def get_mongo_primary(uri: str, cluster_name: str) -> str:
-    """Get current MongoDB primary node"""
-    primary_endpoint = run_mongosh("EJSON.stringify(db.hello().me)", uri)
-
-    if cluster_name in primary_endpoint:
-        return primary_endpoint.split(".")[0].replace('"', "")
-    else:
-        endpoint_host = primary_endpoint.split(":")[0]
-        result = kubectl_bin("get", "service", "-o", "wide")
-
-        for line in result.splitlines():
-            if endpoint_host in line:
-                return line.split()[0].replace('"', "")
-        raise ValueError("Primary node not found in service list")
-
-
-def compare_mongo_cmd(
-    command: str,
-    uri: str,
-    postfix: str = "",
-    suffix: str = "",
-    database: str = "myApp",
-    collection: str = "test",
-    sort: str = "",
-    test_file: str = "",
-) -> None:
-    """Compare MongoDB command output"""
-    full_cmd = f"{collection}.{command}"
-    if sort:
-        full_cmd = f"{collection}.{command}.{sort}"
-
-    logger.info(f"Running command: {full_cmd} on database: {database}")
-
-    mongo_expr = f"EJSON.stringify(db.getSiblingDB('{database}').{full_cmd})"
-    result = json.loads(run_mongosh(mongo_expr, uri, "mongodb"))
-
-    logger.info(f"MongoDB command output: {result}")
-
-    with open(test_file) as file:
-        expected = json.load(file)
-
-    diff = DeepDiff(expected, result)
-    assert not diff, f"MongoDB command output differs: {diff.pretty()}"
-
-
-def compare_mongo_user(uri: str, expected_role: str, test_dir) -> None:
-    """Compare MongoDB user permissions"""
-
-    def get_expected_file(test_dir, user):
-        """Get the appropriate expected file based on MongoDB version"""
-        base_path = Path(test_dir) / "compare"
-        base_file = base_path / f"{user}.json"
-
-        # Check for version-specific files
-        image_mongod = os.environ.get("IMAGE_MONGOD", "")
-        version_mappings = [("8.0", "-80"), ("7.0", "-70"), ("6.0", "-60")]
-
-        for version, suffix in version_mappings:
-            if version in image_mongod:
-                version_file = base_path / f"{user}{suffix}.json"
-                if version_file.exists():
-                    logger.info(f"Using version-specific file: {version_file}")
-                    with open(version_file) as f:
-                        return json.load(f)
-
-        # Fall back to base file
-        if base_file.exists():
-            logger.info(f"Using base file: {base_file}")
-            with open(base_file) as f:
-                return json.load(f)
-        else:
-            raise FileNotFoundError(f"Expected file not found: {base_file}")
-
-    def clean_mongo_json(data):
-        """Remove timestamps and metadata from MongoDB response"""
-
-        def remove_timestamps(obj):
-            if isinstance(obj, dict):
-                return {
-                    k: remove_timestamps(v)
-                    for k, v in obj.items()
-                    if k not in {"ok", "$clusterTime", "operationTime"}
-                }
-            elif isinstance(obj, list):
-                return [remove_timestamps(v) for v in obj]
-            elif isinstance(obj, str):
-                # Remove ISO timestamp patterns
-                return re.sub(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}[\+\-]\d{4}", "", obj)
-            else:
-                return obj
-
-        return remove_timestamps(data)
-
-    # TODO: consider a different approach to ignore order when comparing
-    def ordered(obj):
-        if isinstance(obj, dict):
-            return sorted((k, ordered(v)) for k, v in obj.items())
-        if isinstance(obj, list):
-            return sorted(ordered(x) for x in obj)
-        else:
-            return obj
-
-    # Get actual MongoDB user permissions
-    try:
-        result = run_mongosh(
-            "EJSON.stringify(db.runCommand({connectionStatus:1,showPrivileges:true}))",
-            uri,
-        )
-        actual_data = clean_mongo_json(json.loads(result))
-
-    except Exception as e:
-        raise RuntimeError(f"Failed to get MongoDB user permissions: {e}")
-
-    expected_data = get_expected_file(test_dir, expected_role)
-    expected_data = ordered(expected_data)
-    actual_data = ordered(actual_data)
-
-    diff = DeepDiff(expected_data, actual_data, ignore_order=True)
-    assert not diff, f"MongoDB user permissions differ: {diff.pretty()}"
-
-
 def apply_runtime_class(test_dir: str) -> None:
     """Apply runtime class configuration"""
@@ -661,35 +541,6 @@ def filter_yaml_with_yq(
     return filtered_yaml
 
 
-def run_mongosh(
-    command: str,
-    uri: str,
-    driver: str = "mongodb+srv",
-    suffix: str = ".svc.cluster.local",
-    mongo_flag: str = "",
-) -> str:
-    """Execute mongosh command in PSMDB client container."""
-    client_container = get_client_container()
-
-    replica_set = "cfg" if "cfg" in uri else "rs0"
-    connection_string = f"{driver}://{uri}{suffix}/admin?ssl=false&replicaSet={replica_set}"
-    if mongo_flag:
-        connection_string += f" {mongo_flag}"
-
-    result = kubectl_bin(
-        "exec",
-        client_container,
-        "--",
-        "mongosh",
-        f"{connection_string}",
-        "--eval",
-        command,
-        "--quiet",
-        check=False,
-    )
-    return result
-
-
 def get_kubernetes_versions() -> tuple[str, str]:
     """Get Kubernetes git version and semantic version."""
     output = kubectl_bin("version", "-o", "json")
@@ -703,16 +554,184 @@ def get_kubernetes_versions() -> tuple[str, str]:
     return git_version, kube_version
 
 
-# TODO: Cache the client container name to avoid repeated kubectl calls.
-def get_client_container():
-    """Get the client container name once per test session."""
-    result = kubectl_bin(
-        "get", "pods", "--selector=name=psmdb-client", "-o", "jsonpath={.items[].metadata.name}"
-    )
-    return result.strip()
-
-
 # TODO: implement this function
 def check_passwords_leak(namespace=None):
     """Check for password leaks in Kubernetes pod logs."""
     pass
+
+
+def retry(
+    func: Callable[[], Any],
+    max_attempts: int = 5,
+    delay: int = 1,
+    condition: Optional[Callable[[Any], bool]] = None,
+) -> Any:
+    """Retry a function until it succeeds or max attempts reached."""
+    for attempt in range(max_attempts):
+        try:
+            result = func()
+            if condition is None or condition(result):
+                return result
+        except Exception:
+            if attempt == max_attempts - 1:
+                raise
+
+        time.sleep(delay)
+
+    raise Exception(f"Max attempts ({max_attempts}) reached")
+
+
+class MongoManager:
+    def __init__(self, client: str):
+        self.client = client
+
+    def run_mongosh(
+        self,
+        command: str,
+        uri: str,
+        driver: str = "mongodb+srv",
+        suffix: str = ".svc.cluster.local",
+        mongo_flag: str = "",
+        timeout: int = 30,
+    ) -> str:
+        """Execute mongosh command in PSMDB client container."""
+        replica_set = "cfg" if "cfg" in uri else "rs0"
+        connection_string = f"{driver}://{uri}{suffix}/admin?ssl=false&replicaSet={replica_set}"
+        if mongo_flag:
+            connection_string += f" {mongo_flag}"
+
+        result = kubectl_bin(
+            "exec",
+            self.client,
+            "--",
+            "timeout",
+            str(timeout),
+            "mongosh",
+            f"{connection_string}",
+            "--eval",
+            command,
+            "--quiet",
+            check=False,
+        )
+        return result
+
+    def compare_mongo_user(self, uri: str, expected_role: str, test_dir) -> None:
+        """Compare MongoDB user permissions"""
+
+        def get_expected_file(test_dir, user):
+            """Get the appropriate expected file based on MongoDB version"""
+            base_path = Path(test_dir) / "compare"
+            base_file = base_path / f"{user}.json"
+
+            # Check for version-specific files
+            image_mongod = os.environ.get("IMAGE_MONGOD", "")
+            version_mappings = [("8.0", "-80"), ("7.0", "-70"), ("6.0", "-60")]
+
+            for version, suffix in version_mappings:
+                if version in image_mongod:
+                    version_file = base_path / f"{user}{suffix}.json"
+                    if version_file.exists():
+                        logger.info(f"Using version-specific file: {version_file}")
+                        with open(version_file) as f:
+                            return json.load(f)
+
+            # Fall back to base file
+            if base_file.exists():
+                logger.info(f"Using base file: {base_file}")
+                with open(base_file) as f:
+                    return json.load(f)
+            else:
+                raise FileNotFoundError(f"Expected file not found: {base_file}")
+
+        def clean_mongo_json(data):
+            """Remove timestamps and metadata from MongoDB response"""
+
+            def remove_timestamps(obj):
+                if isinstance(obj, dict):
+                    return {
+                        k: remove_timestamps(v)
+                        for k, v in obj.items()
+                        if k not in {"ok", "$clusterTime", "operationTime"}
+                    }
+                elif isinstance(obj, list):
+                    return [remove_timestamps(v) for v in obj]
+                elif isinstance(obj, str):
+                    # Remove ISO timestamp patterns
+                    return re.sub(
+                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}[\+\-]\d{4}", "", obj
+                    )
+                else:
+                    return obj
+
+            return remove_timestamps(data)
+
+        # TODO: consider a different approach to ignore order when comparing
+        def ordered(obj):
+            if isinstance(obj, dict):
+                return sorted((k, ordered(v)) for k, v in obj.items())
+            if isinstance(obj, list):
+                return sorted(ordered(x) for x in obj)
+            else:
+                return obj
+
+        # Get actual MongoDB user permissions
+        try:
+            result = self.run_mongosh(
+                "EJSON.stringify(db.runCommand({connectionStatus:1,showPrivileges:true}))",
+                uri,
+            )
+            actual_data = clean_mongo_json(json.loads(result))
+
+        except Exception as e:
+            raise RuntimeError(f"Failed to get MongoDB user permissions: {e}")
+
+        expected_data = get_expected_file(test_dir, expected_role)
+        expected_data = ordered(expected_data)
+        actual_data = ordered(actual_data)
+
+        diff = DeepDiff(expected_data, actual_data, ignore_order=True)
+        assert not diff, f"MongoDB user permissions differ: {diff.pretty()}"
+
+    def compare_mongo_cmd(
+        self,
+        command: str,
+        uri: str,
+        postfix: str = "",
+        suffix: str = "",
+        database: str = "myApp",
+        collection: str = "test",
+        sort: str = "",
+        test_file: str = "",
+    ) -> None:
+        """Compare MongoDB command output"""
+        full_cmd = f"{collection}.{command}"
+        if sort:
+            full_cmd = f"{collection}.{command}.{sort}"
+
+        logger.info(f"Running: {CYAN}{full_cmd}{RESET} on db {CYAN}{database}{RESET}")
+
+        mongo_expr = f"EJSON.stringify(db.getSiblingDB('{database}').{full_cmd})"
+        result = json.loads(self.run_mongosh(mongo_expr, uri, "mongodb"))
+
+        logger.info(f"MongoDB output: {CYAN}{result}{RESET}")
+
+        with open(test_file) as file:
+            expected = json.load(file)
+
+        diff = DeepDiff(expected, result)
+        assert not diff, f"MongoDB command output differs: {diff.pretty()}"
+
+    def get_mongo_primary(self, uri: str, cluster_name: str) -> str:
+        """Get current MongoDB primary node"""
+        primary_endpoint = self.run_mongosh("EJSON.stringify(db.hello().me)", uri)
+
+        if cluster_name in primary_endpoint:
+            return primary_endpoint.split(".")[0].replace('"', "")
+        else:
+            endpoint_host = primary_endpoint.split(":")[0]
+            result = kubectl_bin("get", "service", "-o", "wide")
+
+            for line in result.splitlines():
+                if endpoint_host in line:
+                    return line.split()[0].replace('"', "")
+            raise ValueError("Primary node not found in service list")
diff --git a/uv.lock b/uv.lock
index 7d9281e01e..17bd039d25 100644
--- a/uv.lock
+++ b/uv.lock
@@ -150,6 +150,7 @@ dependencies = [
     { name = "pytest-json-report" },
     { name = "pyyaml" },
     { name = "ruff" },
+    { name = "types-pyyaml" },
 ]
 
 [package.metadata]
@@ -162,6 +163,7 @@ requires-dist = [
     { name = "pytest-json-report", specifier = ">=1.5.0" },
     { name = "pyyaml", specifier = ">=6.0.2" },
     { name = "ruff", specifier = ">=0.11.12" },
+    { name = "types-pyyaml", specifier = ">=6.0.12.20250915" },
 ]
 
 [[package]]
@@ -289,6 +291,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486 },
 ]
 
+[[package]]
+name = "types-pyyaml"
+version = "6.0.12.20250915"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522 }
+wheels = [
url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338 }, +] + [[package]] name = "typing-extensions" version = "4.14.0" From 37aa18d969df6689209354b5840d546a67e99722 Mon Sep 17 00:00:00 2001 From: Julio Pasinatto Date: Thu, 25 Sep 2025 20:47:51 -0300 Subject: [PATCH 3/4] Make wait more robust --- e2e-tests/conftest.py | 2 +- e2e-tests/tools.py | 74 +++++++++++++++++++++++-------------------- 2 files changed, 40 insertions(+), 36 deletions(-) diff --git a/e2e-tests/conftest.py b/e2e-tests/conftest.py index 89149e392a..070d484f0e 100644 --- a/e2e-tests/conftest.py +++ b/e2e-tests/conftest.py @@ -20,7 +20,7 @@ def setup_env_vars(): """Setup environment variables for the test session.""" git_branch = tools.get_git_branch() - git_version, kube_version = tools.get_kubernetes_versions() + git_version, kube_version = tools.get_k8s_versions() os.environ.setdefault("KUBE_VERSION", kube_version) os.environ.setdefault("EKS", "1" if "eks" in git_version else "0") diff --git a/e2e-tests/tools.py b/e2e-tests/tools.py index bb38efa7ac..d6bc39f4b6 100644 --- a/e2e-tests/tools.py +++ b/e2e-tests/tools.py @@ -45,23 +45,18 @@ def cat_config(config_file: str) -> str: if "spec" in config: spec = config["spec"] - # Set mongod image if not present if "image" not in spec or spec["image"] is None: spec["image"] = os.environ.get("IMAGE_MONGOD") - # Set PMM client image if "pmm" in spec: spec["pmm"]["image"] = os.environ.get("IMAGE_PMM_CLIENT") - # Set init image if "initImage" in spec: spec["initImage"] = os.environ.get("IMAGE") - # Set backup image if "backup" in spec: spec["backup"]["image"] = os.environ.get("IMAGE_BACKUP") - # Set upgrade options if "upgradeOptions" not in spec: spec["upgradeOptions"] = {} spec["upgradeOptions"]["apply"] = "Never" @@ -127,7 +122,6 @@ def delete_crd_rbac(src_dir: Path) -> None: '{"metadata":{"finalizers":[]}}', ) except subprocess.CalledProcessError: - # Kind may not exist or no instances exist; ignore pass for name in crd_names: @@ -298,15 +292,27 @@ def clean_all_namespaces() -> None: logger.error("Failed to clean namespaces") -def wait_pod(pod_name: str, timeout: str = "360") -> None: +def wait_pod(pod_name: str, timeout: int = 360) -> None: """Wait for pod to be ready.""" + start_time = time.time() logger.info(f"Waiting for {CYAN}pod/{pod_name}{RESET} to be ready...") - time.sleep(4) - try: - kubectl_bin("wait", f"pod/{pod_name}", "--for=condition=ready", f"--timeout={timeout}s") - logger.info(f"Pod {CYAN}{pod_name}{RESET} is ready") - except subprocess.CalledProcessError as e: - raise TimeoutError(f"Pod {pod_name} did not become ready within {timeout}s") from e + while time.time() - start_time < timeout: + try: + result = kubectl_bin( + "get", + "pod", + pod_name, + "-o", + "jsonpath={.status.conditions[?(@.type=='Ready')].status}", + ).strip("'") + if result == "True": + logger.info(f"Pod {CYAN}{pod_name}{RESET} is ready") + return + except subprocess.CalledProcessError: + # Pod likely not created yet + pass + time.sleep(1) + raise TimeoutError(f"Timeout waiting for {pod_name} to be ready") def wait_for_running( @@ -385,8 +391,8 @@ def compare_kubectl(test_dir: str, resource: str, namespace: str, postfix: str = with open(expected_result, "r") as f: expected_yaml = f.read() - filtered_actual = filter_yaml_with_yq(actual_yaml, namespace) - filtered_expected = 
filter_yaml_with_yq(expected_yaml, namespace) + filtered_actual = filter_yaml(actual_yaml, namespace) + filtered_expected = filter_yaml(expected_yaml, namespace) actual_data = yaml.safe_load(filtered_actual) expected_data = yaml.safe_load(filtered_expected) @@ -421,6 +427,19 @@ def detect_k8s_provider(provider: str) -> str: return "0" +def get_k8s_versions() -> tuple[str, str]: + """Get Kubernetes git version and semantic version.""" + output = kubectl_bin("version", "-o", "json") + version_info = json.loads(output)["serverVersion"] + + git_version = version_info["gitVersion"] + major = version_info["major"] + minor = version_info["minor"].rstrip("+") + kube_version = f"{major}.{minor}" + + return git_version, kube_version + + def get_git_commit() -> str: result = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True) return result.stdout.strip() @@ -476,7 +495,7 @@ def get_user_data(secret_name: str, data_key: str) -> str: return urllib.parse.quote(secret_data, safe="") -def filter_yaml_with_yq( +def filter_yaml( yaml_content: str, namespace: str, resource: str = "", skip_generation_check: bool = False ) -> str: """Filter YAML content using yq command""" @@ -541,19 +560,6 @@ def filter_yaml_with_yq( return filtered_yaml -def get_kubernetes_versions() -> tuple[str, str]: - """Get Kubernetes git version and semantic version.""" - output = kubectl_bin("version", "-o", "json") - version_info = json.loads(output)["serverVersion"] - - git_version = version_info["gitVersion"] - major = version_info["major"] - minor = version_info["minor"].rstrip("+") - kube_version = f"{major}.{minor}" - - return git_version, kube_version - - # TODO: implement this function def check_passwords_leak(namespace=None): """Check for password leaks in Kubernetes pod logs.""" @@ -675,15 +681,13 @@ def ordered(obj): return obj # Get actual MongoDB user permissions - try: - result = self.run_mongosh( + result = retry( + lambda: self.run_mongosh( "EJSON.stringify(db.runCommand({connectionStatus:1,showPrivileges:true}))", uri, ) - actual_data = clean_mongo_json(json.loads(result)) - - except Exception as e: - raise RuntimeError(f"Failed to get MongoDB user permissions: {e}") + ) + actual_data = clean_mongo_json(json.loads(result)) expected_data = get_expected_file(test_dir, expected_role) expected_data = ordered(expected_data) From c784bf018fc1678db3152ca647d2cd2be576b506 Mon Sep 17 00:00:00 2001 From: Julio Pasinatto Date: Fri, 26 Sep 2025 09:14:38 -0300 Subject: [PATCH 4/4] Add liveness test --- e2e-tests/liveness/test_liveness.py | 81 +++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 e2e-tests/liveness/test_liveness.py diff --git a/e2e-tests/liveness/test_liveness.py b/e2e-tests/liveness/test_liveness.py new file mode 100644 index 0000000000..ce5dc738e3 --- /dev/null +++ b/e2e-tests/liveness/test_liveness.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 + +import pytest +import logging +import re + +import tools + +logger = logging.getLogger(__name__) + + +@pytest.fixture(scope="class", autouse=True) +def config(create_infra): + """Configuration for tests""" + return { + "namespace": create_infra("liveness"), + "cluster": "liveness", + } + + +@pytest.fixture(scope="class", autouse=True) +def setup_tests(test_paths): + """Setup test environment""" + tools.kubectl_bin("apply", "-f", f"{test_paths['conf_dir']}/secrets_with_tls.yml") + + +class TestLiveness: + @pytest.mark.dependency() + def test_create_first_cluster(self, config, test_paths): + """Create first PSMDB 
cluster""" + tools.apply_cluster(f"{test_paths['test_dir']}/conf/{config['cluster']}-rs0.yml") + tools.wait_for_running(f"{config['cluster']}-rs0", 3) + + tools.compare_kubectl( + test_paths["test_dir"], f"statefulset/{config['cluster']}-rs0", config["namespace"] + ) + + @pytest.mark.dependency(depends=["TestLiveness::test_create_first_cluster"]) + def test_liveness_check_fails_with_invalid_ssl_option(self, config): + tools.kubectl_bin( + "exec", + f"{config['cluster']}-rs0-0", + "-c", + "mongod", + "--", + "bash", + "-c", + "/opt/percona/mongodb-healthcheck k8s liveness --ssl", + check=False, + ) + + logs_output = tools.kubectl_bin( + "exec", + f"{config['cluster']}-rs0-0", + "-c", + "mongod", + "--", + "bash", + "-c", + "ls /data/db/mongod-data/logs", + ) + log_count = logs_output.count("mongodb-healthcheck.log") + assert log_count == 1, f"Expected 1 healthcheck log file, got {log_count}" + + rotated_count = len(re.findall(r"mongodb-healthcheck-.*\.log\.gz", logs_output)) + assert rotated_count >= 1, f"Expected >=1 rotated logs, got {rotated_count}" + + @pytest.mark.dependency( + depends=["TestLiveness::test_liveness_check_fails_with_invalid_ssl_option"] + ) + def test_change_liveness_config(self, config, test_paths): + tools.apply_cluster(f"{test_paths['test_dir']}/conf/{config['cluster']}-rs0-changed.yml") + + tools.wait_for_running(f"{config['cluster']}-rs0", 3) + + tools.compare_kubectl( + test_paths["test_dir"], + f"statefulset/{config['cluster']}-rs0", + config["namespace"], + "-changed", + )