Commit 2f01184

Allow for more than one deployment per test under cloud-qa
1 parent 35b2a30 commit 2f01184

6 files changed: +118 -32 lines changed

docker/mongodb-kubernetes-tests/kubetester/mongodb.py

Lines changed: 48 additions & 1 deletion
@@ -235,7 +235,18 @@ def __repr__(self):
 
     def configure(
         self,
-        om: MongoDBOpsManager,
+        om: Optional[MongoDBOpsManager],
+        project_name: str,
+        api_client: Optional[client.ApiClient] = None,
+    ) -> MongoDB:
+        if om is not None:
+            return self.configure_ops_manager(om, project_name, api_client=api_client)
+        else:
+            return self.configure_cloud_qa(project_name, api_client=api_client)
+
+    def configure_ops_manager(
+        self,
+        om: Optional[MongoDBOpsManager],
         project_name: str,
         api_client: Optional[client.ApiClient] = None,
     ) -> MongoDB:
@@ -252,6 +263,39 @@ def configure(
         self["spec"]["credentials"] = om.api_key_secret(self.namespace, api_client=api_client)
         return self
 
+    def configure_cloud_qa(
+        self,
+        project_name,
+        src_project_config_map_name: str = None,
+        api_client: Optional[client.ApiClient] = None,
+    ) -> MongoDB:
+        if "opsManager" in self["spec"]:
+            del self["spec"]["opsManager"]
+
+        if src_project_config_map_name is None and "cloudManager" in self["spec"]:
+            src_project_config_map_name = self["spec"]["cloudManager"]["configMapRef"]["name"]
+        else:
+            # my-project cm and my-credentials secret are created by scripts/evergreen/e2e/configure_operator.sh
+            src_project_config_map_name = "my-project"
+
+        try:
+            src_cm = read_configmap(self.namespace, src_project_config_map_name, api_client=api_client)
+        except client.ApiException as e:
+            if e.status == 404:
+                logger.debug("project config map is not specified, trying my-project as the source")
+                src_cm = read_configmap(self.namespace, "my-project", api_client=api_client)
+            else:
+                raise e
+
+        new_project_config_map_name = f"{self.name}-project-config"
+        ensure_nested_objects(self, ["spec", "cloudManager", "configMapRef"])
+        self["spec"]["cloudManager"]["configMapRef"]["name"] = new_project_config_map_name
+
+        src_cm.update({"projectName": f"{self.namespace}-{project_name}"})
+        create_or_update_configmap(self.namespace, new_project_config_map_name, src_cm, api_client=api_client)
+
+        return self
+
     def configure_backup(self, mode: str = "enabled") -> MongoDB:
         ensure_nested_objects(self, ["spec", "backup"])
         self["spec"]["backup"]["mode"] = mode
@@ -454,6 +498,9 @@ def get_external_domain(self):
     def config_map_name(self) -> str:
         if "opsManager" in self["spec"]:
             return self["spec"]["opsManager"]["configMapRef"]["name"]
+        elif "cloudManager" in self["spec"]:
+            return self["spec"]["cloudManager"]["configMapRef"]["name"]
+
         return self["spec"]["project"]
 
     def shard_replicaset_names(self) -> List[str]:
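Together these changes let configure() route to configure_ops_manager() when an Ops Manager resource is supplied and to configure_cloud_qa() when it is not, so each MongoDB resource in a test can own a dedicated Cloud QA project ConfigMap (named <resource-name>-project-config, with projectName set to <namespace>-<project_name>). A minimal usage sketch, with an illustrative fixture and resource name that are not part of this commit:

import pytest
from kubetester.kubetester import fixture as yaml_fixture
from kubetester.mongodb import MongoDB


@pytest.fixture(scope="function")
def extra_replica_set(namespace: str) -> MongoDB:
    # om=None routes configure() to configure_cloud_qa(), which clones the
    # "my-project" ConfigMap into "rs-extra-project-config" for this resource.
    return MongoDB.from_yaml(
        yaml_fixture("replica-set-basic.yaml"),
        namespace=namespace,
        name="rs-extra",  # illustrative name
    ).configure(om=None, project_name="rs-extra")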

docker/mongodb-kubernetes-tests/kubetester/omtester.py

Lines changed: 2 additions & 7 deletions
@@ -20,16 +20,11 @@
 from kubetester.om_queryable_backups import OMQueryableBackup
 from opentelemetry import trace
 from requests.adapters import HTTPAdapter, Retry
+from tests.common.ops_manager.cloud_manager import is_cloud_qa
 
 from .kubetester import get_env_var_or_fail
 
-
-def running_cloud_manager():
-    "Determines if the current test is running against Cloud Manager"
-    return get_env_var_or_fail("OM_HOST") == "https://cloud-qa.mongodb.com"
-
-
-skip_if_cloud_manager = pytest.mark.skipif(running_cloud_manager(), reason="Do not run in Cloud Manager")
+skip_if_cloud_manager = pytest.mark.skipif(is_cloud_qa(), reason="Do not run in Cloud Manager")
 
 
 class BackupStatus(str, Enum):
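skip_if_cloud_manager keeps its name but is now evaluated via the shared is_cloud_qa() helper instead of requiring OM_HOST to be set. Usage is unchanged; a small sketch with an illustrative test name:

from kubetester.omtester import skip_if_cloud_manager


@skip_if_cloud_manager
def test_ops_manager_only_behaviour():
    # Skipped whenever the suite runs against cloud-qa
    ...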

docker/mongodb-kubernetes-tests/tests/multicluster_shardedcluster/multi_cluster_sharded_disaster_recovery.py

Lines changed: 1 addition & 6 deletions
@@ -12,12 +12,10 @@
     update_configmap,
 )
 from kubetester.kubetester import (
-    KubernetesTester,
     ensure_ent_version,
 )
 from kubetester.kubetester import fixture as yaml_fixture
 from kubetester.kubetester import (
-    get_env_var_or_fail,
     is_default_architecture_static,
     is_multi_cluster,
     run_periodically,
@@ -29,6 +27,7 @@
 from kubetester.phase import Phase
 from pytest import fixture, mark
 from tests import test_logger
+from tests.common.ops_manager.cloud_manager import is_cloud_qa
 from tests.conftest import (
     MULTI_CLUSTER_MEMBER_LIST_CONFIGMAP,
     get_central_cluster_client,
@@ -54,10 +53,6 @@
 # to reconfigure the deployment further.
 
 
-def is_cloud_qa() -> bool:
-    return os.getenv("ops_manager_version", "cloud_qa") == "cloud_qa"
-
-
 @mark.e2e_multi_cluster_sharded_disaster_recovery
 def test_install_operator(multi_cluster_operator: Operator):
     multi_cluster_operator.assert_is_running()
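The locally defined is_cloud_qa() is dropped in favour of the shared helper imported from tests.common.ops_manager.cloud_manager. That module is not shown in this commit; based on the removed code and the ops_manager_version export added to scripts/dev/contexts/variables/om80 below, it presumably reduces to the same environment check (a sketch, not the verbatim module):

# Sketch of tests/common/ops_manager/cloud_manager.py (assumed, not part of this diff)
import os


def is_cloud_qa() -> bool:
    # cloud-qa is the default when ops_manager_version is not exported
    return os.getenv("ops_manager_version", "cloud_qa") == "cloud_qa"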

docker/mongodb-kubernetes-tests/tests/upgrades/sharded_cluster_operator_upgrade_v1_27_to_mck.py

Lines changed: 61 additions & 13 deletions
@@ -1,28 +1,24 @@
-from typing import Dict, Optional
+from typing import Dict
 
 import pytest
-from kubeobject import CustomObject
-from kubernetes import client
-from kubetester import create_or_update_configmap, read_configmap
-from kubetester.certs import create_sharded_cluster_certs
-from kubetester.kubetester import ensure_nested_objects
+from kubetester import read_configmap, try_load
+from kubetester.certs import create_mongodb_tls_certs, create_sharded_cluster_certs
 from kubetester.kubetester import fixture as yaml_fixture
 from kubetester.mongodb import MongoDB
-from kubetester.mongotester import ShardedClusterTester
-from kubetester.operator import Operator
+from kubetester.mongotester import ReplicaSetTester, ShardedClusterTester
 from kubetester.phase import Phase
 from tests import test_logger
 from tests.conftest import (
     LEGACY_OPERATOR_NAME,
     OPERATOR_NAME,
-    get_central_cluster_client,
     get_default_operator,
     install_legacy_deployment_state_meko,
     log_deployments_info,
 )
 from tests.upgrades import downscale_operator_deployment
 
 MDB_RESOURCE = "sh001-base"
+MDB_RS_RESOURCE = "rs"
 CERT_PREFIX = "prefix"
 
 logger = test_logger.get_test_logger(__name__)
@@ -41,6 +37,8 @@
 If the sharded cluster resource correctly reconciles after upgrade/downgrade and scaling steps, we assume it works
 correctly.
 """
+
+
 # TODO CLOUDP-318100: this test should eventually be updated and not pinned to 1.27 anymore
 
 
@@ -68,7 +66,7 @@ def server_certs(issuer: str, namespace: str) -> str:
 )
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture(scope="function")
 def sharded_cluster(
     issuer_ca_configmap: str,
     namespace: str,
@@ -79,15 +77,46 @@ def sharded_cluster(
         yaml_fixture("sharded-cluster.yaml"),
         namespace=namespace,
         name=MDB_RESOURCE,
-    )
+    ).configure(om=None, project_name=MDB_RESOURCE)
+
+    if try_load(resource):
+        return resource
+
     resource.set_version(custom_mdb_version)
     resource["spec"]["mongodsPerShardCount"] = 2
     resource["spec"]["configServerCount"] = 2
    resource["spec"]["mongosCount"] = 1
     resource["spec"]["persistent"] = True
     resource.configure_custom_tls(issuer_ca_configmap, CERT_PREFIX)
 
-    return resource.update()
+    return resource
+
+
+@pytest.fixture(scope="module")
+def replica_set_certs(issuer: str, namespace: str):
+    return create_mongodb_tls_certs(issuer, namespace, MDB_RS_RESOURCE, f"prefix-{MDB_RS_RESOURCE}-cert")
+
+
+@pytest.fixture(scope="module")
+def replica_set(
+    issuer_ca_configmap: str,
+    namespace: str,
+    replica_set_certs: str,
+    custom_mdb_version: str,
+):
+    resource = MongoDB.from_yaml(
+        yaml_fixture("replica-set-basic.yaml"),
+        namespace=namespace,
+        name=MDB_RS_RESOURCE,
+    ).configure(om=None, project_name=f"{MDB_RS_RESOURCE}")
+
+    if try_load(resource):
+        return resource
+
+    resource.set_version(custom_mdb_version)
+    resource.configure_custom_tls(issuer_ca_configmap, CERT_PREFIX)
+
+    return resource
 
 
 @pytest.mark.e2e_sharded_cluster_operator_upgrade_v1_27_to_mck
@@ -101,16 +130,23 @@ def test_install_legacy_deployment_state_meko(
         install_legacy_deployment_state_meko(namespace, managed_security_context, operator_installation_config)
 
     def test_create_sharded_cluster(self, sharded_cluster: MongoDB):
+        sharded_cluster.update()
         sharded_cluster.assert_reaches_phase(phase=Phase.Running, timeout=350)
 
     def test_scale_up_sharded_cluster(self, sharded_cluster: MongoDB):
-        sharded_cluster.load()
         sharded_cluster["spec"]["mongodsPerShardCount"] = 3
         sharded_cluster["spec"]["configServerCount"] = 3
         sharded_cluster.update()
         sharded_cluster.assert_reaches_phase(phase=Phase.Running, timeout=300)
 
 
+@pytest.mark.e2e_sharded_cluster_operator_upgrade_v1_27_to_mck
+class TestReplicaSetDeployment:
+    def test_create_replica_set(self, replica_set: MongoDB):
+        replica_set.update()
+        replica_set.assert_reaches_phase(phase=Phase.Running, timeout=350)
+
+
 @pytest.mark.e2e_sharded_cluster_operator_upgrade_v1_27_to_mck
 class TestOperatorUpgrade:
 
@@ -137,6 +173,12 @@ def test_sharded_cluster_reconciled(self, sharded_cluster: MongoDB, namespace: s
     def test_assert_connectivity(self, ca_path: str):
         ShardedClusterTester(MDB_RESOURCE, 1, ssl=True, ca_path=ca_path).assert_connectivity()
 
+    def test_replica_set_reconciled(self, replica_set: MongoDB):
+        replica_set.assert_reaches_phase(phase=Phase.Running, timeout=850, ignore_errors=True)
+
+    def test_assert_connectivity_replica_set(self, ca_path: str):
+        ReplicaSetTester(MDB_RS_RESOURCE, 3, ssl=True, ca_path=ca_path).assert_connectivity()
+
     def test_scale_down_sharded_cluster(self, sharded_cluster: MongoDB, namespace: str):
         sharded_cluster.load()
         # Scale down both by 1
@@ -168,6 +210,12 @@ def test_sharded_cluster_reconciled(self, sharded_cluster: MongoDB):
     def test_assert_connectivity(self, ca_path: str):
         ShardedClusterTester(MDB_RESOURCE, 1, ssl=True, ca_path=ca_path).assert_connectivity()
 
+    def test_replica_set_reconciled(self, replica_set: MongoDB):
+        replica_set.assert_reaches_phase(phase=Phase.Running, timeout=850, ignore_errors=True)
+
+    def test_assert_connectivity_replica_set(self, ca_path: str):
+        ReplicaSetTester(MDB_RS_RESOURCE, 3, ssl=True, ca_path=ca_path).assert_connectivity()
+
     def test_scale_up_sharded_cluster(self, sharded_cluster: MongoDB):
         sharded_cluster.load()
         sharded_cluster["spec"]["mongodsPerShardCount"] = 3
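With both fixtures calling .configure(om=None, ...), the sharded cluster (sh001-base) and the new replica set (rs) land in separate Cloud QA projects, each backed by its own generated ConfigMap. A quick verification sketch, assuming read_configmap returns the ConfigMap data as a dict (as its use in configure_cloud_qa suggests); the helper name is illustrative:

from kubetester import read_configmap


def assert_separate_cloud_qa_projects(namespace: str):
    sc_cm = read_configmap(namespace, "sh001-base-project-config")
    rs_cm = read_configmap(namespace, "rs-project-config")
    # Each deployment owns its own Cloud QA project
    assert sc_cm["projectName"] != rs_cm["projectName"]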

scripts/dev/contexts/variables/om80

Lines changed: 2 additions & 0 deletions
@@ -16,3 +16,5 @@ export AGENT_IMAGE="${MDB_AGENT_IMAGE_REPOSITORY}:${AGENT_VERSION}"
 export CUSTOM_APPDB_VERSION=8.0.6-ent
 export TEST_MODE=opsmanager
 export OPS_MANAGER_REGISTRY="${REGISTRY}"
+
+export ops_manager_version="${CUSTOM_OM_VERSION}"

scripts/funcs/kubernetes

Lines changed: 4 additions & 5 deletions
@@ -98,15 +98,15 @@ create_image_registries_secret() {
   context=$1
   namespace=$2
   secret_name=$3
-
+
   # Detect the correct config file path based on container runtime
   local config_file
   local temp_config_file=""
   if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then
     # For Podman, use root's auth.json since minikube uses sudo podman
     config_file="/root/.config/containers/auth.json"
     echo "Using Podman config: ${config_file}"
-
+
     # Create a temporary copy that the current user can read
     temp_config_file=$(mktemp)
     sudo cp "${config_file}" "${temp_config_file}"
@@ -117,7 +117,7 @@ create_image_registries_secret() {
     config_file="${HOME}/.docker/config.json"
     echo "Using Docker config: ${config_file}"
   fi
-
+
   # shellcheck disable=SC2154
   if kubectl --context "${context}" get namespace "${namespace}"; then
     kubectl --context "${context}" -n "${namespace}" delete secret "${secret_name}" --ignore-not-found
@@ -127,7 +127,7 @@ create_image_registries_secret() {
   else
     echo "Skipping creating pull secret in ${context}/${namespace}. The namespace doesn't exist yet."
   fi
-
+
   # Clean up temporary file
   if [[ -n "${temp_config_file}" ]] && [[ -f "${temp_config_file}" ]]; then
     rm -f "${temp_config_file}"
@@ -255,7 +255,6 @@ run_script_with_wrapped_kubectl() {
   cat > "${wrapper_script}" << EOF
 #!/bin/bash
 # Define kubectl function to include the context
-set -x
 kubectl() {
   command kubectl --context "${context}" "\$@"
 }
