
Commit e0329ea

Merge pull request #72 from arangodb/feature/test-individual-pod-deletion
Feature/test individual pod deletion
2 parents: e274839 + 76536b7

File tree

1 file changed: +254 −0

tests/resilience_test.go

Lines changed: 254 additions & 0 deletions
@@ -0,0 +1,254 @@
package tests

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/dchest/uniuri"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	driver "github.com/arangodb/go-driver"
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	"github.com/arangodb/kube-arangodb/pkg/client"
	"github.com/arangodb/kube-arangodb/pkg/util/retry"
)

// TestResiliencePod
// Tests handling of individual pod deletions
func TestResiliencePod(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	//fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))

	// Prepare deployment config
	depl := newDeployment("test-pod-resilience" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Fetch latest status so we know all member details
	apiObject, err = c.DatabaseV1alpha().ArangoDeployments(ns).Get(depl.GetName(), metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get deployment: %v", err)
	}

	// Delete one pod after the other
	apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
		for _, m := range *status {
			// Get current pod so we can compare UID later
			originalPod, err := kubecli.CoreV1().Pods(ns).Get(m.PodName, metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Failed to get pod %s: %v", m.PodName, err)
			}
			if err := kubecli.CoreV1().Pods(ns).Delete(m.PodName, &metav1.DeleteOptions{}); err != nil {
				t.Fatalf("Failed to delete pod %s: %v", m.PodName, err)
			}
			// Wait for pod to return with different UID
			op := func() error {
				pod, err := kubecli.CoreV1().Pods(ns).Get(m.PodName, metav1.GetOptions{})
				if err != nil {
					return maskAny(err)
				}
				if pod.GetUID() == originalPod.GetUID() {
					return fmt.Errorf("Still original pod")
				}
				return nil
			}
			if err := retry.Retry(op, time.Minute); err != nil {
				t.Fatalf("Pod did not restart: %v", err)
			}
			// Wait for cluster to be completely ready
			if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
				return clusterHealthEqualsSpec(h, apiObject.Spec)
			}); err != nil {
				t.Fatalf("Cluster not running in expected health in time: %v", err)
			}
		}
		return nil
	}, &apiObject.Status)

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}

// TestResiliencePVC
// Tests handling of individual PVC deletions
func TestResiliencePVC(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-pvc-resilience" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Fetch latest status so we know all member details
	apiObject, err = c.DatabaseV1alpha().ArangoDeployments(ns).Get(depl.GetName(), metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get deployment: %v", err)
	}

	// Delete one pvc after the other
	apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
		for _, m := range *status {
			// Get current pvc so we can compare UID later
			originalPVC, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(m.PersistentVolumeClaimName, metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Failed to get pvc %s: %v", m.PersistentVolumeClaimName, err)
			}
			if err := kubecli.CoreV1().PersistentVolumeClaims(ns).Delete(m.PersistentVolumeClaimName, &metav1.DeleteOptions{}); err != nil {
				t.Fatalf("Failed to delete pvc %s: %v", m.PersistentVolumeClaimName, err)
			}
			// Wait for pvc to return with different UID
			op := func() error {
				pvc, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(m.PersistentVolumeClaimName, metav1.GetOptions{})
				if err != nil {
					return maskAny(err)
				}
				if pvc.GetUID() == originalPVC.GetUID() {
					return fmt.Errorf("Still original pvc")
				}
				return nil
			}
			if err := retry.Retry(op, time.Minute); err != nil {
				t.Fatalf("PVC was not re-created: %v", err)
			}
			// Wait for cluster to be completely ready
			if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
				return clusterHealthEqualsSpec(h, apiObject.Spec)
			}); err != nil {
				t.Fatalf("Cluster not running in expected health in time: %v", err)
			}
		}
		return nil
	}, &apiObject.Status)

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}

// TestResilienceService
// Tests handling of individual service deletions
func TestResilienceService(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-service-resilience" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Fetch latest status so we know all member details
	apiObject, err = c.DatabaseV1alpha().ArangoDeployments(ns).Get(depl.GetName(), metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get deployment: %v", err)
	}

	// Delete database service
	// Get current service so we can compare UID later
	serviceName := apiObject.Status.ServiceName
	originalService, err := kubecli.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get service %s: %v", serviceName, err)
	}
	if err := kubecli.CoreV1().Services(ns).Delete(serviceName, &metav1.DeleteOptions{}); err != nil {
		t.Fatalf("Failed to delete service %s: %v", serviceName, err)
	}
	// Wait for service to return with different UID
	op := func() error {
		service, err := kubecli.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{})
		if err != nil {
			return maskAny(err)
		}
		if service.GetUID() == originalService.GetUID() {
			return fmt.Errorf("Still original service")
		}
		return nil
	}
	if err := retry.Retry(op, time.Minute); err != nil {
		t.Fatalf("Service was not re-created: %v", err)
	}
	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}
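All three tests repeat the same wait step: delete an object, then poll until an object of the same name comes back with a different UID, which shows the operator re-created it. Below is a minimal sketch of that step factored into a helper, assuming the surrounding tests package (its retry.Retry and maskAny helpers) plus two extra imports, k8s.io/client-go/kubernetes and k8s.io/apimachinery/pkg/types; the helper name waitUntilPodReplaced is hypothetical and not part of this commit.

// waitUntilPodReplaced is a hypothetical helper (not in this commit) that polls until
// the pod with the given name exists with a UID different from originalUID, i.e. it
// has been re-created. It reuses retry.Retry and the client-go calls shown above and
// additionally assumes imports of "k8s.io/client-go/kubernetes" and
// "k8s.io/apimachinery/pkg/types".
func waitUntilPodReplaced(kubecli kubernetes.Interface, ns, podName string, originalUID types.UID) error {
	op := func() error {
		pod, err := kubecli.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
		if err != nil {
			return maskAny(err)
		}
		if pod.GetUID() == originalUID {
			return fmt.Errorf("Still original pod")
		}
		return nil
	}
	return retry.Retry(op, time.Minute)
}

With a helper like this, each loop body above would reduce to the initial Get, the Delete, and a single call such as waitUntilPodReplaced(kubecli, ns, m.PodName, originalPod.GetUID()), with analogous helpers possible for PVCs and services.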
