
Commit 0e723fd

Merge pull request #389 from acekingke/LableRebuild
mysqlcluster: Support automatic rebuild of nodes by label.
2 parents 411b712 + 344c5c5 commit 0e723fd

3 files changed: +51 −1 lines changed

docs/en-us/rebuild.md

Lines changed: 8 additions & 1 deletion
@@ -11,4 +11,11 @@ Before you want to rebuild the pod, you need to manually check the security and
 **for example**
 ```shell
 ./hack/rebuild.sh sample-mysql-2
-```
+```
+
+# Auto Rebuild
+If you want to automatically rebuild a pod, such as `sample-mysql-0`:
+```shell
+kubectl label pods sample-mysql-0 rebuild=true
+```
+The pod will then be rebuilt automatically.
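
The added doc shows the kubectl path. For completeness, the same label can be set from Go with client-go; the sketch below is illustrative only, and the `default` namespace and kubeconfig location are assumptions, not part of this commit.

```go
package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build an out-of-cluster client from the default kubeconfig location (assumption).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Merge-patch the pod's labels with rebuild=true, the label the status syncer
	// watches for. Equivalent to: kubectl label pods sample-mysql-0 rebuild=true
	patch := []byte(`{"metadata":{"labels":{"rebuild":"true"}}}`)
	if _, err := clientset.CoreV1().Pods("default").Patch(context.TODO(),
		"sample-mysql-0", types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
		log.Fatal(err)
	}
	log.Println("labeled sample-mysql-0 for rebuild")
}
```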

mysqlcluster/syncer/status.go

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	apiv1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1"

@@ -95,6 +96,12 @@ func (s *StatusSyncer) Sync(ctx context.Context) (syncer.SyncResult, error) {
 	// get ready nodes.
 	var readyNodes []corev1.Pod
 	for _, pod := range list.Items {
+		if pod.ObjectMeta.Labels[utils.LableRebuild] == "true" {
+			if err := s.AutoRebuild(ctx, &pod); err != nil {
+				log.Error(err, "failed to AutoRebuild", "pod", pod.Name, "namespace", pod.Namespace)
+			}
+			continue
+		}
 		for _, cond := range pod.Status.Conditions {
 			switch cond.Type {
 			case corev1.ContainersReady:

@@ -183,6 +190,40 @@ func (s *StatusSyncer) updateClusterStatus() apiv1alpha1.ClusterCondition {
 	return clusterCondition
 }

+// Rebuild the Pod by deleting and re-creating it.
+// Notice: this function only deletes the Pod and its PVC; after Kubernetes
+// recreates the pod, it will be cloned and initialized again.
+func (s *StatusSyncer) AutoRebuild(ctx context.Context, pod *corev1.Pod) error {
+	ordinal, err := utils.GetOrdinal(pod.Name)
+	if err != nil {
+		return err
+	}
+	// Set Pod UnHealthy.
+	pod.Labels["healthy"] = "no"
+	if err := s.cli.Update(ctx, pod); err != nil {
+		return err
+	}
+	// Delete the Pod.
+	if err := s.cli.Delete(ctx, pod); err != nil {
+		return err
+	}
+	// Delete the PVC.
+	pvcName := fmt.Sprintf("%s-%s-%d", utils.DataVolumeName,
+		s.GetNameForResource(utils.StatefulSet), ordinal)
+	pvc := corev1.PersistentVolumeClaim{}
+
+	if err := s.cli.Get(ctx,
+		types.NamespacedName{Name: pvcName, Namespace: s.Namespace},
+		&pvc); err != nil {
+		return err
+	}
+	if err := s.cli.Delete(ctx, &pvc); err != nil {
+		return err
+	}
+	return nil
+}
+
 // updateNodeStatus update the node status.
 func (s *StatusSyncer) updateNodeStatus(ctx context.Context, cli client.Client, pods []corev1.Pod) error {
 	for _, pod := range pods {
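
A note on the PVC name assembled in `AutoRebuild`: it follows the StatefulSet volumeClaimTemplate convention `<volume>-<statefulset>-<ordinal>`, so `utils.GetOrdinal` only needs the trailing ordinal of the pod name. That helper is not shown in this diff; a minimal sketch of what such a function could look like, under that assumption:

```go
package utils

import (
	"fmt"
	"strconv"
	"strings"
)

// GetOrdinal extracts the StatefulSet ordinal from a pod name,
// e.g. "sample-mysql-2" -> 2. Illustrative sketch only; the repository's
// actual implementation is not part of this diff.
func GetOrdinal(podName string) (int, error) {
	idx := strings.LastIndex(podName, "-")
	if idx == -1 {
		return -1, fmt.Errorf("pod name %q has no ordinal suffix", podName)
	}
	return strconv.Atoi(podName[idx+1:])
}
```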

utils/constants.go

Lines changed: 2 additions & 0 deletions
@@ -168,6 +168,8 @@ const (
 	Candidate RaftRole = "CANDIDATE"
 )

+const LableRebuild = "rebuild"
+
 // XenonHttpUrl is a http url corresponding to the xenon instruction.
 type XenonHttpUrl string
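
The new constant is consumed in the syncer's existing pod loop above. As an aside, it could also back a label selector that fetches only the pods marked for rebuild; the controller-runtime sketch below is a hypothetical alternative, not how this commit does it.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// podsMarkedForRebuild lists pods in the namespace that carry rebuild=true.
// Hypothetical helper for illustration only.
func podsMarkedForRebuild(ctx context.Context, cli client.Client, namespace string) ([]corev1.Pod, error) {
	var list corev1.PodList
	err := cli.List(ctx, &list,
		client.InNamespace(namespace),
		client.MatchingLabels{"rebuild": "true"}) // i.e. utils.LableRebuild
	return list.Items, err
}
```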
