Xu-Wentao commented on code in PR #316:
URL: https://github.com/apache/shardingsphere-on-cloud/pull/316#discussion_r1173425900


##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -178,33 +190,54 @@ func (r *ShardingSphereChaosReconciler) reconcileJob(ctx 
context.Context, ssChao
        if ssChaos.Status.Phase == "" || ssChaos.Status.Phase == 
sschaosv1alpha1.PhaseBeforeExperiment || ssChaos.Status.Phase == 
sschaosv1alpha1.PhaseAfterExperiment {
                nowInjectRequirement = reconcile.Experimental
        }
-       if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos || 
ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+       if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos {
                nowInjectRequirement = reconcile.Pressure
        }
+       if ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+               nowInjectRequirement = reconcile.Verify
+       }
        if isExist {
                return r.updateJob(ctx, nowInjectRequirement, ssChaos, rJob)
        }
 
-       return r.createJob(ctx, nowInjectRequirement, ssChaos)
+       err = r.createJob(ctx, nowInjectRequirement, ssChaos)
+       if err != nil {
+               r.Events.Event(ssChaos, "Warning", "Created", fmt.Sprintf("job 
created fail %s", err))
+               return err
+       }
+
+       r.Events.Event(ssChaos, "Normal", "Created", fmt.Sprintf("%s job 
created successfully", string(nowInjectRequirement)))
+       return nil
 }
 
 func (r *ShardingSphereChaosReconciler) reconcileStatus(ctx context.Context, 
namespacedName types.NamespacedName) error {
        ssChaos, err := r.getRuntimeSSChaos(ctx, namespacedName)
        if err != nil {
                return err
        }
-       if ssChaos.Status.Phase == "" {
-               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
-       }
-       rJob := &batchV1.Job{}
-       if err := r.Get(ctx, namespacedName, rJob); err != nil {
+
+       SetDefault(ssChaos)
+
+       rJob, found, err := r.getJobByNamespacedName(ctx, namespacedName)
+       if err != nil || !found || rJob == nil {

Review Comment:
   1. Recommend using `rJob, err` as the return values; `err == nil && rJob == nil` is equivalent to `!found` (see the sketch below).
   2. If `err == nil` and `!found`, this returns a `nil` error; make sure that is what you want to return.
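   A minimal sketch of the two-value form suggested in point 1, assuming the
   controller-runtime client (`r.Get`) and the `apierrors` helpers already used in
   this file; the caller then has to decide explicitly whether a missing Job
   should really end up as a silent `nil` return:

   ```go
   func (r *ShardingSphereChaosReconciler) getJobByNamespacedName(ctx context.Context, name types.NamespacedName) (*batchV1.Job, error) {
       job := &batchV1.Job{}
       if err := r.Get(ctx, name, job); err != nil {
           if apierrors.IsNotFound(err) {
               // A missing Job is reported as (nil, nil); `err == nil && job == nil` plays the role of `!found`.
               return nil, nil
           }
           return nil, err
       }
       return job, nil
   }
   ```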



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {

Review Comment:
   Rename `SetDefault` to `setDefault` if this function will not be called outside the package.



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {

Review Comment:
   `isResultExist` is a more readable name for this func, since it returns a boolean value.



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {

Review Comment:
   Using a pointer here is a better choice.
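   For example, with a pointer into the slice and `strings.HasPrefix` (the `strings`
   package is already imported for `strings.Contains` in this file) the loop could read:

   ```go
   for i := range ssChaos.Status.Result {
       res := &ssChaos.Status.Result[i]
       if strings.HasPrefix(res.Details.Msg, check) {
           return nil
       }
   }
   ```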



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{
+               Namespace: pod.Namespace,
+               Name:      pod.Name,
+       }
+       result := &sschaosv1alpha1.Result{}
+       if rJob.Status.Succeeded == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if ssChaos.Spec.Expect.Verify == "" || 
ssChaos.Spec.Expect.Verify == log {
+                       result.Success = true

Review Comment:
   `rJob.Status.Succeeded == 1`, `result.Success = true`:
   unify the format of these two checks if possible.



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{

Review Comment:
   Do you mean `podNamespacedName`?



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -330,20 +490,14 @@ func (r *ShardingSphereChaosReconciler) updateJob(ctx 
context.Context, requireme
                return err
        }
        if exp != nil {
-               if err := r.Delete(ctx, cur); err != nil {
-                       return err
-               }
-               if err := ctrl.SetControllerReference(chao, exp, r.Scheme); err 
!= nil {
-                       return err
-               }
-               if err := r.Create(ctx, exp); err != nil {
+               if err := r.Delete(ctx, cur); err != nil && 
!apierrors.IsNotFound(err) {

Review Comment:
   I saw that `reconcile.UpdateJob` returns `nil, nil` if the job does not need updating. Is that what you expect? The name `UpdateJob` makes this confusing (see the sketch below).
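   A hedged caller-side sketch (the exact `reconcile.UpdateJob` signature is assumed
   here for illustration) that makes the "nothing to update" case explicit:

   ```go
   exp, err := reconcile.UpdateJob(ctx, requirement, chao, cur) // signature assumed for illustration
   if err != nil {
       return err
   }
   if exp == nil {
       // UpdateJob decided no change is needed; an explicit early return documents that.
       return nil
   }
   ```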



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{

Review Comment:
   Using a pointer is safer and more efficient: `r := &ssChaos.Status.Result[i]`.
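   For example, the copy loop could read:

   ```go
   for i := range ssChaos.Status.Result {
       res := &ssChaos.Status.Result[i]
       rt.Status.Result = append(rt.Status.Result, sschaosv1alpha1.Result{
           Success: res.Success,
           Details: sschaosv1alpha1.Details{
               Time: metav1.Time{Time: time.Now()},
               Msg:  res.Details.Msg,
           },
       })
   }
   ```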



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -178,33 +190,54 @@ func (r *ShardingSphereChaosReconciler) reconcileJob(ctx 
context.Context, ssChao
        if ssChaos.Status.Phase == "" || ssChaos.Status.Phase == 
sschaosv1alpha1.PhaseBeforeExperiment || ssChaos.Status.Phase == 
sschaosv1alpha1.PhaseAfterExperiment {
                nowInjectRequirement = reconcile.Experimental
        }
-       if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos || 
ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+       if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos {
                nowInjectRequirement = reconcile.Pressure
        }
+       if ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+               nowInjectRequirement = reconcile.Verify
+       }
        if isExist {
                return r.updateJob(ctx, nowInjectRequirement, ssChaos, rJob)
        }
 
-       return r.createJob(ctx, nowInjectRequirement, ssChaos)
+       err = r.createJob(ctx, nowInjectRequirement, ssChaos)
+       if err != nil {
+               r.Events.Event(ssChaos, "Warning", "Created", fmt.Sprintf("job 
created fail %s", err))
+               return err
+       }
+
+       r.Events.Event(ssChaos, "Normal", "Created", fmt.Sprintf("%s job 
created successfully", string(nowInjectRequirement)))
+       return nil
 }
 
 func (r *ShardingSphereChaosReconciler) reconcileStatus(ctx context.Context, 
namespacedName types.NamespacedName) error {
        ssChaos, err := r.getRuntimeSSChaos(ctx, namespacedName)
        if err != nil {
                return err
        }
-       if ssChaos.Status.Phase == "" {
-               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
-       }
-       rJob := &batchV1.Job{}
-       if err := r.Get(ctx, namespacedName, rJob); err != nil {
+
+       SetDefault(ssChaos)
+
+       rJob, found, err := r.getJobByNamespacedName(ctx, namespacedName)
+       if err != nil || !found || rJob == nil {
                return err
        }
 
        if ssChaos.Status.Phase == sschaosv1alpha1.PhaseBeforeExperiment && 
rJob.Status.Succeeded == 1 {
                ssChaos.Status.Phase = sschaosv1alpha1.PhaseAfterExperiment
        }
 
+       if rJob.Status.Failed == 1 {

Review Comment:
   I think treating `rJob.Status.Failed` as a boolean condition would be simpler, if its value can only ever be 0 or 1 (see the sketch below).
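   For example (the `Failed` field itself is an `int32` count in `batch/v1`, so only
   the comparison changes, not the type):

   ```go
   if rJob.Status.Failed > 0 {
       // handle the failed Job as in the PR
   }
   ```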



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"

Review Comment:
   Defining `Verify` as a const would be better.
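   For example (the constant name is illustrative):

   ```go
   // package-level constant instead of a repeated string literal
   const verifyMsgPrefix = "Verify"
   ```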



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {

Review Comment:
   1. No need for `<= 0` when checking the length of `ssChaos.Status.Result`; the `len` of a slice is never negative, so `len(...) == 0` is enough.
   2. The `== nil` check is redundant as well, since the `len` of a nil slice is already `0`; a single length check covers both cases (see the sketch below).
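   A simplified sketch:

   ```go
   if len(ssChaos.Status.Result) == 0 {
       ssChaos.Status.Result = []sschaosv1alpha1.Result{}
   }
   ```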
   



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{
+               Namespace: pod.Namespace,
+               Name:      pod.Name,
+       }
+       result := &sschaosv1alpha1.Result{}
+       if rJob.Status.Succeeded == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if ssChaos.Spec.Expect.Verify == "" || 
ssChaos.Spec.Expect.Verify == log {
+                       result.Success = true
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: job succeeded", check),
+                       }
+               } else {
+                       if err != nil {
+                               return err
+                       }
+                       result.Success = false
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: %s", check, log),
+                       }
+               }
+       }
+
+       if rJob.Status.Failed == 1 {

Review Comment:
   The same problem as above.



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{
+               Namespace: pod.Namespace,
+               Name:      pod.Name,
+       }
+       result := &sschaosv1alpha1.Result{}
+       if rJob.Status.Succeeded == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if ssChaos.Spec.Expect.Verify == "" || 
ssChaos.Spec.Expect.Verify == log {
+                       result.Success = true
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: job succeeded", check),
+                       }
+               } else {
+                       if err != nil {
+                               return err
+                       }
+                       result.Success = false
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: %s", check, log),
+                       }
+               }
+       }
+
+       if rJob.Status.Failed == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if err != nil {
+                       return err
+               }
+               result.Success = false
+               result.Details = sschaosv1alpha1.Details{
+                       Time: metav1.Time{Time: time.Now()},
+                       Msg:  fmt.Sprintf("%s: %s", check, log),
+               }
+       }
+       ssChaos.Status.Result = appendResult(*result, ssChaos.Status.Result, 
check)
+       return nil
+}
+
+func (r *ShardingSphereChaosReconciler) getPodHaveLog(ctx context.Context, 
rJob *batchV1.Job) (*v1.Pod, error) {
+       pods := &v1.PodList{}
+       if err := r.List(ctx, pods, client.MatchingLabels{"controller-uid": 
rJob.Spec.Template.Labels["controller-uid"]}); err != nil {
+               return nil, err
+       }
+       var pod *v1.Pod
+       for i := range pods.Items {
+               pod = &pods.Items[i]
+               break
+       }

Review Comment:
   Is this just meant to get the first `pod` from `pods`? Could `pod` be `nil` here if the list is empty?
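   A nil-safe sketch, assuming an empty pod list should surface as an error
   (`fmt` is already imported in this file):

   ```go
   if len(pods.Items) == 0 {
       return nil, fmt.Errorf("no pod found for job %s/%s", rJob.Namespace, rJob.Name)
   }
   return &pods.Items[0], nil
   ```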



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{
+               Namespace: pod.Namespace,
+               Name:      pod.Name,
+       }
+       result := &sschaosv1alpha1.Result{}
+       if rJob.Status.Succeeded == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if ssChaos.Spec.Expect.Verify == "" || 
ssChaos.Spec.Expect.Verify == log {
+                       result.Success = true
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: job succeeded", check),
+                       }
+               } else {
+                       if err != nil {
+                               return err
+                       }
+                       result.Success = false
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: %s", check, log),
+                       }
+               }
+       }
+
+       if rJob.Status.Failed == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if err != nil {
+                       return err
+               }
+               result.Success = false
+               result.Details = sschaosv1alpha1.Details{
+                       Time: metav1.Time{Time: time.Now()},
+                       Msg:  fmt.Sprintf("%s: %s", check, log),
+               }
+       }
+       ssChaos.Status.Result = appendResult(*result, ssChaos.Status.Result, 
check)
+       return nil
+}
+
+func (r *ShardingSphereChaosReconciler) getPodHaveLog(ctx context.Context, 
rJob *batchV1.Job) (*v1.Pod, error) {
+       pods := &v1.PodList{}
+       if err := r.List(ctx, pods, client.MatchingLabels{"controller-uid": 
rJob.Spec.Template.Labels["controller-uid"]}); err != nil {
+               return nil, err
+       }
+       var pod *v1.Pod
+       for i := range pods.Items {
+               pod = &pods.Items[i]
+               break
+       }
+       return pod, nil
+}
+
+func checkResultExist(rJob *batchV1.Job) bool {
+       for _, cmd := range rJob.Spec.Template.Spec.Containers[0].Args {
+               if strings.Contains(cmd, string(reconcile.Verify)) {
+                       return true
+               }
+       }
+       return false
+}
+
+func appendResult(r sschaosv1alpha1.Result, results []sschaosv1alpha1.Result, 
check string) []sschaosv1alpha1.Result {

Review Comment:
   Make `results` the first argument and `r` the second, so the func can be used like `append(list, element)`.
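   A sketch with the reordered signature; the trailing `append` after the loop is
   assumed from context, and `strings.HasPrefix` replaces the manual slicing:

   ```go
   func appendResult(results []sschaosv1alpha1.Result, r sschaosv1alpha1.Result, check string) []sschaosv1alpha1.Result {
       for i := range results {
           if strings.HasPrefix(results[i].Details.Msg, check) && strings.HasPrefix(r.Details.Msg, check) {
               // replace the existing entry for this check instead of appending a duplicate
               results[i] = r
               return results
           }
       }
       return append(results, r)
   }
   ```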



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{
+               Namespace: pod.Namespace,
+               Name:      pod.Name,
+       }
+       result := &sschaosv1alpha1.Result{}
+       if rJob.Status.Succeeded == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if ssChaos.Spec.Expect.Verify == "" || 
ssChaos.Spec.Expect.Verify == log {
+                       result.Success = true
+                       result.Details = sschaosv1alpha1.Details{

Review Comment:
   Use `Detail` instead of `Details` if it is not a list.



##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +246,137 @@ func (r *ShardingSphereChaosReconciler) 
reconcileStatus(ctx context.Context, nam
        if err != nil {
                return err
        }
-       rt.Status = ssChaos.Status
+       setRtStatus(rt, ssChaos)
        return r.Status().Update(ctx, rt)
 }
 
+func SetDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+       if ssChaos.Status.Phase == "" {
+               ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+       }
+       if len(ssChaos.Status.Result) <= 0 || ssChaos.Status.Result == nil {
+               ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+       }
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos 
*sschaosv1alpha1.ShardingSphereChaos) {
+       rt.Status.Result = []sschaosv1alpha1.Result{}
+       for i := range ssChaos.Status.Result {
+               rt.Status.Result = append(rt.Status.Result, 
sschaosv1alpha1.Result{
+                       Success: ssChaos.Status.Result[i].Success,
+                       Details: sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  ssChaos.Status.Result[i].Details.Msg,
+                       },
+               })
+       }
+       rt.Status.Phase = ssChaos.Status.Phase
+       rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx 
context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob 
*batchV1.Job) error {
+       if checkResultExist(rJob) {
+               return nil
+       }
+       check := "Verify"
+       for i := range ssChaos.Status.Result {
+               if len(ssChaos.Status.Result[i].Details.Msg) >= len(check) && 
ssChaos.Status.Result[i].Details.Msg[:len(check)] == check {
+                       return nil
+               }
+       }
+       logOpts := &v1.PodLogOptions{}
+       pod, err := r.getPodHaveLog(ctx, rJob)
+       if err != nil {
+               return err
+       }
+       containerNamespacedname := types.NamespacedName{
+               Namespace: pod.Namespace,
+               Name:      pod.Name,
+       }
+       result := &sschaosv1alpha1.Result{}
+       if rJob.Status.Succeeded == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if ssChaos.Spec.Expect.Verify == "" || 
ssChaos.Spec.Expect.Verify == log {
+                       result.Success = true
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: job succeeded", check),
+                       }
+               } else {
+                       if err != nil {
+                               return err
+                       }
+                       result.Success = false
+                       result.Details = sschaosv1alpha1.Details{
+                               Time: metav1.Time{Time: time.Now()},
+                               Msg:  fmt.Sprintf("%s: %s", check, log),
+                       }
+               }
+       }
+
+       if rJob.Status.Failed == 1 {
+               log, err := r.getPodLog(ctx, containerNamespacedname, logOpts)
+               if err != nil {
+                       return err
+               }
+               result.Success = false
+               result.Details = sschaosv1alpha1.Details{
+                       Time: metav1.Time{Time: time.Now()},
+                       Msg:  fmt.Sprintf("%s: %s", check, log),
+               }
+       }
+       ssChaos.Status.Result = appendResult(*result, ssChaos.Status.Result, 
check)
+       return nil
+}
+
+func (r *ShardingSphereChaosReconciler) getPodHaveLog(ctx context.Context, 
rJob *batchV1.Job) (*v1.Pod, error) {
+       pods := &v1.PodList{}
+       if err := r.List(ctx, pods, client.MatchingLabels{"controller-uid": 
rJob.Spec.Template.Labels["controller-uid"]}); err != nil {
+               return nil, err
+       }
+       var pod *v1.Pod
+       for i := range pods.Items {
+               pod = &pods.Items[i]
+               break
+       }
+       return pod, nil
+}
+
+func checkResultExist(rJob *batchV1.Job) bool {
+       for _, cmd := range rJob.Spec.Template.Spec.Containers[0].Args {
+               if strings.Contains(cmd, string(reconcile.Verify)) {
+                       return true
+               }
+       }
+       return false
+}
+
+func appendResult(r sschaosv1alpha1.Result, results []sschaosv1alpha1.Result, 
check string) []sschaosv1alpha1.Result {
+       for i := range results {
+               msg := results[i].Details.Msg
+               if len(msg) >= len(check) && msg[:len(check)] == check && 
r.Details.Msg[:len(check)] == check {
+                       results[i] = r
+                       return results
+               }
+       }

Review Comment:
   What is this code for? Updating an existing result in place?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
