This is an automated email from the ASF dual-hosted git repository.

tsato pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit ffc6b7d176d506d9b5e94f5f7cba2244bd498826
Author: Tadayoshi Sato <[email protected]>
AuthorDate: Tue Sep 13 14:30:05 2022 +0900

    feat(controller): set integration phase to Error when trait application 
fails
---
 e2e/global/common/files/BadRoute.java              | 11 ++---
 e2e/global/common/files/Java.java                  | 14 +++----
 .../files/{BadRoute.java => Unresolvable.java}     | 17 ++++----
 e2e/global/common/integration_fail_test.go         | 26 +++++++++++-
 pkg/apis/camel/v1/integration_types.go             |  2 +
 pkg/apis/camel/v1/integration_types_support.go     | 23 ++++++++++
 pkg/controller/integration/initialize.go           |  6 ++-
 .../integration/integration_controller.go          | 30 ++++++++-----
 pkg/controller/integration/monitor.go              | 49 +++++++++++-----------
 pkg/controller/integration/monitor_cronjob.go      | 19 ++++++---
 pkg/controller/integration/monitor_deployment.go   | 14 +++++--
 pkg/controller/integration/monitor_knative.go      |  8 ++--
 12 files changed, 148 insertions(+), 71 deletions(-)

diff --git a/e2e/global/common/files/BadRoute.java 
b/e2e/global/common/files/BadRoute.java
index 9967879cb..dc2e42343 100644
--- a/e2e/global/common/files/BadRoute.java
+++ b/e2e/global/common/files/BadRoute.java
@@ -20,9 +20,10 @@ import java.lang.Override;
 import org.apache.camel.builder.RouteBuilder;
 
 public class BadRoute extends RouteBuilder {
-  @Override
-  public void configure() throws Exception {
-    
from("mongodb:sample?database=sampledb&collection=mycollection&operation=findfoo").throwException(new
 RuntimeException("Heyyy")).log("bar");
-  }
+    @Override
+    public void configure() throws Exception {
+        
from("mongodb:sample?database=sampledb&collection=mycollection&operation=findfoo")
+            .throwException(new RuntimeException("Heyyy"))
+            .log("bar");
+    }
 }
-
diff --git a/e2e/global/common/files/Java.java 
b/e2e/global/common/files/Java.java
index 66fef5fe8..30b1f459e 100644
--- a/e2e/global/common/files/Java.java
+++ b/e2e/global/common/files/Java.java
@@ -18,11 +18,11 @@
 import org.apache.camel.builder.RouteBuilder;
 
 public class Java extends RouteBuilder {
-  @Override
-  public void configure() throws Exception {
-         from("timer:tick")
-         .setHeader("m").constant("string!")
-         .setBody().simple("Magic${header.m}")
-      .log("${body}");
-  }
+    @Override
+    public void configure() throws Exception {
+        from("timer:tick")
+            .setHeader("m").constant("string!")
+            .setBody().simple("Magic${header.m}")
+            .log("${body}");
+    }
 }
diff --git a/e2e/global/common/files/BadRoute.java 
b/e2e/global/common/files/Unresolvable.java
similarity index 73%
copy from e2e/global/common/files/BadRoute.java
copy to e2e/global/common/files/Unresolvable.java
index 9967879cb..768f32ad4 100644
--- a/e2e/global/common/files/BadRoute.java
+++ b/e2e/global/common/files/Unresolvable.java
@@ -15,14 +15,15 @@
  * limitations under the License.
  */
 
-import java.lang.Exception;
-import java.lang.Override;
 import org.apache.camel.builder.RouteBuilder;
 
-public class BadRoute extends RouteBuilder {
-  @Override
-  public void configure() throws Exception {
-    
from("mongodb:sample?database=sampledb&collection=mycollection&operation=findfoo").throwException(new
 RuntimeException("Heyyy")).log("bar");
-  }
+public class Unresolvable extends RouteBuilder {
+    @Override
+    public void configure() throws Exception {
+        from("timer:tick")
+            .setHeader("m").constant("string!")
+            .setBody().simple("Magic${header.m}")
+            .log("${body}")
+            .to("non-existent:hello");
+    }
 }
-
diff --git a/e2e/global/common/integration_fail_test.go 
b/e2e/global/common/integration_fail_test.go
index c95b691b1..0f498ac84 100644
--- a/e2e/global/common/integration_fail_test.go
+++ b/e2e/global/common/integration_fail_test.go
@@ -24,6 +24,7 @@ package common
 
 import (
        "testing"
+       "time"
 
        . "github.com/onsi/gomega"
        "github.com/onsi/gomega/gstruct"
@@ -44,7 +45,8 @@ func TestBadRouteIntegration(t *testing.T) {
                        Expect(KamelRunWithID(operatorID, ns, 
"files/BadRoute.java", "--name", name).Execute()).To(Succeed())
                        Eventually(IntegrationPodPhase(ns, name), 
TestTimeoutLong).Should(Equal(corev1.PodRunning))
                        Eventually(IntegrationPhase(ns, name), 
TestTimeoutShort).Should(Equal(v1.IntegrationPhaseError))
-                       Eventually(IntegrationConditionStatus(ns, name, 
v1.IntegrationConditionReady), 
TestTimeoutShort).Should(Equal(corev1.ConditionFalse))
+                       Eventually(IntegrationConditionStatus(ns, name, 
v1.IntegrationConditionReady), TestTimeoutShort).
+                               Should(Equal(corev1.ConditionFalse))
 
                        // Make sure the Integration can be scaled
                        Expect(ScaleIntegration(ns, name, 2)).To(Succeed())
@@ -63,7 +65,8 @@ func TestBadRouteIntegration(t *testing.T) {
                t.Run("run missing dependency java route", func(t *testing.T) {
                        RegisterTestingT(t)
                        name := "java-route"
-                       Expect(KamelRunWithID(operatorID, ns, 
"files/Java.java", "--name", name, "-d", 
"mvn:com.example:nonexistent:1.0").Execute()).To(Succeed())
+                       Expect(KamelRunWithID(operatorID, ns, 
"files/Java.java", "--name", name,
+                               "-d", 
"mvn:com.example:nonexistent:1.0").Execute()).To(Succeed())
                        // Integration in error
                        Eventually(IntegrationPhase(ns, name), 
TestTimeoutLong).Should(Equal(v1.IntegrationPhaseError))
                        kitName := IntegrationKit(ns, name)()
@@ -77,5 +80,24 @@ func TestBadRouteIntegration(t *testing.T) {
                        // Clean up
                        Expect(Kamel("delete", "--all", "-n", 
ns).Execute()).To(Succeed())
                })
+
+               t.Run("run unresolvable component java route", func(t 
*testing.T) {
+                       RegisterTestingT(t)
+                       name := "unresolvable-route"
+                       Expect(KamelRunWithID(operatorID, ns, 
"files/Unresolvable.java", "--name", name).Execute()).To(Succeed())
+                       // Integration in error
+                       Eventually(IntegrationPhase(ns, name), 
TestTimeoutShort).Should(Equal(v1.IntegrationPhaseError))
+                       Eventually(IntegrationConditionStatus(ns, name, 
v1.IntegrationConditionReady), TestTimeoutShort).
+                               Should(Equal(corev1.ConditionFalse))
+                       Eventually(IntegrationCondition(ns, name, 
v1.IntegrationConditionReady), TestTimeoutShort).Should(And(
+                               WithTransform(IntegrationConditionReason, 
Equal(v1.IntegrationConditionInitializationFailedReason)),
+                               WithTransform(IntegrationConditionMessage, 
HavePrefix("error during trait customization")),
+                       ))
+                       // Kit shouldn't be created
+                       Consistently(IntegrationKit(ns, name), 
10*time.Second).Should(BeEmpty())
+
+                       // Clean up
+                       Expect(Kamel("delete", "--all", "-n", 
ns).Execute()).To(Succeed())
+               })
        })
 }
diff --git a/pkg/apis/camel/v1/integration_types.go 
b/pkg/apis/camel/v1/integration_types.go
index c6f1e750e..921b2d74f 100644
--- a/pkg/apis/camel/v1/integration_types.go
+++ b/pkg/apis/camel/v1/integration_types.go
@@ -240,6 +240,8 @@ const (
        // IntegrationConditionErrorReason --
        IntegrationConditionErrorReason string = "Error"
 
+       // IntegrationConditionInitializationFailedReason --
+       IntegrationConditionInitializationFailedReason string = 
"InitializationFailed"
        // IntegrationConditionUnsupportedLanguageReason --
        IntegrationConditionUnsupportedLanguageReason string = 
"UnsupportedLanguage"
 
diff --git a/pkg/apis/camel/v1/integration_types_support.go 
b/pkg/apis/camel/v1/integration_types_support.go
index 50316a081..3885b9211 100644
--- a/pkg/apis/camel/v1/integration_types_support.go
+++ b/pkg/apis/camel/v1/integration_types_support.go
@@ -322,6 +322,29 @@ func (in *Integration) GetIntegrationKitNamespace(p 
*IntegrationPlatform) string
        return in.Namespace
 }
 
+// IsConditionTrue checks if the condition with the given type is true.
+func (in *Integration) IsConditionTrue(conditionType IntegrationConditionType) 
bool {
+       if in == nil {
+               return false
+       }
+       cond := in.Status.GetCondition(conditionType)
+       if cond == nil {
+               return false
+       }
+
+       return cond.Status == corev1.ConditionTrue
+}
+
+// SetReadyCondition sets Ready condition with the given status, reason, and 
message.
+func (in *Integration) SetReadyCondition(status corev1.ConditionStatus, 
reason, message string) {
+       in.Status.SetCondition(IntegrationConditionReady, status, reason, 
message)
+}
+
+// SetReadyConditionError sets Ready condition to False with the given error 
message.
+func (in *Integration) SetReadyConditionError(err string) {
+       in.SetReadyCondition(corev1.ConditionFalse, 
IntegrationConditionErrorReason, err)
+}
+
 // GetCondition returns the condition with the provided type.
 func (in *IntegrationStatus) GetCondition(condType IntegrationConditionType) 
*IntegrationCondition {
        for i := range in.Conditions {
diff --git a/pkg/controller/integration/initialize.go 
b/pkg/controller/integration/initialize.go
index 8feddee9e..58cf23cb9 100644
--- a/pkg/controller/integration/initialize.go
+++ b/pkg/controller/integration/initialize.go
@@ -20,6 +20,7 @@ package integration
 import (
        "context"
 
+       corev1 "k8s.io/api/core/v1"
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
@@ -51,7 +52,10 @@ func (action *initializeAction) CanHandle(integration 
*v1.Integration) bool {
 // Handle handles the integrations.
 func (action *initializeAction) Handle(ctx context.Context, integration 
*v1.Integration) (*v1.Integration, error) {
        if _, err := trait.Apply(ctx, action.client, integration, nil); err != 
nil {
-               return nil, err
+               integration.Status.Phase = v1.IntegrationPhaseError
+               integration.SetReadyCondition(corev1.ConditionFalse,
+                       v1.IntegrationConditionInitializationFailedReason, 
err.Error())
+               return integration, err
        }
 
        if integration.Status.IntegrationKit == nil {
diff --git a/pkg/controller/integration/integration_controller.go 
b/pkg/controller/integration/integration_controller.go
index bd9a19937..d898ff3ea 100644
--- a/pkg/controller/integration/integration_controller.go
+++ b/pkg/controller/integration/integration_controller.go
@@ -344,22 +344,18 @@ func (r *reconcileIntegration) Reconcile(ctx 
context.Context, request reconcile.
                        newTarget, err := a.Handle(ctx, target)
                        if err != nil {
                                camelevent.NotifyIntegrationError(ctx, 
r.client, r.recorder, &instance, newTarget, err)
+                               // Update the integration (mostly just to 
update its phase) if the new instance is returned
+                               if newTarget != nil {
+                                       _ = r.update(ctx, &instance, newTarget, 
&targetLog)
+                               }
                                return reconcile.Result{}, err
                        }
 
                        if newTarget != nil {
-                               if err := r.update(ctx, &instance, newTarget); 
err != nil {
+                               if err := r.update(ctx, &instance, newTarget, 
&targetLog); err != nil {
                                        camelevent.NotifyIntegrationError(ctx, 
r.client, r.recorder, &instance, newTarget, err)
                                        return reconcile.Result{}, err
                                }
-
-                               if newTarget.Status.Phase != 
instance.Status.Phase {
-                                       targetLog.Info(
-                                               "state transition",
-                                               "phase-from", 
instance.Status.Phase,
-                                               "phase-to", 
newTarget.Status.Phase,
-                                       )
-                               }
                        }
 
                        // handle one action at time so the resource
@@ -372,7 +368,7 @@ func (r *reconcileIntegration) Reconcile(ctx 
context.Context, request reconcile.
        return reconcile.Result{}, nil
 }
 
-func (r *reconcileIntegration) update(ctx context.Context, base 
*v1.Integration, target *v1.Integration) error {
+func (r *reconcileIntegration) update(ctx context.Context, base 
*v1.Integration, target *v1.Integration, log *log.Logger) error {
        d, err := digest.ComputeForIntegration(target)
        if err != nil {
                return err
@@ -381,5 +377,17 @@ func (r *reconcileIntegration) update(ctx context.Context, 
base *v1.Integration,
        target.Status.Digest = d
        target.Status.ObservedGeneration = base.Generation
 
-       return r.client.Status().Patch(ctx, target, ctrl.MergeFrom(base))
+       if err := r.client.Status().Patch(ctx, target, ctrl.MergeFrom(base)); 
err != nil {
+               return err
+       }
+
+       if target.Status.Phase != base.Status.Phase {
+               log.Info(
+                       "state transition",
+                       "phase-from", base.Status.Phase,
+                       "phase-to", target.Status.Phase,
+               )
+       }
+
+       return nil
 }
diff --git a/pkg/controller/integration/monitor.go 
b/pkg/controller/integration/monitor.go
index df8f426f3..9909be03e 100644
--- a/pkg/controller/integration/monitor.go
+++ b/pkg/controller/integration/monitor.go
@@ -57,11 +57,28 @@ func (action *monitorAction) Name() string {
 }
 
 func (action *monitorAction) CanHandle(integration *v1.Integration) bool {
+       // When in InitializationFailed condition a kit is not available for 
the integration
+       // so the monitor action is not able to handle it.
+       if isInInitializationFailed(integration.Status) {
+               return false
+       }
+
        return integration.Status.Phase == v1.IntegrationPhaseDeploying ||
                integration.Status.Phase == v1.IntegrationPhaseRunning ||
                integration.Status.Phase == v1.IntegrationPhaseError
 }
 
+func isInInitializationFailed(status v1.IntegrationStatus) bool {
+       if status.Phase == v1.IntegrationPhaseError {
+               cond := status.GetCondition(v1.IntegrationConditionReady)
+               if cond.Status == corev1.ConditionFalse && cond.Reason == 
v1.IntegrationConditionInitializationFailedReason {
+                       return true
+               }
+       }
+
+       return false
+}
+
 func (action *monitorAction) Handle(ctx context.Context, integration 
*v1.Integration) (*v1.Integration, error) {
        // At that staged the Integration must have a Kit
        if integration.Status.IntegrationKit == nil {
@@ -177,7 +194,7 @@ func (action *monitorAction) newController(env 
*trait.Environment, integration *
        var controller controller
        var obj ctrl.Object
        switch {
-       case isConditionTrue(integration, 
v1.IntegrationConditionDeploymentAvailable):
+       case 
integration.IsConditionTrue(v1.IntegrationConditionDeploymentAvailable):
                obj = getUpdatedController(env, &appsv1.Deployment{})
                deploy, ok := obj.(*appsv1.Deployment)
                if !ok {
@@ -187,7 +204,7 @@ func (action *monitorAction) newController(env 
*trait.Environment, integration *
                        obj:         deploy,
                        integration: integration,
                }
-       case isConditionTrue(integration, 
v1.IntegrationConditionKnativeServiceAvailable):
+       case 
integration.IsConditionTrue(v1.IntegrationConditionKnativeServiceAvailable):
                obj = getUpdatedController(env, &servingv1.Service{})
                svc, ok := obj.(*servingv1.Service)
                if !ok {
@@ -197,7 +214,7 @@ func (action *monitorAction) newController(env 
*trait.Environment, integration *
                        obj:         svc,
                        integration: integration,
                }
-       case isConditionTrue(integration, 
v1.IntegrationConditionCronJobAvailable):
+       case 
integration.IsConditionTrue(v1.IntegrationConditionCronJobAvailable):
                obj = getUpdatedController(env, &batchv1.CronJob{})
                cj, ok := obj.(*batchv1.CronJob)
                if !ok {
@@ -257,7 +274,7 @@ func checkPodStatuses(integration *v1.Integration, 
pendingPods []corev1.Pod, run
                // Check the scheduled condition
                if scheduled := kubernetes.GetPodCondition(pod, 
corev1.PodScheduled); scheduled != nil && scheduled.Status == 
corev1.ConditionFalse && scheduled.Reason == "Unschedulable" {
                        integration.Status.Phase = v1.IntegrationPhaseError
-                       setReadyConditionError(integration, scheduled.Message)
+                       integration.SetReadyConditionError(scheduled.Message)
                        return true
                }
        }
@@ -270,7 +287,7 @@ func checkPodStatuses(integration *v1.Integration, 
pendingPods []corev1.Pod, run
                        // Check the images are pulled
                        if waiting := container.State.Waiting; waiting != nil 
&& waiting.Reason == "ImagePullBackOff" {
                                integration.Status.Phase = 
v1.IntegrationPhaseError
-                               setReadyConditionError(integration, 
waiting.Message)
+                               
integration.SetReadyConditionError(waiting.Message)
                                return true
                        }
                }
@@ -287,12 +304,12 @@ func checkPodStatuses(integration *v1.Integration, 
pendingPods []corev1.Pod, run
                        // Check the container state
                        if waiting := container.State.Waiting; waiting != nil 
&& waiting.Reason == "CrashLoopBackOff" {
                                integration.Status.Phase = 
v1.IntegrationPhaseError
-                               setReadyConditionError(integration, 
waiting.Message)
+                               
integration.SetReadyConditionError(waiting.Message)
                                return true
                        }
                        if terminated := container.State.Terminated; terminated 
!= nil && terminated.Reason == "Error" {
                                integration.Status.Phase = 
v1.IntegrationPhaseError
-                               setReadyConditionError(integration, 
terminated.Message)
+                               
integration.SetReadyConditionError(terminated.Message)
                                return true
                        }
                }
@@ -416,7 +433,7 @@ func (action *monitorAction) probeReadiness(ctx 
context.Context, environment *tr
                if integration.Status.Phase == v1.IntegrationPhaseError {
                        reason = v1.IntegrationConditionErrorReason
                }
-               setReadyCondition(integration, corev1.ConditionFalse, reason, 
fmt.Sprintf("%s", runtimeNotReadyMessages))
+               integration.SetReadyCondition(corev1.ConditionFalse, reason, 
fmt.Sprintf("%s", runtimeNotReadyMessages))
        }
 
        return nil
@@ -453,19 +470,3 @@ func getIntegrationContainer(environment 
*trait.Environment, pod *corev1.Pod) *c
        }
        return nil
 }
-
-func isConditionTrue(integration *v1.Integration, conditionType 
v1.IntegrationConditionType) bool {
-       cond := integration.Status.GetCondition(conditionType)
-       if cond == nil {
-               return false
-       }
-       return cond.Status == corev1.ConditionTrue
-}
-
-func setReadyConditionError(integration *v1.Integration, err string) {
-       setReadyCondition(integration, corev1.ConditionFalse, 
v1.IntegrationConditionErrorReason, err)
-}
-
-func setReadyCondition(integration *v1.Integration, status 
corev1.ConditionStatus, reason string, message string) {
-       integration.Status.SetCondition(v1.IntegrationConditionReady, status, 
reason, message)
-}
diff --git a/pkg/controller/integration/monitor_cronjob.go 
b/pkg/controller/integration/monitor_cronjob.go
index 203938943..baea57d5c 100644
--- a/pkg/controller/integration/monitor_cronjob.go
+++ b/pkg/controller/integration/monitor_cronjob.go
@@ -60,7 +60,9 @@ func (c *cronJobController) checkReadyCondition(ctx 
context.Context) (bool, erro
                }
                if c.lastCompletedJob != nil {
                        if failed := 
kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobFailed); failed != 
nil && failed.Status == corev1.ConditionTrue {
-                               setReadyCondition(c.integration, 
corev1.ConditionFalse, v1.IntegrationConditionLastJobFailedReason, 
fmt.Sprintf("last job %s failed: %s", c.lastCompletedJob.Name, failed.Message))
+                               
c.integration.SetReadyCondition(corev1.ConditionFalse,
+                                       
v1.IntegrationConditionLastJobFailedReason,
+                                       fmt.Sprintf("last job %s failed: %s", 
c.lastCompletedJob.Name, failed.Message))
                                c.integration.Status.Phase = 
v1.IntegrationPhaseError
                                return true, nil
                        }
@@ -77,25 +79,30 @@ func (c *cronJobController) getPodSpec() corev1.PodSpec {
 func (c *cronJobController) updateReadyCondition(readyPods []corev1.Pod) bool {
        switch {
        case c.obj.Status.LastScheduleTime == nil:
-               setReadyCondition(c.integration, corev1.ConditionTrue, 
v1.IntegrationConditionCronJobCreatedReason, "cronjob created")
+               c.integration.SetReadyCondition(corev1.ConditionTrue,
+                       v1.IntegrationConditionCronJobCreatedReason, "cronjob 
created")
                return true
 
        case len(c.obj.Status.Active) > 0:
-               setReadyCondition(c.integration, corev1.ConditionTrue, 
v1.IntegrationConditionCronJobActiveReason, "cronjob active")
+               c.integration.SetReadyCondition(corev1.ConditionTrue,
+                       v1.IntegrationConditionCronJobActiveReason, "cronjob 
active")
                return true
 
        case c.obj.Spec.SuccessfulJobsHistoryLimit != nil && 
*c.obj.Spec.SuccessfulJobsHistoryLimit == 0 && 
c.obj.Spec.FailedJobsHistoryLimit != nil && *c.obj.Spec.FailedJobsHistoryLimit 
== 0:
-               setReadyCondition(c.integration, corev1.ConditionTrue, 
v1.IntegrationConditionCronJobCreatedReason, "no jobs history available")
+               c.integration.SetReadyCondition(corev1.ConditionTrue,
+                       v1.IntegrationConditionCronJobCreatedReason, "no jobs 
history available")
                return true
 
        case c.lastCompletedJob != nil:
                if complete := kubernetes.GetJobCondition(*c.lastCompletedJob, 
batchv1.JobComplete); complete != nil && complete.Status == 
corev1.ConditionTrue {
-                       setReadyCondition(c.integration, corev1.ConditionTrue, 
v1.IntegrationConditionLastJobSucceededReason, fmt.Sprintf("last job %s 
completed successfully", c.lastCompletedJob.Name))
+                       c.integration.SetReadyCondition(corev1.ConditionTrue,
+                               v1.IntegrationConditionLastJobSucceededReason,
+                               fmt.Sprintf("last job %s completed 
successfully", c.lastCompletedJob.Name))
                        return true
                }
 
        default:
-               setReadyCondition(c.integration, corev1.ConditionUnknown, "", 
"")
+               c.integration.SetReadyCondition(corev1.ConditionUnknown, "", "")
        }
 
        return false
diff --git a/pkg/controller/integration/monitor_deployment.go 
b/pkg/controller/integration/monitor_deployment.go
index 097fa73d5..96f2b5948 100644
--- a/pkg/controller/integration/monitor_deployment.go
+++ b/pkg/controller/integration/monitor_deployment.go
@@ -39,7 +39,7 @@ func (c *deploymentController) checkReadyCondition(ctx 
context.Context) (bool, e
        // Check the Deployment progression
        if progressing := kubernetes.GetDeploymentCondition(*c.obj, 
appsv1.DeploymentProgressing); progressing != nil && progressing.Status == 
corev1.ConditionFalse && progressing.Reason == "ProgressDeadlineExceeded" {
                c.integration.Status.Phase = v1.IntegrationPhaseError
-               setReadyConditionError(c.integration, progressing.Message)
+               c.integration.SetReadyConditionError(progressing.Message)
                return true, nil
        }
 
@@ -64,14 +64,20 @@ func (c *deploymentController) 
updateReadyCondition(readyPods []corev1.Pod) bool
                // reported to be ready is larger than or equal to the 
specified number
                // of replicas. This avoids reporting a falsy readiness 
condition
                // when the Integration is being down-scaled.
-               setReadyCondition(c.integration, corev1.ConditionTrue, 
v1.IntegrationConditionDeploymentReadyReason, fmt.Sprintf("%d/%d ready 
replicas", readyReplicas, replicas))
+               c.integration.SetReadyCondition(corev1.ConditionTrue,
+                       v1.IntegrationConditionDeploymentReadyReason,
+                       fmt.Sprintf("%d/%d ready replicas", readyReplicas, 
replicas))
                return true
 
        case c.obj.Status.UpdatedReplicas < replicas:
-               setReadyCondition(c.integration, corev1.ConditionFalse, 
v1.IntegrationConditionDeploymentProgressingReason, fmt.Sprintf("%d/%d updated 
replicas", c.obj.Status.UpdatedReplicas, replicas))
+               c.integration.SetReadyCondition(corev1.ConditionFalse,
+                       v1.IntegrationConditionDeploymentProgressingReason,
+                       fmt.Sprintf("%d/%d updated replicas", 
c.obj.Status.UpdatedReplicas, replicas))
 
        default:
-               setReadyCondition(c.integration, corev1.ConditionFalse, 
v1.IntegrationConditionDeploymentProgressingReason, fmt.Sprintf("%d/%d ready 
replicas", readyReplicas, replicas))
+               c.integration.SetReadyCondition(corev1.ConditionFalse,
+                       v1.IntegrationConditionDeploymentProgressingReason,
+                       fmt.Sprintf("%d/%d ready replicas", readyReplicas, 
replicas))
        }
 
        return false
diff --git a/pkg/controller/integration/monitor_knative.go 
b/pkg/controller/integration/monitor_knative.go
index c8182266e..22b902ef2 100644
--- a/pkg/controller/integration/monitor_knative.go
+++ b/pkg/controller/integration/monitor_knative.go
@@ -39,7 +39,7 @@ func (c *knativeServiceController) checkReadyCondition(ctx 
context.Context) (boo
        // Check the KnativeService conditions
        if ready := kubernetes.GetKnativeServiceCondition(*c.obj, 
servingv1.ServiceConditionReady); ready.IsFalse() && ready.GetReason() == 
"RevisionFailed" {
                c.integration.Status.Phase = v1.IntegrationPhaseError
-               setReadyConditionError(c.integration, ready.Message)
+               c.integration.SetReadyConditionError(ready.Message)
                return true, nil
        }
 
@@ -53,10 +53,12 @@ func (c *knativeServiceController) getPodSpec() 
corev1.PodSpec {
 func (c *knativeServiceController) updateReadyCondition(readyPods 
[]corev1.Pod) bool {
        ready := kubernetes.GetKnativeServiceCondition(*c.obj, 
servingv1.ServiceConditionReady)
        if ready.IsTrue() {
-               setReadyCondition(c.integration, corev1.ConditionTrue, 
v1.IntegrationConditionKnativeServiceReadyReason, "")
+               c.integration.SetReadyCondition(corev1.ConditionTrue,
+                       v1.IntegrationConditionKnativeServiceReadyReason, "")
                return true
        }
-       setReadyCondition(c.integration, corev1.ConditionFalse, 
ready.GetReason(), ready.GetMessage())
+       c.integration.SetReadyCondition(corev1.ConditionFalse,
+               ready.GetReason(), ready.GetMessage())
 
        return false
 }

Reply via email to the mailing list.