This is an automated email from the ASF dual-hosted git repository.

tsato pushed a commit to branch release-1.10.x
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit e87f42ad4778133b8b185b012d733b646ebef534
Author: Tadayoshi Sato <sato.tadayo...@gmail.com>
AuthorDate: Wed Nov 16 14:54:10 2022 +0900

    chore: reformat long lines
---
 pkg/controller/integration/monitor.go            | 31 +++++++++++++++++-------
 pkg/controller/integration/monitor_cronjob.go    | 26 ++++++++++++++------
 pkg/controller/integration/monitor_deployment.go |  4 ++-
 pkg/controller/integration/monitor_knative.go    |  3 ++-
 4 files changed, 45 insertions(+), 19 deletions(-)

diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go
index df8f426f3..fe59feea2 100644
--- a/pkg/controller/integration/monitor.go
+++ b/pkg/controller/integration/monitor.go
@@ -99,7 +99,8 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra
        if !ok {
                priority = "0"
        }
-       withHigherPriority, err := labels.NewRequirement(v1.IntegrationKitPriorityLabel, selection.GreaterThan, []string{priority})
+       withHigherPriority, err := labels.NewRequirement(v1.IntegrationKitPriorityLabel,
+               selection.GreaterThan, []string{priority})
        if err != nil {
                return nil, err
        }
@@ -159,8 +160,9 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra
        if integration.Status.Phase == v1.IntegrationPhaseDeploying {
                integration.Status.Phase = v1.IntegrationPhaseRunning
        }
-       err = action.updateIntegrationPhaseAndReadyCondition(ctx, environment, integration, pendingPods.Items, runningPods.Items)
-       if err != nil {
+       if err = action.updateIntegrationPhaseAndReadyCondition(
+               ctx, environment, integration, pendingPods.Items, runningPods.Items,
+       ); err != nil {
                return nil, err
        }
 
@@ -226,7 +228,10 @@ func getUpdatedController(env *trait.Environment, obj ctrl.Object) ctrl.Object {
        })
 }
 
-func (action *monitorAction) updateIntegrationPhaseAndReadyCondition(ctx context.Context, environment *trait.Environment, integration *v1.Integration, pendingPods []corev1.Pod, runningPods []corev1.Pod) error {
+func (action *monitorAction) updateIntegrationPhaseAndReadyCondition(
+       ctx context.Context, environment *trait.Environment, integration *v1.Integration,
+       pendingPods []corev1.Pod, runningPods []corev1.Pod,
+) error {
        controller, err := action.newController(environment, integration)
        if err != nil {
                return err
@@ -255,7 +260,9 @@ func checkPodStatuses(integration *v1.Integration, pendingPods []corev1.Pod, run
        // Check Pods statuses
        for _, pod := range pendingPods {
                // Check the scheduled condition
-               if scheduled := kubernetes.GetPodCondition(pod, corev1.PodScheduled); scheduled != nil && scheduled.Status == corev1.ConditionFalse && scheduled.Reason == "Unschedulable" {
+               if scheduled := kubernetes.GetPodCondition(pod, corev1.PodScheduled); scheduled != nil &&
+                       scheduled.Status == corev1.ConditionFalse &&
+                       scheduled.Reason == "Unschedulable" {
                        integration.Status.Phase = v1.IntegrationPhaseError
                        setReadyConditionError(integration, scheduled.Message)
                        return true
@@ -372,7 +379,10 @@ func findIntegrationContainer(spec corev1.PodSpec) *corev1.Container {
 }
 
 // probeReadiness calls the readiness probes of the non-ready Pods directly to retrieve insights from the Camel runtime.
-func (action *monitorAction) probeReadiness(ctx context.Context, environment *trait.Environment, integration *v1.Integration, unreadyPods []corev1.Pod) error {
+func (action *monitorAction) probeReadiness(
+       ctx context.Context, environment *trait.Environment, integration *v1.Integration,
+       unreadyPods []corev1.Pod,
+) error {
        var runtimeNotReadyMessages []string
        for i := range unreadyPods {
                pod := &unreadyPods[i]
@@ -389,11 +399,13 @@ func (action *monitorAction) probeReadiness(ctx context.Context, environment *tr
                                continue
                        }
                        if errors.Is(err, context.DeadlineExceeded) {
-                               runtimeNotReadyMessages = append(runtimeNotReadyMessages, fmt.Sprintf("readiness probe timed out for Pod %s/%s", pod.Namespace, pod.Name))
+                               runtimeNotReadyMessages = append(runtimeNotReadyMessages,
+                                       fmt.Sprintf("readiness probe timed out for Pod %s/%s", pod.Namespace, pod.Name))
                                continue
                        }
                        if !k8serrors.IsServiceUnavailable(err) {
-                               runtimeNotReadyMessages = append(runtimeNotReadyMessages, fmt.Sprintf("readiness probe failed for Pod %s/%s: %s", pod.Namespace, pod.Name, err.Error()))
+                               runtimeNotReadyMessages = append(runtimeNotReadyMessages,
+                                       fmt.Sprintf("readiness probe failed for Pod %s/%s: %s", pod.Namespace, pod.Name, err.Error()))
                                continue
                        }
                        health, err := NewHealthCheck(body)
@@ -407,7 +419,8 @@ func (action *monitorAction) probeReadiness(ctx context.Context, environment *tr
                                if _, ok := check.Data[runtimeHealthCheckErrorMessage]; ok {
                                        integration.Status.Phase = v1.IntegrationPhaseError
                                }
-                               runtimeNotReadyMessages = append(runtimeNotReadyMessages, fmt.Sprintf("Pod %s runtime is not ready: %s", pod.Name, check.Data))
+                               runtimeNotReadyMessages = append(runtimeNotReadyMessages,
+                                       fmt.Sprintf("Pod %s runtime is not ready: %s", pod.Name, check.Data))
                        }
                }
        }
diff --git a/pkg/controller/integration/monitor_cronjob.go b/pkg/controller/integration/monitor_cronjob.go
index 203938943..fed9dadd1 100644
--- a/pkg/controller/integration/monitor_cronjob.go
+++ b/pkg/controller/integration/monitor_cronjob.go
@@ -59,8 +59,11 @@ func (c *cronJobController) checkReadyCondition(ctx context.Context) (bool, erro
                        t = c.lastCompletedJob.CreationTimestamp.Time
                }
                if c.lastCompletedJob != nil {
-                       if failed := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobFailed); failed != nil && failed.Status == corev1.ConditionTrue {
-                               setReadyCondition(c.integration, corev1.ConditionFalse, v1.IntegrationConditionLastJobFailedReason, fmt.Sprintf("last job %s failed: %s", c.lastCompletedJob.Name, failed.Message))
+                       if failed := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobFailed); failed != nil &&
+                               failed.Status == corev1.ConditionTrue {
+                               setReadyCondition(c.integration, corev1.ConditionFalse,
+                                       v1.IntegrationConditionLastJobFailedReason,
+                                       fmt.Sprintf("last job %s failed: %s", c.lastCompletedJob.Name, failed.Message))
                                c.integration.Status.Phase = v1.IntegrationPhaseError
                                return true, nil
                        }
@@ -77,20 +80,27 @@ func (c *cronJobController) getPodSpec() corev1.PodSpec {
 func (c *cronJobController) updateReadyCondition(readyPods []corev1.Pod) bool {
        switch {
        case c.obj.Status.LastScheduleTime == nil:
-               setReadyCondition(c.integration, corev1.ConditionTrue, v1.IntegrationConditionCronJobCreatedReason, "cronjob created")
+               setReadyCondition(c.integration, corev1.ConditionTrue,
+                       v1.IntegrationConditionCronJobCreatedReason, "cronjob created")
                return true
 
        case len(c.obj.Status.Active) > 0:
-               setReadyCondition(c.integration, corev1.ConditionTrue, v1.IntegrationConditionCronJobActiveReason, "cronjob active")
+               setReadyCondition(c.integration, corev1.ConditionTrue,
+                       v1.IntegrationConditionCronJobActiveReason, "cronjob active")
                return true
 
-       case c.obj.Spec.SuccessfulJobsHistoryLimit != nil && *c.obj.Spec.SuccessfulJobsHistoryLimit == 0 && c.obj.Spec.FailedJobsHistoryLimit != nil && *c.obj.Spec.FailedJobsHistoryLimit == 0:
-               setReadyCondition(c.integration, corev1.ConditionTrue, v1.IntegrationConditionCronJobCreatedReason, "no jobs history available")
+       case c.obj.Spec.SuccessfulJobsHistoryLimit != nil && *c.obj.Spec.SuccessfulJobsHistoryLimit == 0 &&
+               c.obj.Spec.FailedJobsHistoryLimit != nil && *c.obj.Spec.FailedJobsHistoryLimit == 0:
+               setReadyCondition(c.integration, corev1.ConditionTrue,
+                       v1.IntegrationConditionCronJobCreatedReason, "no jobs history available")
                return true
 
        case c.lastCompletedJob != nil:
-               if complete := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobComplete); complete != nil && complete.Status == corev1.ConditionTrue {
-                       setReadyCondition(c.integration, corev1.ConditionTrue, v1.IntegrationConditionLastJobSucceededReason, fmt.Sprintf("last job %s completed successfully", c.lastCompletedJob.Name))
+               if complete := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobComplete); complete != nil &&
+                       complete.Status == corev1.ConditionTrue {
+                       setReadyCondition(c.integration, corev1.ConditionTrue,
+                               v1.IntegrationConditionLastJobSucceededReason,
+                               fmt.Sprintf("last job %s completed successfully", c.lastCompletedJob.Name))
                        return true
                }
 
diff --git a/pkg/controller/integration/monitor_deployment.go b/pkg/controller/integration/monitor_deployment.go
index 097fa73d5..adf154799 100644
--- a/pkg/controller/integration/monitor_deployment.go
+++ b/pkg/controller/integration/monitor_deployment.go
@@ -37,7 +37,9 @@ var _ controller = &deploymentController{}
 
 func (c *deploymentController) checkReadyCondition(ctx context.Context) (bool, error) {
        // Check the Deployment progression
-       if progressing := kubernetes.GetDeploymentCondition(*c.obj, appsv1.DeploymentProgressing); progressing != nil && progressing.Status == corev1.ConditionFalse && progressing.Reason == "ProgressDeadlineExceeded" {
+       if progressing := kubernetes.GetDeploymentCondition(*c.obj, appsv1.DeploymentProgressing); progressing != nil &&
+               progressing.Status == corev1.ConditionFalse &&
+               progressing.Reason == "ProgressDeadlineExceeded" {
                c.integration.Status.Phase = v1.IntegrationPhaseError
                setReadyConditionError(c.integration, progressing.Message)
                return true, nil
diff --git a/pkg/controller/integration/monitor_knative.go b/pkg/controller/integration/monitor_knative.go
index c8182266e..d95f0d1b0 100644
--- a/pkg/controller/integration/monitor_knative.go
+++ b/pkg/controller/integration/monitor_knative.go
@@ -37,7 +37,8 @@ var _ controller = &knativeServiceController{}
 
 func (c *knativeServiceController) checkReadyCondition(ctx context.Context) (bool, error) {
        // Check the KnativeService conditions
-       if ready := kubernetes.GetKnativeServiceCondition(*c.obj, servingv1.ServiceConditionReady); ready.IsFalse() && ready.GetReason() == "RevisionFailed" {
+       if ready := kubernetes.GetKnativeServiceCondition(*c.obj, servingv1.ServiceConditionReady); ready.IsFalse() &&
+               ready.GetReason() == "RevisionFailed" {
                c.integration.Status.Phase = v1.IntegrationPhaseError
                setReadyConditionError(c.integration, ready.Message)
                return true, nil
