This is an automated email from the ASF dual-hosted git repository.

tsato pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 89951b26bbb8f04f7b41084a8d86d172da1032eb
Author: Tadayoshi Sato <sato.tadayo...@gmail.com>
AuthorDate: Wed Nov 16 14:54:10 2022 +0900

    chore: reformat long lines
---
 pkg/controller/integration/monitor.go            | 38 +++++++++++++++++-------
 pkg/controller/integration/monitor_cronjob.go    |  9 ++++--
 pkg/controller/integration/monitor_deployment.go |  4 ++-
 pkg/controller/integration/monitor_knative.go    |  3 +-
 4 files changed, 38 insertions(+), 16 deletions(-)

diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go
index 03aa86c67..a42456be7 100644
--- a/pkg/controller/integration/monitor.go
+++ b/pkg/controller/integration/monitor.go
@@ -94,7 +94,8 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra
        if !ok {
                priority = "0"
        }
-       withHigherPriority, err := labels.NewRequirement(v1.IntegrationKitPriorityLabel, selection.GreaterThan, []string{priority})
+       withHigherPriority, err := labels.NewRequirement(v1.IntegrationKitPriorityLabel,
+               selection.GreaterThan, []string{priority})
        if err != nil {
                return nil, err
        }
@@ -154,8 +155,9 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra
        if integration.Status.Phase == v1.IntegrationPhaseDeploying {
                integration.Status.Phase = v1.IntegrationPhaseRunning
        }
-       err = action.updateIntegrationPhaseAndReadyCondition(ctx, environment, integration, pendingPods.Items, runningPods.Items)
-       if err != nil {
+       if err = action.updateIntegrationPhaseAndReadyCondition(
+               ctx, environment, integration, pendingPods.Items, runningPods.Items,
+       ); err != nil {
                return nil, err
        }
 
@@ -176,7 +178,9 @@ func isInInitializationFailed(status v1.IntegrationStatus) bool {
        return false
 }
 
-func (action *monitorAction) checkDigestAndRebuild(integration *v1.Integration, kit *v1.IntegrationKit) (*v1.Integration, error) {
+func (action *monitorAction) checkDigestAndRebuild(
+       integration *v1.Integration, kit *v1.IntegrationKit,
+) (*v1.Integration, error) {
        hash, err := digest.ComputeForIntegration(integration)
        if err != nil {
                return nil, err
@@ -188,7 +192,8 @@ func (action *monitorAction) checkDigestAndRebuild(integration *v1.Integration,
                if kit != nil &&
                        v1.GetOperatorIDAnnotation(integration) != "" &&
                        v1.GetOperatorIDAnnotation(integration) != v1.GetOperatorIDAnnotation(kit) {
-                       // Operator to reconcile the integration has changed. Reset integration kit so new operator can handle the kit reference
+                       // Operator to reconcile the integration has changed. Reset integration kit
+                       // so new operator can handle the kit reference
                        integration.SetIntegrationKit(nil)
                }
 
@@ -260,7 +265,10 @@ func getUpdatedController(env *trait.Environment, obj ctrl.Object) ctrl.Object {
        })
 }
 
-func (action *monitorAction) updateIntegrationPhaseAndReadyCondition(ctx context.Context, environment *trait.Environment, integration *v1.Integration, pendingPods []corev1.Pod, runningPods []corev1.Pod) error {
+func (action *monitorAction) updateIntegrationPhaseAndReadyCondition(
+       ctx context.Context, environment *trait.Environment, integration *v1.Integration,
+       pendingPods []corev1.Pod, runningPods []corev1.Pod,
+) error {
        controller, err := action.newController(environment, integration)
        if err != nil {
                return err
@@ -289,7 +297,9 @@ func checkPodStatuses(integration *v1.Integration, pendingPods []corev1.Pod, run
        // Check Pods statuses
        for _, pod := range pendingPods {
                // Check the scheduled condition
-               if scheduled := kubernetes.GetPodCondition(pod, corev1.PodScheduled); scheduled != nil && scheduled.Status == corev1.ConditionFalse && scheduled.Reason == "Unschedulable" {
+               if scheduled := kubernetes.GetPodCondition(pod, corev1.PodScheduled); scheduled != nil &&
+                       scheduled.Status == corev1.ConditionFalse &&
+                       scheduled.Reason == "Unschedulable" {
                        integration.Status.Phase = v1.IntegrationPhaseError
                        integration.SetReadyConditionError(scheduled.Message)
                        return true
@@ -406,7 +416,10 @@ func findIntegrationContainer(spec corev1.PodSpec) *corev1.Container {
 }
 
 // probeReadiness calls the readiness probes of the non-ready Pods directly to retrieve insights from the Camel runtime.
-func (action *monitorAction) probeReadiness(ctx context.Context, environment *trait.Environment, integration *v1.Integration, unreadyPods []corev1.Pod) error {
+func (action *monitorAction) probeReadiness(
+       ctx context.Context, environment *trait.Environment, integration *v1.Integration,
+       unreadyPods []corev1.Pod,
+) error {
        var runtimeNotReadyMessages []string
        for i := range unreadyPods {
                pod := &unreadyPods[i]
@@ -423,11 +436,13 @@ func (action *monitorAction) probeReadiness(ctx context.Context, environment *tr
                                continue
                        }
                        if errors.Is(err, context.DeadlineExceeded) {
-                               runtimeNotReadyMessages = append(runtimeNotReadyMessages, fmt.Sprintf("readiness probe timed out for Pod %s/%s", pod.Namespace, pod.Name))
+                               runtimeNotReadyMessages = append(runtimeNotReadyMessages,
+                                       fmt.Sprintf("readiness probe timed out for Pod %s/%s", pod.Namespace, pod.Name))
                                continue
                        }
                        if !k8serrors.IsServiceUnavailable(err) {
-                               runtimeNotReadyMessages = append(runtimeNotReadyMessages, fmt.Sprintf("readiness probe failed for Pod %s/%s: %s", pod.Namespace, pod.Name, err.Error()))
+                               runtimeNotReadyMessages = append(runtimeNotReadyMessages,
+                                       fmt.Sprintf("readiness probe failed for Pod %s/%s: %s", pod.Namespace, pod.Name, err.Error()))
                                continue
                        }
                        health, err := NewHealthCheck(body)
@@ -441,7 +456,8 @@ func (action *monitorAction) probeReadiness(ctx context.Context, environment *tr
                                if _, ok := check.Data[runtimeHealthCheckErrorMessage]; ok {
                                        integration.Status.Phase = v1.IntegrationPhaseError
                                }
-                               runtimeNotReadyMessages = append(runtimeNotReadyMessages, fmt.Sprintf("Pod %s runtime is not ready: %s", pod.Name, check.Data))
+                               runtimeNotReadyMessages = append(runtimeNotReadyMessages,
+                                       fmt.Sprintf("Pod %s runtime is not ready: %s", pod.Name, check.Data))
                        }
                }
        }
diff --git a/pkg/controller/integration/monitor_cronjob.go b/pkg/controller/integration/monitor_cronjob.go
index baea57d5c..fcec451f8 100644
--- a/pkg/controller/integration/monitor_cronjob.go
+++ b/pkg/controller/integration/monitor_cronjob.go
@@ -59,7 +59,8 @@ func (c *cronJobController) checkReadyCondition(ctx context.Context) (bool, erro
                        t = c.lastCompletedJob.CreationTimestamp.Time
                }
                if c.lastCompletedJob != nil {
-                       if failed := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobFailed); failed != nil && failed.Status == corev1.ConditionTrue {
+                       if failed := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobFailed); failed != nil &&
+                               failed.Status == corev1.ConditionTrue {
                                c.integration.SetReadyCondition(corev1.ConditionFalse,
                                        v1.IntegrationConditionLastJobFailedReason,
                                        fmt.Sprintf("last job %s failed: %s", c.lastCompletedJob.Name, failed.Message))
@@ -88,13 +89,15 @@ func (c *cronJobController) updateReadyCondition(readyPods []corev1.Pod) bool {
                        v1.IntegrationConditionCronJobActiveReason, "cronjob active")
                return true
 
-       case c.obj.Spec.SuccessfulJobsHistoryLimit != nil && *c.obj.Spec.SuccessfulJobsHistoryLimit == 0 && c.obj.Spec.FailedJobsHistoryLimit != nil && *c.obj.Spec.FailedJobsHistoryLimit == 0:
+       case c.obj.Spec.SuccessfulJobsHistoryLimit != nil && *c.obj.Spec.SuccessfulJobsHistoryLimit == 0 &&
+               c.obj.Spec.FailedJobsHistoryLimit != nil && *c.obj.Spec.FailedJobsHistoryLimit == 0:
                c.integration.SetReadyCondition(corev1.ConditionTrue,
                        v1.IntegrationConditionCronJobCreatedReason, "no jobs history available")
                return true
 
        case c.lastCompletedJob != nil:
-               if complete := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobComplete); complete != nil && complete.Status == corev1.ConditionTrue {
+               if complete := kubernetes.GetJobCondition(*c.lastCompletedJob, batchv1.JobComplete); complete != nil &&
+                       complete.Status == corev1.ConditionTrue {
                        c.integration.SetReadyCondition(corev1.ConditionTrue,
                                v1.IntegrationConditionLastJobSucceededReason,
                                fmt.Sprintf("last job %s completed 
successfully", c.lastCompletedJob.Name))
diff --git a/pkg/controller/integration/monitor_deployment.go 
b/pkg/controller/integration/monitor_deployment.go
index 96f2b5948..8ba876781 100644
--- a/pkg/controller/integration/monitor_deployment.go
+++ b/pkg/controller/integration/monitor_deployment.go
@@ -37,7 +37,9 @@ var _ controller = &deploymentController{}
 
 func (c *deploymentController) checkReadyCondition(ctx context.Context) (bool, error) {
        // Check the Deployment progression
-       if progressing := kubernetes.GetDeploymentCondition(*c.obj, appsv1.DeploymentProgressing); progressing != nil && progressing.Status == corev1.ConditionFalse && progressing.Reason == "ProgressDeadlineExceeded" {
+       if progressing := kubernetes.GetDeploymentCondition(*c.obj, appsv1.DeploymentProgressing); progressing != nil &&
+               progressing.Status == corev1.ConditionFalse &&
+               progressing.Reason == "ProgressDeadlineExceeded" {
                c.integration.Status.Phase = v1.IntegrationPhaseError
                c.integration.SetReadyConditionError(progressing.Message)
                return true, nil
diff --git a/pkg/controller/integration/monitor_knative.go b/pkg/controller/integration/monitor_knative.go
index 22b902ef2..f1ef4b055 100644
--- a/pkg/controller/integration/monitor_knative.go
+++ b/pkg/controller/integration/monitor_knative.go
@@ -37,7 +37,8 @@ var _ controller = &knativeServiceController{}
 
 func (c *knativeServiceController) checkReadyCondition(ctx context.Context) (bool, error) {
        // Check the KnativeService conditions
-       if ready := kubernetes.GetKnativeServiceCondition(*c.obj, servingv1.ServiceConditionReady); ready.IsFalse() && ready.GetReason() == "RevisionFailed" {
+       if ready := kubernetes.GetKnativeServiceCondition(*c.obj, servingv1.ServiceConditionReady); ready.IsFalse() &&
+               ready.GetReason() == "RevisionFailed" {
                c.integration.Status.Phase = v1.IntegrationPhaseError
                c.integration.SetReadyConditionError(ready.Message)
                return true, nil
