Commit 8a1a5d4

Author: Per G. da Silva (committed)

Refactor to use new boxcutter api

Signed-off-by: Per G. da Silva <pegoncal@redhat.com>

1 parent 917c701 · commit 8a1a5d4

File tree

1 file changed: +42 -57 lines changed


internal/operator-controller/controllers/clusterextensionrevision_controller.go

Lines changed: 42 additions & 57 deletions
@@ -129,33 +129,25 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 		return ctrl.Result{}, fmt.Errorf("error ensuring teardown finalizer: %v", err)
 	}
 
-	// If the Available condition is not present, we are still rolling out the objects
-	inRollout := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) == nil
-	if inRollout {
-		if err := c.establishWatch(ctx, rev, revision); err != nil {
-			werr := fmt.Errorf("establish watch: %v", err)
-			// this error is very likely transient, so we should keep revision as progressing
-			rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonReconcileFailure, werr.Error())
-			return ctrl.Result{}, werr
-		}
-		rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonRolloutInProgress, "Revision is being rolled out.")
+	if err := c.establishWatch(ctx, rev, revision); err != nil {
+		werr := fmt.Errorf("establish watch: %v", err)
+		// this error is very likely transient, so we should keep revision as progressing
+		rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonReconcileFailure, werr.Error())
+		return ctrl.Result{}, werr
 	}
 
 	rres, err := c.RevisionEngine.Reconcile(ctx, *revision, opts...)
 	if err != nil {
-		if inRollout {
-			rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonRolloutError, err.Error())
-		} else {
-			// it is a probably transient error, and we do not know if the revision is available or not
-			// perhaps we should not report it at all, hoping that it is going to be mitigated in the next reconcile?
-			meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
-				Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
-				Status:             metav1.ConditionUnknown,
-				Reason:             ocv1.ClusterExtensionRevisionReasonReconcileFailure,
-				Message:            err.Error(),
-				ObservedGeneration: rev.Generation,
-			})
-		}
+		rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonRolloutError, err.Error())
+		// it is a probably transient error, and we do not know if the revision is available or not
+		// perhaps we should not report it at all, hoping that it is going to be mitigated in the next reconcile?
+		meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
+			Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
+			Status:             metav1.ConditionUnknown,
+			Reason:             ocv1.ClusterExtensionRevisionReasonReconcileFailure,
+			Message:            err.Error(),
+			ObservedGeneration: rev.Generation,
+		})
 		return ctrl.Result{}, fmt.Errorf("revision reconcile: %v", err)
 	}
 	l.Info("reconcile report", "report", rres.String())
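
Note on the condition helpers used throughout this hunk: rev.MarkAsProgressing and rev.MarkAsNotProgressing are defined outside this diff. Below is a minimal sketch of the shape such helpers typically have, assuming they wrap meta.SetStatusCondition over a Progressing condition type; the constant name ClusterExtensionRevisionTypeProgressing and the trimmed-down struct are assumptions for illustration, not code from this commit.

// Sketch only: a plausible shape for the MarkAsProgressing / MarkAsNotProgressing
// helpers called in the hunks above. The condition type constant and the
// trimmed-down type below are assumptions, not taken from this commit.
package sketch

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed condition type name for this sketch.
const ClusterExtensionRevisionTypeProgressing = "Progressing"

// ClusterExtensionRevision is trimmed down to the fields the helpers need.
type ClusterExtensionRevision struct {
	metav1.ObjectMeta
	Status struct {
		Conditions []metav1.Condition
	}
}

// MarkAsProgressing sets Progressing=True with the given reason and message.
func (r *ClusterExtensionRevision) MarkAsProgressing(reason, message string) {
	meta.SetStatusCondition(&r.Status.Conditions, metav1.Condition{
		Type:               ClusterExtensionRevisionTypeProgressing,
		Status:             metav1.ConditionTrue,
		Reason:             reason,
		Message:            message,
		ObservedGeneration: r.Generation,
	})
}

// MarkAsNotProgressing sets Progressing=False, e.g. once the rollout is complete.
func (r *ClusterExtensionRevision) MarkAsNotProgressing(reason, message string) {
	meta.SetStatusCondition(&r.Status.Conditions, metav1.Condition{
		Type:               ClusterExtensionRevisionTypeProgressing,
		Status:             metav1.ConditionFalse,
		Reason:             reason,
		Message:            message,
		ObservedGeneration: r.Generation,
	})
}
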
@@ -165,41 +157,35 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 	if verr := rres.GetValidationError(); verr != nil {
 		l.Info("preflight error, retrying after 10s", "err", verr.String())
 
-		if inRollout {
+		// given that we retry, we are going to keep Progressing condition True
+		rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonRevisionValidationFailure, fmt.Sprintf("revision validation error: %s", verr))
+		// it is a probably transient error, and we do not know if the revision is available or not
+		// perhaps we should not report it at all, hoping that it is going to be mitigated in the next reconcile?
+		meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
+			Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
+			Status:             metav1.ConditionUnknown,
+			Reason:             ocv1.ClusterExtensionRevisionReasonReconcileFailure,
+			Message:            fmt.Sprintf("revision validation error: %s", verr),
+			ObservedGeneration: rev.Generation,
+		})
+		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+	}
+
+	for i, pres := range rres.GetPhases() {
+		if verr := pres.GetValidationError(); verr != nil {
+			l.Info("preflight error, retrying after 10s", "err", verr.String())
+
 			// given that we retry, we are going to keep Progressing condition True
-			rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonRevisionValidationFailure, fmt.Sprintf("revision validation error: %s", verr))
-		} else {
+			rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonPhaseValidationError, fmt.Sprintf("phase %d validation error: %s", i, verr))
 			// it is a probably transient error, and we do not know if the revision is available or not
 			// perhaps we should not report it at all, hoping that it is going to be mitigated in the next reconcile?
 			meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
 				Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
 				Status:             metav1.ConditionUnknown,
-				Reason:             ocv1.ClusterExtensionRevisionReasonReconcileFailure,
-				Message:            fmt.Sprintf("revision validation error: %s", verr),
+				Reason:             ocv1.ClusterExtensionRevisionReasonPhaseValidationError,
+				Message:            fmt.Sprintf("phase %d validation error: %s", i, verr),
 				ObservedGeneration: rev.Generation,
 			})
-		}
-		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
-	}
-
-	for i, pres := range rres.GetPhases() {
-		if verr := pres.GetValidationError(); verr != nil {
-			l.Info("preflight error, retrying after 10s", "err", verr.String())
-
-			if inRollout {
-				// given that we retry, we are going to keep Progressing condition True
-				rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonPhaseValidationError, fmt.Sprintf("phase %d validation error: %s", i, verr))
-			} else {
-				// it is a probably transient error, and we do not know if the revision is available or not
-				// perhaps we should not report it at all, hoping that it is going to be mitigated in the next reconcile?
-				meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
-					Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
-					Status:             metav1.ConditionUnknown,
-					Reason:             ocv1.ClusterExtensionRevisionReasonPhaseValidationError,
-					Message:            fmt.Sprintf("phase %d validation error: %s", i, verr),
-					ObservedGeneration: rev.Generation,
-				})
-			}
 			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
 		}
 
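
Both validation branches above now follow the same tail: keep Progressing=True, set Available=Unknown, and requeue after 10s. Purely for illustration (not part of this commit), that shared tail could read as the hypothetical helper below; it assumes the imports and types already present in clusterextensionrevision_controller.go, and takes two reasons because the revision-level branch reports RevisionValidationFailure on Progressing but ReconcileFailure on Available.

// Hypothetical helper, not part of this commit: the shared tail of both
// validation branches above. Assumes the surrounding file's imports
// (ctrl, meta, metav1, ocv1, time) and the existing MarkAsProgressing helper.
func setValidationStatus(rev *ocv1.ClusterExtensionRevision, progressingReason, availableReason, msg string) (ctrl.Result, error) {
	// Keep the rollout marked as progressing; the validation error is likely transient.
	rev.MarkAsProgressing(progressingReason, msg)
	// We do not know whether the revision is available, mirroring the comment in the diff.
	meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
		Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
		Status:             metav1.ConditionUnknown,
		Reason:             availableReason,
		Message:            msg,
		ObservedGeneration: rev.Generation,
	})
	// Retry after 10s, matching the requeue interval used in the diff.
	return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
}
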
@@ -214,17 +200,16 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 			l.Info("object collision error, retrying after 10s", "collisions", collidingObjs)
 			// collisions are probably stickier than phase roll out probe failures - so we'd probably want to set
 			// Progressing to false here due to the collision
-			if inRollout {
-				rev.MarkAsNotProgressing(ocv1.ClusterExtensionRevisionReasonObjectCollisions, fmt.Sprintf("revision object collisions in phase %d\n%s", i, strings.Join(collidingObjs, "\n\n")))
+			rev.MarkAsNotProgressing(ocv1.ClusterExtensionRevisionReasonObjectCollisions, fmt.Sprintf("revision object collisions in phase %d\n%s", i, strings.Join(collidingObjs, "\n\n")))
 
-				// NOTE(pedjak): not sure if we want to retry here - collisions are probably not transient?
-				return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
-			}
+			// NOTE(pedjak): not sure if we want to retry here - collisions are probably not transient?
+			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
 		}
 	}
 
-	if !rres.InTransistion() {
-		// we have rolled out all objects in all phases, not interested in probes here
+	if !rres.IsOnCluster() || !rres.IsToSpec() {
+		rev.MarkAsProgressing(ocv1.ClusterExtensionRevisionReasonRolloutInProgress, "Revision is being rolled out.")
+	} else if rres.IsOnCluster() {
 		rev.MarkAsNotProgressing(ocv1.ClusterExtensionRevisionReasonRolledOut, "Revision is rolled out.")
 	}
 
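
Taken together, the call sites in this commit outline the surface of the new boxcutter revision result the controller now consumes: GetValidationError and GetPhases for preflight checks, and IsOnCluster/IsToSpec in place of the old InTransistion check. The interface below is only a minimal view inferred from those call sites, not the actual boxcutter API; the method semantics in the comments are likewise inferred from how the controller uses them.

// Inferred, minimal view of the boxcutter revision result as used above.
// This is a sketch for orientation only; the real types come from the
// boxcutter dependency and are not shown in this commit.
type revisionResultView interface {
	// Revision-level preflight validation error, if any (used via verr.String() above).
	GetValidationError() validationErrorView
	// Per-phase results, each of which may carry its own validation error.
	GetPhases() []phaseResultView
	// Inferred: true once the revision's objects have been applied to the cluster.
	IsOnCluster() bool
	// Inferred: true once the on-cluster objects match the revision's spec.
	IsToSpec() bool
	// Human-readable reconcile report, logged above.
	String() string
}

type phaseResultView interface {
	GetValidationError() validationErrorView
}

type validationErrorView interface {
	String() string
}

With those two signals the controller now marks the revision as Progressing while !IsOnCluster() || !IsToSpec(), and as RolledOut once both hold, instead of deriving an inRollout flag from the absence of the Available condition as the previous code did.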