Skip to content

Commit f95e732

Browse files
committed
docs: improve javadoc
Signed-off-by: Chris Laprun <claprun@redhat.com>
1 parent 16bc52a commit f95e732

File tree

1 file changed

+37
-40
lines changed

1 file changed

+37
-40
lines changed

operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/PrimaryUpdateAndCacheUtils.java

Lines changed: 37 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -14,13 +14,13 @@
1414

1515
/**
1616
* Utility methods to patch the primary resource state and store it to the related cache, to make
17-
* sure that fresh resource is present for the next reconciliation. The main use case for such
18-
* updates is to store state is resource status.
17+
* sure that the latest version of the resource is present for the next reconciliation. The main use
18+
 * case for such updates is to store state in the resource status.
1919
*
2020
* <p>The way the framework handles this is with retryable updates with optimistic locking, and
2121
* caches the updated resource from the response in an overlay cache on top of the Informer cache.
22-
* If the update fails, it reads the primary resource and applies the modifications again and
23-
* retries the update.
22+
* If the update fails, it reads the primary resource from the cluster, applies the modifications
23+
* again and retries the update.
2424
*/
2525
public class PrimaryUpdateAndCacheUtils {
2626

@@ -90,74 +90,71 @@ public static <P extends HasMetadata> P ssaPatchStatusAndCacheResource(
9090
}
9191

9292
/**
93-
* Modifies the primary using modificationFunction, then uses the modified resource for the
94-
* request to update with provided update method. But before the update operation sets the
95-
* resourceVersion to the modified resource from the primary resource, so there is always
96-
* optimistic locking happening. If the request fails on optimistic update, we read the resource
97-
* again from the K8S API server and retry the whole process. In short, we make sure we always
98-
* update the resource with optimistic locking, after we cache the resource in internal cache.
99-
* Without further going into the details, the optimistic locking is needed so we can reliably
100-
* handle the caching.
93+
* Same as {@link #updateAndCacheResource(HasMetadata, Context, UnaryOperator, UnaryOperator,
94+
* int)} using the default maximum retry number as defined by {@link #DEFAULT_MAX_RETRY}.
10195
*
102-
* @param primary original resource to update
96+
* @param resourceToUpdate original resource to update
10397
* @param context of reconciliation
10498
* @param modificationFunction modifications to make on primary
10599
* @param updateMethod the update method implementation
106-
* @return updated resource
107100
* @param <P> primary type
101+
* @return the updated resource
108102
*/
109103
public static <P extends HasMetadata> P updateAndCacheResource(
110-
P primary,
104+
P resourceToUpdate,
111105
Context<P> context,
112106
UnaryOperator<P> modificationFunction,
113107
UnaryOperator<P> updateMethod) {
114108
return updateAndCacheResource(
115-
primary, context, modificationFunction, updateMethod, DEFAULT_MAX_RETRY);
109+
resourceToUpdate, context, modificationFunction, updateMethod, DEFAULT_MAX_RETRY);
116110
}
117111

118112
/**
119-
* Modifies the primary using modificationFunction, then uses the modified resource for the
120-
* request to update with provided update method. But before the update operation sets the
121-
* resourceVersion to the modified resource from the primary resource, so there is always
122-
* optimistic locking happening. If the request fails on optimistic update, we read the resource
123-
* again from the K8S API server and retry the whole process. In short, we make sure we always
124-
* update the resource with optimistic locking, after we cache the resource in internal cache.
125-
* Without further going into the details, the optimistic locking is needed so we can reliably
126-
* handle the caching.
113+
* Modifies the primary using the specified modification function, then uses the modified resource
114+
* for the request to update with provided update method. As the {@code resourceVersion} field of
115+
* the modified resource is set to the value found in the specified resource to update, the update
116+
* operation will therefore use optimistic locking on the server. If the request fails on
117+
* optimistic update, we read the resource again from the K8S API server and retry the whole
118+
* process. In short, we make sure we always update the resource with optimistic locking, then we
119+
* cache the resource in an internal cache. Without further going into details, the optimistic
120+
* locking is needed so we can reliably handle the caching.
127121
*
128-
* @param primary original resource to update
122+
* @param resourceToUpdate original resource to update
129123
* @param context of reconciliation
130124
* @param modificationFunction modifications to make on primary
131125
* @param updateMethod the update method implementation
132-
* @param maxRetry - maximum number of retries of conflicts
133-
* @return updated resource
126+
* @param maxRetry maximum number of retries before giving up
134127
* @param <P> primary type
128+
* @return the updated resource
135129
*/
136130
@SuppressWarnings("unchecked")
137131
public static <P extends HasMetadata> P updateAndCacheResource(
138-
P primary,
132+
P resourceToUpdate,
139133
Context<P> context,
140134
UnaryOperator<P> modificationFunction,
141135
UnaryOperator<P> updateMethod,
142136
int maxRetry) {
143137

144138
if (log.isDebugEnabled()) {
145-
log.debug("Conflict retrying update for: {}", ResourceID.fromResource(primary));
139+
log.debug("Conflict retrying update for: {}", ResourceID.fromResource(resourceToUpdate));
146140
}
147141
P modified = null;
148142
int retryIndex = 0;
149143
while (true) {
150144
try {
151-
modified = modificationFunction.apply(primary);
152-
modified.getMetadata().setResourceVersion(primary.getMetadata().getResourceVersion());
145+
modified = modificationFunction.apply(resourceToUpdate);
146+
modified
147+
.getMetadata()
148+
.setResourceVersion(resourceToUpdate.getMetadata().getResourceVersion());
153149
var updated = updateMethod.apply(modified);
154150
context
155151
.eventSourceRetriever()
156152
.getControllerEventSource()
157-
.handleRecentResourceUpdate(ResourceID.fromResource(primary), updated, primary);
153+
.handleRecentResourceUpdate(
154+
ResourceID.fromResource(resourceToUpdate), updated, resourceToUpdate);
158155
return updated;
159156
} catch (KubernetesClientException e) {
160-
log.trace("Exception during patch for resource: {}", primary);
157+
log.trace("Exception during patch for resource: {}", resourceToUpdate);
161158
retryIndex++;
162159
// only retry on conflict (409) and unprocessable content (422) which
163160
// can happen if JSON Patch is not a valid request since there was
@@ -174,21 +171,21 @@ public static <P extends HasMetadata> P updateAndCacheResource(
174171
"Exceeded maximum ("
175172
+ maxRetry
176173
+ ") retry attempts to patch resource: "
177-
+ ResourceID.fromResource(primary),
174+
+ ResourceID.fromResource(resourceToUpdate),
178175
e);
179176
}
180177
log.debug(
181178
"Retrying patch for resource name: {}, namespace: {}; HTTP code: {}",
182-
primary.getMetadata().getName(),
183-
primary.getMetadata().getNamespace(),
179+
resourceToUpdate.getMetadata().getName(),
180+
resourceToUpdate.getMetadata().getNamespace(),
184181
e.getCode());
185-
primary =
182+
resourceToUpdate =
186183
(P)
187184
context
188185
.getClient()
189-
.resources(primary.getClass())
190-
.inNamespace(primary.getMetadata().getNamespace())
191-
.withName(primary.getMetadata().getName())
186+
.resources(resourceToUpdate.getClass())
187+
.inNamespace(resourceToUpdate.getMetadata().getNamespace())
188+
.withName(resourceToUpdate.getMetadata().getName())
192189
.get();
193190
}
194191
}

0 commit comments

Comments
 (0)