diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go
index fa2ea051c4..dd321d5541 100644
--- a/cmd/postgres-operator/main.go
+++ b/cmd/postgres-operator/main.go
@@ -248,18 +248,23 @@ func main() {
 	must(manager.Add(registrar))
 	token, _ := registrar.CheckToken()
 
+	bridgeURL := os.Getenv("PGO_BRIDGE_URL")
+	bridgeClient := func() *bridge.Client {
+		client := bridge.NewClient(bridgeURL, versionString)
+		client.Transport = otelTransportWrapper()(http.DefaultTransport)
+		return client
+	}
+
 	// add all PostgreSQL Operator controllers to the runtime manager
 	addControllersToManager(manager, log, registrar)
+	must(pgupgrade.ManagedReconciler(manager, registrar))
+	must(standalone_pgadmin.ManagedReconciler(manager))
+	must(crunchybridgecluster.ManagedReconciler(manager, func() bridge.ClientInterface {
+		return bridgeClient()
+	}))
 
 	if features.Enabled(feature.BridgeIdentifiers) {
-		url := os.Getenv("PGO_BRIDGE_URL")
-		constructor := func() *bridge.Client {
-			client := bridge.NewClient(url, versionString)
-			client.Transport = otelTransportWrapper()(http.DefaultTransport)
-			return client
-		}
-
-		must(bridge.ManagedInstallationReconciler(manager, constructor))
+		must(bridge.ManagedInstallationReconciler(manager, bridgeClient))
 	}
 
 	// Enable upgrade checking
@@ -307,8 +312,8 @@ func main() {
 func addControllersToManager(mgr runtime.Manager, log logging.Logger, reg registration.Registration) {
 	pgReconciler := &postgrescluster.Reconciler{
 		Client: mgr.GetClient(),
-		Owner: postgrescluster.ControllerName,
-		Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName),
+		Owner: naming.ControllerPostgresCluster,
+		Recorder: mgr.GetEventRecorderFor(naming.ControllerPostgresCluster),
 		Registration: reg,
 	}
 
@@ -316,46 +321,4 @@ func addControllersToManager(mgr runtime.Manager, log logging.Logger, reg regist
 		log.Error(err, "unable to create PostgresCluster controller")
 		os.Exit(1)
 	}
-
-	upgradeReconciler := &pgupgrade.PGUpgradeReconciler{
-		Client: mgr.GetClient(),
-		Owner: "pgupgrade-controller",
-		Recorder: mgr.GetEventRecorderFor("pgupgrade-controller"),
-		Registration: reg,
-	}
-
-	if err := upgradeReconciler.SetupWithManager(mgr); err != nil {
-		log.Error(err, "unable to create PGUpgrade controller")
-		os.Exit(1)
-	}
-
-	pgAdminReconciler := &standalone_pgadmin.PGAdminReconciler{
-		Client: mgr.GetClient(),
-		Owner: "pgadmin-controller",
-		Recorder: mgr.GetEventRecorderFor(naming.ControllerPGAdmin),
-	}
-
-	if err := pgAdminReconciler.SetupWithManager(mgr); err != nil {
-		log.Error(err, "unable to create PGAdmin controller")
-		os.Exit(1)
-	}
-
-	constructor := func() bridge.ClientInterface {
-		client := bridge.NewClient(os.Getenv("PGO_BRIDGE_URL"), versionString)
-		client.Transport = otelTransportWrapper()(http.DefaultTransport)
-		return client
-	}
-
-	crunchyBridgeClusterReconciler := &crunchybridgecluster.CrunchyBridgeClusterReconciler{
-		Client: mgr.GetClient(),
-		Owner: "crunchybridgecluster-controller",
-		// TODO(crunchybridgecluster): recorder?
-		// Recorder: mgr.GetEventRecorderFor(naming...),
-		NewClient: constructor,
-	}
-
-	if err := crunchyBridgeClusterReconciler.SetupWithManager(mgr); err != nil {
-		log.Error(err, "unable to create CrunchyBridgeCluster controller")
-		os.Exit(1)
-	}
 }
diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go
index 6edd870790..850920fa83 100644
--- a/internal/bridge/crunchybridgecluster/apply.go
+++ b/internal/bridge/crunchybridgecluster/apply.go
@@ -11,23 +11,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-// patch sends patch to object's endpoint in the Kubernetes API and updates
-// object with any returned content. The fieldManager is set to r.Owner, but
-// can be overridden in options.
-// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
-//
-// NOTE: This function is duplicated from a version in the postgrescluster package
-func (r *CrunchyBridgeClusterReconciler) patch(
-	ctx context.Context, object client.Object,
-	patch client.Patch, options ...client.PatchOption,
-) error {
-	options = append([]client.PatchOption{r.Owner}, options...)
-	return r.Patch(ctx, object, patch, options...)
-}
-
 // apply sends an apply patch to object's endpoint in the Kubernetes API and
-// updates object with any returned content. The fieldManager is set to
-// r.Owner and the force parameter is true.
+// updates object with any returned content. The fieldManager is set by
+// r.Writer and the force parameter is true.
 // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
 // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts
 //
@@ -40,7 +26,7 @@ func (r *CrunchyBridgeClusterReconciler) apply(ctx context.Context, object clien
 
 	// Send the apply-patch with force=true.
 	if err == nil {
-		err = r.patch(ctx, object, apply, client.ForceOwnership)
+		err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership)
 	}
 
 	return err
diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go
index ec9973ade1..98f3897c01 100644
--- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go
+++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go
@@ -33,27 +33,38 @@ import (
 
 // CrunchyBridgeClusterReconciler reconciles a CrunchyBridgeCluster object
 type CrunchyBridgeClusterReconciler struct {
-	client.Client
-
-	Owner client.FieldOwner
-
-	// For this iteration, we will only be setting conditions rather than
-	// setting conditions and emitting events. That may change in the future,
-	// so we're leaving this EventRecorder here for now.
-	// record.EventRecorder
-
-	// NewClient is called each time a new Client is needed.
+	// NewClient is called each time a new bridge.Client is needed.
 	NewClient func() bridge.ClientInterface
+
+	Reader interface {
+		Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error
+		List(context.Context, client.ObjectList, ...client.ListOption) error
+	}
+	Writer interface {
+		Delete(context.Context, client.Object, ...client.DeleteOption) error
+		Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error
+		Update(context.Context, client.Object, ...client.UpdateOption) error
+	}
+	StatusWriter interface {
+		Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error
+	}
 }
 
 //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list,watch}
 //+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch}
 
-// SetupWithManager sets up the controller with the Manager.
-func (r *CrunchyBridgeClusterReconciler) SetupWithManager(
-	mgr ctrl.Manager,
-) error {
-	return ctrl.NewControllerManagedBy(mgr).
+// ManagedReconciler creates a [CrunchyBridgeClusterReconciler] and adds it to m.
+func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) error {
+	kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerCrunchyBridgeCluster)
+
+	reconciler := &CrunchyBridgeClusterReconciler{
+		NewClient: newClient,
+		Reader: kubernetes,
+		StatusWriter: kubernetes.Status(),
+		Writer: kubernetes,
+	}
+
+	return ctrl.NewControllerManagedBy(m).
 		For(&v1beta1.CrunchyBridgeCluster{}).
 		Owns(&corev1.Secret{}).
 		// Wake periodically to check Bridge API for all CrunchyBridgeClusters.
@@ -63,7 +74,7 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager(
 			runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{},
 				handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request {
 					var list v1beta1.CrunchyBridgeClusterList
-					_ = r.List(ctx, &list)
+					_ = reconciler.Reader.List(ctx, &list)
 					return runtime.Requests(initialize.Pointers(list.Items...)...)
 				}),
 			),
@@ -72,10 +83,10 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager(
 		Watches(
 			&corev1.Secret{},
 			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request {
-				return runtime.Requests(r.findCrunchyBridgeClustersForSecret(ctx, client.ObjectKeyFromObject(secret))...)
+				return runtime.Requests(reconciler.findCrunchyBridgeClustersForSecret(ctx, client.ObjectKeyFromObject(secret))...)
 			}),
 		).
-		Complete(r)
+		Complete(reconciler)
 }
 
 // The owner reference created by controllerutil.SetControllerReference blocks
@@ -91,7 +102,7 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager(
 func (r *CrunchyBridgeClusterReconciler) setControllerReference(
 	owner *v1beta1.CrunchyBridgeCluster, controlled client.Object,
 ) error {
-	return controllerutil.SetControllerReference(owner, controlled, r.Scheme())
+	return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme)
 }
 
 //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update}
@@ -113,14 +124,14 @@ func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl
 	// copy before returning from its cache.
 	// - https://github.com/kubernetes-sigs/controller-runtime/issues/1235
 	crunchybridgecluster := &v1beta1.CrunchyBridgeCluster{}
-	err := r.Get(ctx, req.NamespacedName, crunchybridgecluster)
+	err := r.Reader.Get(ctx, req.NamespacedName, crunchybridgecluster)
 
 	if err == nil {
 		// Write any changes to the crunchybridgecluster status on the way out.
 		before := crunchybridgecluster.DeepCopy()
 		defer func() {
 			if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) {
-				status := r.Status().Patch(ctx, crunchybridgecluster, client.MergeFrom(before), r.Owner)
+				status := r.StatusWriter.Patch(ctx, crunchybridgecluster, client.MergeFrom(before))
 
 				if err == nil && status != nil {
 					err = status
@@ -684,7 +695,7 @@ func (r *CrunchyBridgeClusterReconciler) GetSecretKeys(
 	}}
 
 	err := errors.WithStack(
-		r.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 
 	if err == nil {
 		if existing.Data["key"] != nil && existing.Data["team"] != nil {
@@ -707,7 +718,7 @@ func (r *CrunchyBridgeClusterReconciler) deleteControlled(
 		version := object.GetResourceVersion()
 		exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
 
-		return r.Delete(ctx, object, exactly)
+		return r.Writer.Delete(ctx, object, exactly)
 	}
 
 	return nil
diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go
index a29b418b13..955282f61d 100644
--- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go
+++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go
@@ -36,8 +36,7 @@ func TestReconcileBridgeConnectionSecret(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 
 	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
+		Reader: tClient,
 	}
 
 	ns := setupNamespace(t, tClient).Name
@@ -87,15 +86,10 @@ func TestReconcileBridgeConnectionSecret(t *testing.T) {
 
 func TestHandleDuplicateClusterName(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
 
 	clusterInBridge := testClusterApiResource()
 	clusterInBridge.ClusterName = "bridge-cluster-1" // originally "hippo-cluster"
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -104,11 +98,8 @@ func TestHandleDuplicateClusterName(t *testing.T) {
 		}
 	}
 
-	ns := setupNamespace(t, tClient).Name
-
 	t.Run("FailureToListClusters", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 
 		controllerResult, err := reconciler.handleDuplicateClusterName(ctx, "bad_api_key", testTeamId, cluster)
 		assert.Check(t, err != nil)
@@ -124,7 +115,6 @@ func TestHandleDuplicateClusterName(t *testing.T) {
 
 	t.Run("NoDuplicateFound", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 
 		controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster)
 		assert.NilError(t, err)
@@ -133,7 +123,6 @@ func TestHandleDuplicateClusterName(t *testing.T) {
 
 	t.Run("DuplicateFoundAdoptionAnnotationNotPresent", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster"
 
 		controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster)
@@ -150,7 +139,6 @@ func TestHandleDuplicateClusterName(t *testing.T) {
 
 	t.Run("DuplicateFoundAdoptionAnnotationPresent", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster"
 		cluster.Annotations = map[string]string{}
 		cluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] = "1234"
@@ -164,15 +152,8 @@ func TestHandleDuplicateClusterName(t *testing.T) {
 
 func TestHandleCreateCluster(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
 
-	ns := setupNamespace(t, tClient).Name
-
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -183,7 +164,6 @@ func TestHandleCreateCluster(t *testing.T) {
 
 	t.Run("SuccessfulCreate", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 
 		controllerResult := reconciler.handleCreateCluster(ctx, testApiKey, testTeamId, cluster)
 		assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute)
@@ -208,7 +188,6 @@ func TestHandleCreateCluster(t *testing.T) {
 
 	t.Run("UnsuccessfulCreate", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 
 		controllerResult := reconciler.handleCreateCluster(ctx, "bad_api_key", testTeamId, cluster)
 		assert.Equal(t, controllerResult, ctrl.Result{})
@@ -229,19 +208,13 @@ func TestHandleCreateCluster(t *testing.T) {
 
 func TestHandleGetCluster(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
 
-	ns := setupNamespace(t, tClient).Name
 	firstClusterInBridge := testClusterApiResource()
 	secondClusterInBridge := testClusterApiResource()
 	secondClusterInBridge.ID = "2345"                       // originally "1234"
 	secondClusterInBridge.ClusterName = "hippo-cluster-2"   // originally "hippo-cluster"
 
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -252,7 +225,6 @@ func TestHandleGetCluster(t *testing.T) {
 
 	t.Run("SuccessfulGet", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 
 		err := reconciler.handleGetCluster(ctx, testApiKey, cluster)
@@ -269,7 +241,6 @@ func TestHandleGetCluster(t *testing.T) {
 
 	t.Run("UnsuccessfulGet", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "bad_cluster_id"
 
 		err := reconciler.handleGetCluster(ctx, testApiKey, cluster)
@@ -287,20 +258,14 @@ func TestHandleGetCluster(t *testing.T) {
 
 func TestHandleGetClusterStatus(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
 
-	ns := setupNamespace(t, tClient).Name
 	readyClusterId := "1234"
 	creatingClusterId := "7890"
 	readyClusterStatusInBridge := testClusterStatusApiResource(readyClusterId)
 	creatingClusterStatusInBridge := testClusterStatusApiResource(creatingClusterId)
 	creatingClusterStatusInBridge.State = "creating" // originally "ready"
 
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -314,7 +279,6 @@ func TestHandleGetClusterStatus(t *testing.T) {
 
 	t.Run("SuccessReadyState", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = readyClusterId
 
 		err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster)
@@ -331,7 +295,6 @@ func TestHandleGetClusterStatus(t *testing.T) {
 
 	t.Run("SuccessNonReadyState", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = creatingClusterId
 
 		err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster)
@@ -348,7 +311,6 @@ func TestHandleGetClusterStatus(t *testing.T) {
 
 	t.Run("UnsuccessfulGet", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = creatingClusterId
 
 		err := reconciler.handleGetClusterStatus(ctx, "bad_api_key", cluster)
@@ -366,20 +328,14 @@ func TestHandleGetClusterStatus(t *testing.T) {
 
 func TestHandleGetClusterUpgrade(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
 
-	ns := setupNamespace(t, tClient).Name
 	upgradingClusterId := "1234"
 	notUpgradingClusterId := "7890"
 	upgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(upgradingClusterId)
 	notUpgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(notUpgradingClusterId)
 	notUpgradingClusterUpgradeInBridge.Operations = []*v1beta1.UpgradeOperation{}
 
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -393,7 +349,6 @@ func TestHandleGetClusterUpgrade(t *testing.T) {
 
 	t.Run("SuccessUpgrading", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = upgradingClusterId
 
 		err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster)
@@ -414,7 +369,6 @@ func TestHandleGetClusterUpgrade(t *testing.T) {
 
 	t.Run("SuccessNotUpgrading", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = notUpgradingClusterId
 
 		err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster)
@@ -431,7 +385,6 @@ func TestHandleGetClusterUpgrade(t *testing.T) {
 
 	t.Run("UnsuccessfulGet", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = notUpgradingClusterId
 
 		err := reconciler.handleGetClusterUpgrade(ctx, "bad_api_key", cluster)
@@ -448,16 +401,9 @@ func TestHandleGetClusterUpgrade(t *testing.T) {
 
 func TestHandleUpgrade(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
-	ns := setupNamespace(t, tClient).Name
 	clusterInBridge := testClusterApiResource()
 
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -468,7 +414,6 @@ func TestHandleUpgrade(t *testing.T) {
 
 	t.Run("UpgradePlan", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.Plan = "standard-16" // originally "standard-8"
 
@@ -490,7 +435,6 @@ func TestHandleUpgrade(t *testing.T) {
 
 	t.Run("UpgradePostgres", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.PostgresVersion = 16 // originally "15"
 
@@ -512,7 +456,6 @@ func TestHandleUpgrade(t *testing.T) {
 
 	t.Run("UpgradeStorage", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi"
 
@@ -534,7 +477,6 @@ func TestHandleUpgrade(t *testing.T) {
 
 	t.Run("UpgradeFailure", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi"
 
@@ -552,19 +494,13 @@ func TestHandleUpgrade(t *testing.T) {
 
 func TestHandleUpgradeHA(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
 
-	ns := setupNamespace(t, tClient).Name
 	clusterInBridgeWithHaDisabled := testClusterApiResource()
 	clusterInBridgeWithHaEnabled := testClusterApiResource()
 	clusterInBridgeWithHaEnabled.ID = "2345"                     // originally "1234"
 	clusterInBridgeWithHaEnabled.IsHA = initialize.Bool(true)    // originally "false"
 
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -576,7 +512,6 @@ func TestHandleUpgradeHA(t *testing.T) {
 
 	t.Run("EnableHA", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.IsHA = true // originally "false"
 
@@ -598,7 +533,6 @@ func TestHandleUpgradeHA(t *testing.T) {
 
 	t.Run("DisableHA", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "2345"
 
 		controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster)
@@ -619,7 +553,6 @@ func TestHandleUpgradeHA(t *testing.T) {
 
 	t.Run("UpgradeFailure", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 
 		controllerResult := reconciler.handleUpgradeHA(ctx, "bad_api_key", cluster)
@@ -636,16 +569,9 @@ func TestHandleUpgradeHA(t *testing.T) {
 
 func TestHandleUpdate(t *testing.T) {
 	ctx := context.Background()
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
-	ns := setupNamespace(t, tClient).Name
 	clusterInBridge := testClusterApiResource()
 
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 	reconciler.NewClient = func() bridge.ClientInterface {
 		return &TestBridgeClient{
 			ApiKey: testApiKey,
@@ -656,7 +582,6 @@ func TestHandleUpdate(t *testing.T) {
 
 	t.Run("UpdateName", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.ClusterName = "new-cluster-name" // originally "hippo-cluster"
 
@@ -674,7 +599,6 @@ func TestHandleUpdate(t *testing.T) {
 
 	t.Run("UpdateIsProtected", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.IsProtected = true // originally "false"
 
@@ -692,7 +616,6 @@ func TestHandleUpdate(t *testing.T) {
 
 	t.Run("UpgradeFailure", func(t *testing.T) {
 		cluster := testCluster()
-		cluster.Namespace = ns
 		cluster.Status.ID = "1234"
 		cluster.Spec.IsProtected = true // originally "false"
 
@@ -713,8 +636,7 @@ func TestGetSecretKeys(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 
 	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
+		Reader: tClient,
 	}
 
 	ns := setupNamespace(t, tClient).Name
@@ -796,8 +718,7 @@ func TestDeleteControlled(t *testing.T) {
 	ns := setupNamespace(t, tClient)
 
 	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
+		Writer: client.WithFieldOwner(tClient, t.Name()),
 	}
 
 	cluster := testCluster()
diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go
index ae44c8036b..cf5a320f54 100644
--- a/internal/bridge/crunchybridgecluster/delete.go
+++ b/internal/bridge/crunchybridgecluster/delete.go
@@ -31,7 +31,7 @@ func (r *CrunchyBridgeClusterReconciler) handleDelete(
 	if crunchybridgecluster.DeletionTimestamp.IsZero() {
 		if !controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) {
 			controllerutil.AddFinalizer(crunchybridgecluster, finalizer)
-			if err := r.Update(ctx, crunchybridgecluster); err != nil {
+			if err := r.Writer.Update(ctx, crunchybridgecluster); err != nil {
 				return nil, err
 			}
 		}
@@ -57,7 +57,7 @@ func (r *CrunchyBridgeClusterReconciler) handleDelete(
 			log.Info("cluster deleted", "clusterName", crunchybridgecluster.Spec.ClusterName)
 
 			controllerutil.RemoveFinalizer(crunchybridgecluster, finalizer)
-			if err := r.Update(ctx, crunchybridgecluster); err != nil {
+			if err := r.Writer.Update(ctx, crunchybridgecluster); err != nil {
 				return &ctrl.Result{}, err
 			}
 		}
diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go
index c86746ef1b..508c87c5c9 100644
--- a/internal/bridge/crunchybridgecluster/delete_test.go
+++ b/internal/bridge/crunchybridgecluster/delete_test.go
@@ -32,8 +32,7 @@ func TestHandleDeleteCluster(t *testing.T) {
 	secondClusterInBridge.ID = "2345"
 
 	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
+		Writer: client.WithFieldOwner(tClient, t.Name()),
 	}
 	testBridgeClient := &TestBridgeClient{
 		ApiKey: "9012",
diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go
index 80096de91b..0aa09517d5 100644
--- a/internal/bridge/crunchybridgecluster/postgres.go
+++ b/internal/bridge/crunchybridgecluster/postgres.go
@@ -92,7 +92,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets(
 	// Make sure that this cluster's role secret names are not being used by any other
 	// secrets in the namespace
 	allSecretsInNamespace := &corev1.SecretList{}
-	err := errors.WithStack(r.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace)))
+	err := errors.WithStack(r.Reader.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace)))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -115,7 +115,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets(
 	selector, err := naming.AsSelector(naming.CrunchyBridgeClusterPostgresRoles(cluster.Name))
 	if err == nil {
 		err = errors.WithStack(
-			r.List(ctx, secrets,
+			r.Reader.List(ctx, secrets,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selector},
 			))
diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go
index 6fae4fe26a..f2594bbba4 100644
--- a/internal/bridge/crunchybridgecluster/postgres_test.go
+++ b/internal/bridge/crunchybridgecluster/postgres_test.go
@@ -20,16 +20,10 @@ import (
 )
 
 func TestGeneratePostgresRoleSecret(t *testing.T) {
-	tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
-	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
-	}
+	reconciler := &CrunchyBridgeClusterReconciler{}
 
 	cluster := testCluster()
-	cluster.Namespace = setupNamespace(t, tClient).Name
+	cluster.Namespace = "asdf"
 
 	spec := &v1beta1.CrunchyBridgeClusterRoleSpec{
 		Name: "application",
@@ -77,8 +71,8 @@ func TestReconcilePostgresRoleSecrets(t *testing.T) {
 	ns := setupNamespace(t, tClient).Name
 
 	reconciler := &CrunchyBridgeClusterReconciler{
-		Client: tClient,
-		Owner: "crunchybridgecluster-controller",
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
 	}
 
 	t.Run("DuplicateSecretNameInSpec", func(t *testing.T) {
diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go
index 44a2c1490b..ac9b59b429 100644
--- a/internal/bridge/crunchybridgecluster/watches.go
+++ b/internal/bridge/crunchybridgecluster/watches.go
@@ -26,7 +26,7 @@ func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret(
 	// namespace, we can configure the [manager.Manager] field indexer and pass a
 	// [fields.Selector] here.
 	// - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html
-	if err := r.List(ctx, &clusters, &client.ListOptions{
+	if err := r.Reader.List(ctx, &clusters, &client.ListOptions{
 		Namespace: secret.Namespace,
 	}); err == nil {
 		for i := range clusters.Items {
diff --git a/internal/bridge/crunchybridgecluster/watches_test.go b/internal/bridge/crunchybridgecluster/watches_test.go
index 7ac0e26e57..b7e6f67f31 100644
--- a/internal/bridge/crunchybridgecluster/watches_test.go
+++ b/internal/bridge/crunchybridgecluster/watches_test.go
@@ -21,7 +21,7 @@ func TestFindCrunchyBridgeClustersForSecret(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 
 	ns := setupNamespace(t, tClient)
-	reconciler := &CrunchyBridgeClusterReconciler{Client: tClient}
+	reconciler := &CrunchyBridgeClusterReconciler{Reader: tClient}
 
 	secret := &corev1.Secret{}
 	secret.Namespace = ns.Name
diff --git a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go
index fb0c55950e..c3e869eba6 100644
--- a/internal/controller/pgupgrade/apply.go
+++ b/internal/controller/pgupgrade/apply.go
@@ -11,21 +11,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-// patch sends patch to object's endpoint in the Kubernetes API and updates
-// object with any returned content. The fieldManager is set to r.Owner, but
-// can be overridden in options.
-// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
-func (r *PGUpgradeReconciler) patch(
-	ctx context.Context, object client.Object,
-	patch client.Patch, options ...client.PatchOption,
-) error {
-	options = append([]client.PatchOption{r.Owner}, options...)
-	return r.Client.Patch(ctx, object, patch, options...)
-}
-
 // apply sends an apply patch to object's endpoint in the Kubernetes API and
-// updates object with any returned content. The fieldManager is set to
-// r.Owner and the force parameter is true.
+// updates object with any returned content. The fieldManager is set by
+// r.Writer and the force parameter is true.
 // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
 // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts
 func (r *PGUpgradeReconciler) apply(ctx context.Context, object client.Object) error {
@@ -36,7 +24,7 @@ func (r *PGUpgradeReconciler) apply(ctx context.Context, object client.Object) e
 
 	// Send the apply-patch with force=true.
 	if err == nil {
-		err = r.patch(ctx, object, apply, client.ForceOwnership)
+		err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership)
 	}
 
 	return err
diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go
index 06a36574f0..653ea9e55e 100644
--- a/internal/controller/pgupgrade/pgupgrade_controller.go
+++ b/internal/controller/pgupgrade/pgupgrade_controller.go
@@ -21,6 +21,7 @@ import (
 	"github.com/crunchydata/postgres-operator/internal/config"
 	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
 	"github.com/crunchydata/postgres-operator/internal/logging"
+	"github.com/crunchydata/postgres-operator/internal/naming"
 	"github.com/crunchydata/postgres-operator/internal/registration"
 	"github.com/crunchydata/postgres-operator/internal/tracing"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
@@ -32,29 +33,49 @@ const (
 
 // PGUpgradeReconciler reconciles a PGUpgrade object
 type PGUpgradeReconciler struct {
-	Client client.Client
-	Owner client.FieldOwner
-
 	Recorder record.EventRecorder
 	Registration registration.Registration
+
+	Reader interface {
+		Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error
+		List(context.Context, client.ObjectList, ...client.ListOption) error
+	}
+	Writer interface {
+		Delete(context.Context, client.Object, ...client.DeleteOption) error
+		Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error
+	}
+	StatusWriter interface {
+		Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error
+	}
 }
 
 //+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch}
 //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list,watch}
 //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch}
 
-// SetupWithManager sets up the controller with the Manager.
-func (r *PGUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
+// ManagedReconciler creates a [PGUpgradeReconciler] and adds it to m.
+func ManagedReconciler(m ctrl.Manager, r registration.Registration) error {
+	kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPGUpgrade)
+	recorder := m.GetEventRecorderFor(naming.ControllerPGUpgrade)
+
+	reconciler := &PGUpgradeReconciler{
+		Reader: kubernetes,
+		Recorder: recorder,
+		Registration: r,
+		StatusWriter: kubernetes.Status(),
+		Writer: kubernetes,
+	}
+
+	return ctrl.NewControllerManagedBy(m).
 		For(&v1beta1.PGUpgrade{}).
 		Owns(&batchv1.Job{}).
 		Watches(
 			v1beta1.NewPostgresCluster(),
 			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request {
-				return runtime.Requests(r.findUpgradesForPostgresCluster(ctx, client.ObjectKeyFromObject(cluster))...)
+				return runtime.Requests(reconciler.findUpgradesForPostgresCluster(ctx, client.ObjectKeyFromObject(cluster))...)
			}),
		).
-		Complete(r)
+		Complete(reconciler)
 }
 
 //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list}
@@ -70,7 +91,7 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster(
 	// namespace, we can configure the [ctrl.Manager] field indexer and pass a
 	// [fields.Selector] here.
 	// - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html
-	if r.Client.List(ctx, &upgrades, &client.ListOptions{
+	if r.Reader.List(ctx, &upgrades, &client.ListOptions{
 		Namespace: cluster.Namespace,
 	}) == nil {
 		for i := range upgrades.Items {
@@ -107,14 +128,14 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	// copy before returning from its cache.
 	// - https://github.com/kubernetes-sigs/controller-runtime/issues/1235
 	upgrade := &v1beta1.PGUpgrade{}
-	err = r.Client.Get(ctx, req.NamespacedName, upgrade)
+	err = r.Reader.Get(ctx, req.NamespacedName, upgrade)
 
 	if err == nil {
 		// Write any changes to the upgrade status on the way out.
 		before := upgrade.DeepCopy()
 		defer func() {
 			if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) {
-				status := r.Client.Status().Patch(ctx, upgrade, client.MergeFrom(before), r.Owner)
+				status := r.StatusWriter.Patch(ctx, upgrade, client.MergeFrom(before))
 
 				if err == nil && status != nil {
 					err = status
@@ -409,7 +430,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 			// - https://kubernetes.io/docs/concepts/workloads/controllers/job/
 			// - https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/batch/job/strategy.go#L58
 			propagate := client.PropagationPolicy(metav1.DeletePropagationBackground)
-			err = client.IgnoreNotFound(r.Client.Delete(ctx, object, exactly, propagate))
+			err = client.IgnoreNotFound(r.Writer.Delete(ctx, object, exactly, propagate))
 		}
 	}
 
@@ -424,7 +445,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		// Set the pgBackRest status for bootstrapping
 		patch.Status.PGBackRest.Repos = []v1beta1.RepoStatus{}
 
-		err = r.Client.Status().Patch(ctx, patch, client.MergeFrom(world.Cluster), r.Owner)
+		err = r.StatusWriter.Patch(ctx, patch, client.MergeFrom(world.Cluster))
 	}
 
 	return ctrl.Result{}, err
@@ -461,7 +482,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 			uid := object.GetUID()
 			version := object.GetResourceVersion()
 			exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
-			err = client.IgnoreNotFound(r.Client.Delete(ctx, object, exactly))
+			err = client.IgnoreNotFound(r.Writer.Delete(ctx, object, exactly))
 		}
 
 		// Requeue to verify that Patroni endpoints are deleted
diff --git a/internal/controller/pgupgrade/utils.go b/internal/controller/pgupgrade/utils.go
index 6c92ba5693..64dff4f589 100644
--- a/internal/controller/pgupgrade/utils.go
+++ b/internal/controller/pgupgrade/utils.go
@@ -12,6 +12,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
+	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
 	"github.com/crunchydata/postgres-operator/internal/initialize"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
@@ -29,7 +30,7 @@ func (r *PGUpgradeReconciler) setControllerReference(
 	owner *v1beta1.PGUpgrade, controlled client.Object,
 ) {
 	if metav1.GetControllerOf(controlled) != nil {
-		panic(controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()))
+		panic(controllerutil.SetControllerReference(owner, controlled, runtime.Scheme))
 	}
 
 	controlled.SetOwnerReferences(append(
diff --git a/internal/controller/pgupgrade/world.go b/internal/controller/pgupgrade/world.go
index c5536e720b..5933fbcd69 100644
--- a/internal/controller/pgupgrade/world.go
+++ b/internal/controller/pgupgrade/world.go
@@ -39,7 +39,7 @@ func (r *PGUpgradeReconciler) observeWorld(
 	cluster := v1beta1.NewPostgresCluster()
 	err := errors.WithStack(
-		r.Client.Get(ctx, client.ObjectKey{
+		r.Reader.Get(ctx, client.ObjectKey{
 			Namespace: upgrade.Namespace,
 			Name: upgrade.Spec.PostgresClusterName,
 		}, cluster))
@@ -48,7 +48,7 @@ func (r *PGUpgradeReconciler) observeWorld(
 	if err == nil {
 		var endpoints corev1.EndpointsList
 		err = errors.WithStack(
-			r.Client.List(ctx, &endpoints,
+			r.Reader.List(ctx, &endpoints,
 				client.InNamespace(upgrade.Namespace),
 				client.MatchingLabelsSelector{Selector: selectCluster},
 			))
@@ -58,7 +58,7 @@ func (r *PGUpgradeReconciler) observeWorld(
 	if err == nil {
 		var jobs batchv1.JobList
 		err = errors.WithStack(
-			r.Client.List(ctx, &jobs,
+			r.Reader.List(ctx, &jobs,
 				client.InNamespace(upgrade.Namespace),
 				client.MatchingLabelsSelector{Selector: selectCluster},
 			))
@@ -70,7 +70,7 @@ func (r *PGUpgradeReconciler) observeWorld(
 	if err == nil {
 		var statefulsets appsv1.StatefulSetList
 		err = errors.WithStack(
-			r.Client.List(ctx, &statefulsets,
+			r.Reader.List(ctx, &statefulsets,
 				client.InNamespace(upgrade.Namespace),
 				client.MatchingLabelsSelector{Selector: selectCluster},
 			))
diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go
index bbe141c0b4..98093e8ce2 100644
--- a/internal/controller/postgrescluster/controller.go
+++ b/internal/controller/postgrescluster/controller.go
@@ -33,6 +33,7 @@ import (
 	"github.com/crunchydata/postgres-operator/internal/initialize"
 	"github.com/crunchydata/postgres-operator/internal/kubernetes"
 	"github.com/crunchydata/postgres-operator/internal/logging"
+	"github.com/crunchydata/postgres-operator/internal/naming"
 	"github.com/crunchydata/postgres-operator/internal/pki"
 	"github.com/crunchydata/postgres-operator/internal/postgres"
 	"github.com/crunchydata/postgres-operator/internal/registration"
@@ -40,10 +41,7 @@ import (
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 
-const (
-	// ControllerName is the name of the PostgresCluster controller
-	ControllerName = "postgrescluster-controller"
-)
+const controllerName = naming.ControllerPostgresCluster
 
 // Reconciler holds resources for the PostgresCluster reconciler
 type Reconciler struct {
diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go
index d229728b12..6caa58b85d 100644
--- a/internal/controller/postgrescluster/controller_ref_manager.go
+++ b/internal/controller/postgrescluster/controller_ref_manager.go
@@ -41,7 +41,7 @@ func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.P
 	return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType,
 		patchBytes), &client.PatchOptions{
-		FieldManager: ControllerName,
+		FieldManager: controllerName,
 	})
 }
 
diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go
index 6e0cc3a5e6..9e166e80ef 100644
--- a/internal/controller/postgrescluster/pgbackrest_test.go
+++ b/internal/controller/postgrescluster/pgbackrest_test.go
@@ -179,8 +179,8 @@ func TestReconcilePGBackRest(t *testing.T) {
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: mgr.GetClient(),
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -778,8 +778,8 @@ func TestReconcileStanzaCreate(t *testing.T) {
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: mgr.GetClient(),
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -1060,8 +1060,8 @@ func TestReconcileManualBackup(t *testing.T) {
 	_, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: mgr.GetClient(),
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -1804,8 +1804,8 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) {
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: tClient,
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -2183,8 +2183,8 @@ func TestReconcileCloudBasedDataSource(t *testing.T) {
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: tClient,
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -2608,7 +2608,7 @@ func TestGenerateBackupJobIntent(t *testing.T) {
 
 	r := &Reconciler{
 		Client: cc,
-		Owner: ControllerName,
+		Owner: controllerName,
 	}
 
 	ctx := context.Background()
@@ -3904,8 +3904,8 @@ func TestReconcileScheduledBackups(t *testing.T) {
 	_, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: mgr.GetClient(),
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -4240,8 +4240,8 @@ func TestBackupsEnabled(t *testing.T) {
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
 			Client: mgr.GetClient(),
-			Recorder: mgr.GetEventRecorderFor(ControllerName),
-			Owner: ControllerName,
+			Recorder: mgr.GetEventRecorderFor(controllerName),
+			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go
index 0cb5f15a99..ed74b1220b 100644
--- a/internal/controller/postgrescluster/pki_test.go
+++ b/internal/controller/postgrescluster/pki_test.go
@@ -43,7 +43,7 @@ func TestReconcileCerts(t *testing.T) {
 
 	r := &Reconciler{
 		Client: tClient,
-		Owner: ControllerName,
+		Owner: controllerName,
 	}
 
 	// set up cluster1
diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go
index 0cc3191967..23df91192f 100644
--- a/internal/controller/standalone_pgadmin/apply.go
+++ b/internal/controller/standalone_pgadmin/apply.go
@@ -11,23 +11,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-// patch sends patch to object's endpoint in the Kubernetes API and updates
-// object with any returned content. The fieldManager is set to r.Owner, but
-// can be overridden in options.
-// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
-//
-// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object.
-func (r *PGAdminReconciler) patch(
-	ctx context.Context, object client.Object,
-	patch client.Patch, options ...client.PatchOption,
-) error {
-	options = append([]client.PatchOption{r.Owner}, options...)
-	return r.Patch(ctx, object, patch, options...)
-}
-
 // apply sends an apply patch to object's endpoint in the Kubernetes API and
-// updates object with any returned content. The fieldManager is set to
-// r.Owner and the force parameter is true.
+// updates object with any returned content. The fieldManager is set by
+// r.Writer and the force parameter is true.
 // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
 // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts
 //
@@ -40,7 +26,7 @@ func (r *PGAdminReconciler) apply(ctx context.Context, object client.Object) err
 
 	// Send the apply-patch with force=true.
 	if err == nil {
-		err = r.patch(ctx, object, apply, client.ForceOwnership)
+		err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership)
 	}
 
 	return err
diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go
index a8b95b0053..fe205dcaf6 100644
--- a/internal/controller/standalone_pgadmin/controller.go
+++ b/internal/controller/standalone_pgadmin/controller.go
@@ -6,6 +6,7 @@ package standalone_pgadmin
 
 import (
 	"context"
+	"errors"
 	"io"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -20,18 +21,30 @@ import (
 
 	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
 	"github.com/crunchydata/postgres-operator/internal/logging"
+	"github.com/crunchydata/postgres-operator/internal/naming"
 	"github.com/crunchydata/postgres-operator/internal/tracing"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 
 // PGAdminReconciler reconciles a PGAdmin object
 type PGAdminReconciler struct {
-	client.Client
-	Owner client.FieldOwner
 	PodExec func(
 		ctx context.Context, namespace, pod, container string,
 		stdin io.Reader, stdout, stderr io.Writer, command ...string,
 	) error
+
+	Reader interface {
+		Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error
+		List(context.Context, client.ObjectList, ...client.ListOption) error
+	}
+	Writer interface {
+		Delete(context.Context, client.Object, ...client.DeleteOption) error
+		Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error
+	}
+	StatusWriter interface {
+		Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error
+	}
+
 	Recorder record.EventRecorder
 }
 
@@ -42,19 +55,21 @@ type PGAdminReconciler struct {
 //+kubebuilder:rbac:groups="",resources="configmaps",verbs={list,watch}
 //+kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list,watch}
 
-// SetupWithManager sets up the controller with the Manager.
-//
-// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object.
-func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	if r.PodExec == nil {
-		var err error
-		r.PodExec, err = runtime.NewPodExecutor(mgr.GetConfig())
-		if err != nil {
-			return err
-		}
+// ManagedReconciler creates a [PGAdminReconciler] and adds it to m.
+func ManagedReconciler(m ctrl.Manager) error {
+	exec, err := runtime.NewPodExecutor(m.GetConfig())
+	kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPGAdmin)
+	recorder := m.GetEventRecorderFor(naming.ControllerPGAdmin)
+
+	reconciler := &PGAdminReconciler{
+		PodExec: exec,
+		Reader: kubernetes,
+		Recorder: recorder,
+		StatusWriter: kubernetes.Status(),
+		Writer: kubernetes,
 	}
 
-	return ctrl.NewControllerManagedBy(mgr).
+	return errors.Join(err, ctrl.NewControllerManagedBy(m).
 		For(&v1beta1.PGAdmin{}).
 		Owns(&corev1.ConfigMap{}).
 		Owns(&corev1.PersistentVolumeClaim{}).
@@ -64,16 +79,16 @@ func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Watches(
 			v1beta1.NewPostgresCluster(),
 			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request {
-				return runtime.Requests(r.findPGAdminsForPostgresCluster(ctx, cluster)...)
+				return runtime.Requests(reconciler.findPGAdminsForPostgresCluster(ctx, cluster)...)
 			}),
 		).
 		Watches(
 			&corev1.Secret{},
 			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request {
-				return runtime.Requests(r.findPGAdminsForSecret(ctx, client.ObjectKeyFromObject(secret))...)
+				return runtime.Requests(reconciler.findPGAdminsForSecret(ctx, client.ObjectKeyFromObject(secret))...)
 			}),
 		).
-		Complete(r)
+		Complete(reconciler))
 }
 
 //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get}
@@ -89,7 +104,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
 	defer span.End()
 
 	pgAdmin := &v1beta1.PGAdmin{}
-	if err := r.Get(ctx, req.NamespacedName, pgAdmin); err != nil {
+	if err := r.Reader.Get(ctx, req.NamespacedName, pgAdmin); err != nil {
 		// NotFound cannot be fixed by requeuing so ignore it. During background
 		// deletion, we receive delete events from pgadmin's dependents after
 		// pgadmin is deleted.
@@ -100,7 +115,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
 	before := pgAdmin.DeepCopy()
 	defer func() {
 		if !equality.Semantic.DeepEqual(before.Status, pgAdmin.Status) {
-			statusErr := r.Status().Patch(ctx, pgAdmin, client.MergeFrom(before), r.Owner)
+			statusErr := r.StatusWriter.Patch(ctx, pgAdmin, client.MergeFrom(before))
 			if statusErr != nil {
 				log.Error(statusErr, "Patching PGAdmin status")
 			}
@@ -166,7 +181,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
 func (r *PGAdminReconciler) setControllerReference(
 	owner *v1beta1.PGAdmin, controlled client.Object,
 ) error {
-	return controllerutil.SetControllerReference(owner, controlled, r.Scheme())
+	return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme)
 }
 
 // deleteControlled safely deletes object when it is controlled by pgAdmin.
@@ -178,7 +193,7 @@ func (r *PGAdminReconciler) deleteControlled(
 		version := object.GetResourceVersion()
 		exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
 
-		return r.Delete(ctx, object, exactly)
+		return r.Writer.Delete(ctx, object, exactly)
 	}
 
 	return nil
diff --git a/internal/controller/standalone_pgadmin/controller_test.go b/internal/controller/standalone_pgadmin/controller_test.go
index 4dd984d8ef..a3b91eea15 100644
--- a/internal/controller/standalone_pgadmin/controller_test.go
+++ b/internal/controller/standalone_pgadmin/controller_test.go
@@ -24,7 +24,7 @@ func TestDeleteControlled(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 
 	ns := setupNamespace(t, cc)
-	reconciler := PGAdminReconciler{Client: cc}
+	reconciler := PGAdminReconciler{Writer: cc}
 
 	pgadmin := new(v1beta1.PGAdmin)
 	pgadmin.Namespace = ns.Name
diff --git a/internal/controller/standalone_pgadmin/related.go b/internal/controller/standalone_pgadmin/related.go
index c7fcb119bc..0ae255d311 100644
--- a/internal/controller/standalone_pgadmin/related.go
+++ b/internal/controller/standalone_pgadmin/related.go
@@ -30,7 +30,7 @@ func (r *PGAdminReconciler) findPGAdminsForPostgresCluster(
 	// namespace, we can configure the [manager.Manager] field indexer and pass a
 	// [fields.Selector] here.
 	// - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html
-	if r.List(ctx, &pgadmins, &client.ListOptions{
+	if r.Reader.List(ctx, &pgadmins, &client.ListOptions{
 		Namespace: cluster.GetNamespace(),
 	}) == nil {
 		for i := range pgadmins.Items {
@@ -64,7 +64,7 @@ func (r *PGAdminReconciler) findPGAdminsForSecret(
 	// namespace, we can configure the [manager.Manager] field indexer and pass a
 	// [fields.Selector] here.
 	// - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html
-	if err := r.List(ctx, &pgadmins, &client.ListOptions{
+	if err := r.Reader.List(ctx, &pgadmins, &client.ListOptions{
 		Namespace: secret.Namespace,
 	}); err == nil {
 		for i := range pgadmins.Items {
@@ -93,7 +93,7 @@ func (r *PGAdminReconciler) getClustersForPGAdmin(
 	for _, serverGroup := range pgAdmin.Spec.ServerGroups {
 		var cluster v1beta1.PostgresCluster
 		if serverGroup.PostgresClusterName != "" {
-			err = r.Get(ctx, client.ObjectKey{
+			err = r.Reader.Get(ctx, client.ObjectKey{
 				Name: serverGroup.PostgresClusterName,
 				Namespace: pgAdmin.GetNamespace(),
 			}, &cluster)
@@ -104,7 +104,7 @@ func (r *PGAdminReconciler) getClustersForPGAdmin(
 		}
 		if selector, err = naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil {
 			var list v1beta1.PostgresClusterList
-			err = r.List(ctx, &list,
+			err = r.Reader.List(ctx, &list,
 				client.InNamespace(pgAdmin.Namespace),
 				client.MatchingLabelsSelector{Selector: selector},
 			)
diff --git a/internal/controller/standalone_pgadmin/related_test.go b/internal/controller/standalone_pgadmin/related_test.go
index a14e50d9e2..742e10eef6 100644
--- a/internal/controller/standalone_pgadmin/related_test.go
+++ b/internal/controller/standalone_pgadmin/related_test.go
@@ -22,7 +22,7 @@ func TestFindPGAdminsForSecret(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 
 	ns := setupNamespace(t, tClient)
-	reconciler := &PGAdminReconciler{Client: tClient}
+	reconciler := &PGAdminReconciler{Reader: tClient}
 
 	secret1 := &corev1.Secret{}
 	secret1.Namespace = ns.Name
diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go
index bfdc04c6ec..8f21da4765 100644
--- a/internal/controller/standalone_pgadmin/service.go
+++ b/internal/controller/standalone_pgadmin/service.go
@@ -36,7 +36,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService(
 	// need to delete any existing service(s). At the start of every reconcile
 	// get all services that match the current pgAdmin labels.
 	services := corev1.ServiceList{}
-	if err := r.List(ctx, &services,
+	if err := r.Reader.List(ctx, &services,
 		client.InNamespace(pgadmin.Namespace),
 		client.MatchingLabels{
 			naming.LabelStandalonePGAdmin: pgadmin.Name,
@@ -62,7 +62,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService(
 	if pgadmin.Spec.ServiceName != "" {
 		// Look for an existing service with name ServiceName in the namespace
 		existingService := &corev1.Service{}
-		err := r.Get(ctx, types.NamespacedName{
+		err := r.Reader.Get(ctx, types.NamespacedName{
 			Name: pgadmin.Spec.ServiceName,
 			Namespace: pgadmin.GetNamespace(),
 		}, existingService)
diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go
index b8730b7112..ce1cfb5fc5 100644
--- a/internal/controller/standalone_pgadmin/statefulset.go
+++ b/internal/controller/standalone_pgadmin/statefulset.go
@@ -34,7 +34,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet(
 	// When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by
 	// the StatefulSet that gets created in the next reconcile.
 	existing := &appsv1.StatefulSet{}
-	if err := errors.WithStack(r.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil {
+	if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return err
 		}
@@ -47,7 +47,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet(
 			exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
 			propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan)
 
-			return errors.WithStack(client.IgnoreNotFound(r.Delete(ctx, existing, exactly, propagate)))
+			return errors.WithStack(client.IgnoreNotFound(r.Writer.Delete(ctx, existing, exactly, propagate)))
 		}
 	}
 
diff --git a/internal/controller/standalone_pgadmin/statefulset_test.go b/internal/controller/standalone_pgadmin/statefulset_test.go
index 9d6b804476..1f90a13e90 100644
--- a/internal/controller/standalone_pgadmin/statefulset_test.go
+++ b/internal/controller/standalone_pgadmin/statefulset_test.go
@@ -27,8 +27,8 @@ func TestReconcilePGAdminStatefulSet(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 
 	reconciler := &PGAdminReconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 
 	ns := setupNamespace(t, cc)
diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go
index 678a3a722b..959437762f 100644
--- a/internal/controller/standalone_pgadmin/users.go
+++ b/internal/controller/standalone_pgadmin/users.go
@@ -53,7 +53,7 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin *
 	pod := &corev1.Pod{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
 	pod.Name += "-0"
 
-	err := errors.WithStack(r.Get(ctx, client.ObjectKeyFromObject(pod), pod))
+	err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pod), pod))
 	if err != nil {
 		return client.IgnoreNotFound(err)
 	}
@@ -136,7 +136,7 @@ func (r *PGAdminReconciler) writePGAdminUsers(ctx context.Context, pgadmin *v1be
 	existingUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
 	err := errors.WithStack(
-		r.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret))
 	if client.IgnoreNotFound(err) != nil {
 		return err
 	}
@@ -186,7 +186,7 @@ cd $PGADMIN_DIR
 			Name: user.PasswordRef.Name,
 		}}
 		err := errors.WithStack(
-			r.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret))
+			r.Reader.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret))
 		if err != nil {
 			log.Error(err, "Could not get user password secret")
 			continue
diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go
index b164bb6069..5ec58dc573 100644
--- a/internal/controller/standalone_pgadmin/users_test.go
+++ b/internal/controller/standalone_pgadmin/users_test.go
@@ -42,7 +42,7 @@ func TestReconcilePGAdminUsers(t *testing.T) {
 
 	t.Run("NoPods", func(t *testing.T) {
 		r := new(PGAdminReconciler)
-		r.Client = fake.NewClientBuilder().Build()
+		r.Reader = fake.NewClientBuilder().Build()
 
 		assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin))
 	})
@@ -58,7 +58,7 @@ func TestReconcilePGAdminUsers(t *testing.T) {
 		pod.Status.ContainerStatuses = nil
 
 		r := new(PGAdminReconciler)
-		r.Client = fake.NewClientBuilder().WithObjects(pod).Build()
+		r.Reader = fake.NewClientBuilder().WithObjects(pod).Build()
 
 		assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin))
 	})
@@ -78,7 +78,7 @@ func TestReconcilePGAdminUsers(t *testing.T) {
 			new(corev1.ContainerStateRunning)
 
 		r := new(PGAdminReconciler)
-		r.Client = fake.NewClientBuilder().WithObjects(pod).Build()
+		r.Reader = fake.NewClientBuilder().WithObjects(pod).Build()
 
 		assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin))
 	})
@@ -97,7 +97,7 @@ func TestReconcilePGAdminUsers(t *testing.T) {
 		pod.Status.ContainerStatuses[0].ImageID = "fakeSHA"
 
 		r := new(PGAdminReconciler)
-		r.Client = fake.NewClientBuilder().WithObjects(pod).Build()
+		r.Reader = fake.NewClientBuilder().WithObjects(pod).Build()
 
 		calls := 0
 		r.PodExec = func(
@@ -136,7 +136,7 @@ func TestReconcilePGAdminUsers(t *testing.T) {
 		pod.Status.ContainerStatuses[0].ImageID = "newFakeSHA"
 
 		r := new(PGAdminReconciler)
-		r.Client = fake.NewClientBuilder().WithObjects(pod).Build()
+		r.Reader = fake.NewClientBuilder().WithObjects(pod).Build()
 
 		calls := 0
 		r.PodExec = func(
@@ -227,8 +227,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 	recorder := events.NewRecorder(t, runtime.Scheme)
 
 	reconciler := &PGAdminReconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 		Recorder: recorder,
 	}
 
@@ -316,8 +316,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, calls, 1, "PodExec should be called once")
 
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -376,8 +375,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once")
 
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -448,8 +446,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once")
 
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -493,8 +490,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, calls, 0, "PodExec should be called zero times")
 
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -535,8 +531,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -562,8 +557,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, calls, 2, "PodExec should be called once more")
 
 		// User in users.json should be unchanged
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -615,8 +609,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -643,8 +636,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -671,8 +663,7 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
-		assert.NilError(t,
-			reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret))
+		assert.NilError(t, cc.Get(ctx,
client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -700,8 +691,7 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, - reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go index 6500ac6c42..59d1a6e29e 100644 --- a/internal/controller/standalone_pgadmin/volume_test.go +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -30,8 +30,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &PGAdminReconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) diff --git a/internal/naming/controllers.go b/internal/naming/controllers.go index b434b8dbc5..dd797282d1 100644 --- a/internal/naming/controllers.go +++ b/internal/naming/controllers.go @@ -5,6 +5,9 @@ package naming const ( - ControllerBridge = "bridge-controller" - ControllerPGAdmin = "pgadmin-controller" + ControllerBridge = "bridge-controller" + ControllerCrunchyBridgeCluster = "crunchybridgecluster-controller" + ControllerPGAdmin = "pgadmin-controller" + ControllerPGUpgrade = "pgupgrade-controller" + ControllerPostgresCluster = "postgrescluster-controller" ) diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go index f2449f909b..719105d9d3 100644 --- a/internal/upgradecheck/header.go +++ b/internal/upgradecheck/header.go @@ -16,7 +16,6 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" crclient "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" @@ -128,7 +127,7 @@ func manageUpgradeCheckConfigMap(ctx context.Context, crClient crclient.Client, } } - err = applyConfigMap(ctx, crClient, cm, postgrescluster.ControllerName) + err = applyConfigMap(ctx, crClient, cm, naming.ControllerPostgresCluster) if err != nil { log.V(1).Info("upgrade check issue: could not apply configmap", "response", err.Error())
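
For orientation, here is a minimal sketch of the reconciler wiring this diff implies for the standalone pgAdmin controller. The field types (client.Reader, client.Writer) and the constructor name newPGAdminReconciler are assumptions inferred from how the tests above build the reconciler, not copies of the real declarations.

package standalone_pgadmin

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/crunchydata/postgres-operator/internal/naming"
)

// PGAdminReconciler, sketched: reads and writes go through separate fields so
// either side can be substituted independently in tests. The real struct keeps
// its other fields (Recorder, PodExec, ...) unchanged.
type PGAdminReconciler struct {
	Reader client.Reader // Get and List, e.g. r.Reader.Get(ctx, key, obj)
	Writer client.Writer // Delete and Patch, e.g. r.Writer.Delete(ctx, obj, opts...)
}

// newPGAdminReconciler is a hypothetical constructor showing the wiring: one
// concrete client backs both fields, and client.WithFieldOwner stamps every
// write with the controller's field manager.
func newPGAdminReconciler(cc client.Client) *PGAdminReconciler {
	return &PGAdminReconciler{
		Reader: cc,
		Writer: client.WithFieldOwner(cc, naming.ControllerPGAdmin),
	}
}

Because the field manager is baked into the Writer, write calls no longer carry a separate client.FieldOwner option, which is why the Owner field disappears from the test constructors above and the tests read Secrets back through cc.Get directly.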
