diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a5055acac..ac57d4434 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -212,6 +212,14 @@ rules: - list - update - watch +- apiGroups: + - ramendr.openshift.io + resources: + - drclusterconfigs + verbs: + - get + - list + - watch - apiGroups: - security.openshift.io resources: diff --git a/controllers/storageclaim_controller.go b/controllers/storageclaim_controller.go index afd67fe3f..2cd6262b0 100644 --- a/controllers/storageclaim_controller.go +++ b/controllers/storageclaim_controller.go @@ -22,7 +22,10 @@ import ( "encoding/hex" "encoding/json" "fmt" + "k8s.io/apimachinery/pkg/types" "reflect" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "slices" "strings" "time" @@ -31,8 +34,10 @@ import ( "github.com/red-hat-storage/ocs-client-operator/pkg/csi" "github.com/red-hat-storage/ocs-client-operator/pkg/utils" + replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1" "github.com/go-logr/logr" snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" providerclient "github.com/red-hat-storage/ocs-operator/v4/services/provider/client" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -106,8 +111,43 @@ func (r *StorageClaimReconciler) SetupWithManager(mgr ctrl.Manager) error { return fmt.Errorf("unable to set up FieldIndexer for VSC csi driver name: %v", err) } + enqueueVolumeReplicationClass := handler.EnqueueRequestsFromMapFunc( + func(context context.Context, obj client.Object) []reconcile.Request { + vrcs := &replicationv1alpha1.VolumeReplicationClassList{} + err := r.Client.List(context, vrcs, &client.ListOptions{Namespace: obj.GetNamespace()}) + if err != nil { + r.log.Error(err, "Unable to list VolumeReplicationClass objects") + return []reconcile.Request{} + } + + // Return name and namespace of the VolumeReplicationClass object + request := []reconcile.Request{} + for _, vrc := range vrcs.Items { + request = append(request, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: vrc.Namespace, + Name: vrc.Name, + }, + }) + } + return request + }, + ) + + drClusterConfigPredicate := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return false + } + oldObj := e.ObjectOld.(*ramenv1alpha1.DRClusterConfig) + newObj := e.ObjectNew.(*ramenv1alpha1.DRClusterConfig) + return !reflect.DeepEqual(oldObj.Spec, newObj.Spec) + }, + } return ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.StorageClaim{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&ramenv1alpha1.DRClusterConfig{}, enqueueVolumeReplicationClass, + builder.WithPredicates(drClusterConfigPredicate)). Owns(&storagev1.StorageClass{}). Owns(&snapapi.VolumeSnapshotClass{}). 
Complete(r) @@ -121,6 +161,7 @@ func (r *StorageClaimReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch;create;delete //+kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch //+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotcontents,verbs=get;list;watch +//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/go.mod b/go.mod index ee1e8512e..6c1a3291e 100644 --- a/go.mod +++ b/go.mod @@ -4,12 +4,14 @@ go 1.21 replace ( github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 // required by Rook v1.12 + k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.29.0 vbom.ml/util => github.com/fvbommel/util v0.0.0-20180919145318-efcd4e0f9787 ) exclude github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc2 require ( + github.com/csi-addons/kubernetes-csi-addons v0.8.0 github.com/go-logr/logr v1.4.1 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 github.com/onsi/ginkgo v1.16.5 @@ -18,6 +20,7 @@ require ( github.com/operator-framework/api v0.22.0 github.com/pkg/errors v0.9.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.72.0 + github.com/ramendr/ramen/api v0.0.0-20240719134233-210f00a7a0c0 github.com/red-hat-storage/ocs-operator/v4 v4.0.0-20240422111920-faced96485bc github.com/stretchr/testify v1.9.0 google.golang.org/grpc v1.62.1 @@ -25,7 +28,7 @@ require ( k8s.io/api v0.29.3 k8s.io/apiextensions-apiserver v0.29.2 k8s.io/apimachinery v0.29.3 - k8s.io/client-go v0.29.3 + k8s.io/client-go v12.0.0+incompatible k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 sigs.k8s.io/controller-runtime v0.17.2 diff --git a/go.sum b/go.sum index 916dd55b9..eef182f72 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/csi-addons/kubernetes-csi-addons v0.8.0 h1:zvYGp4DM6KdQzEX3dQSYKykqJdLZlxpVBJjtpbaqFjs= +github.com/csi-addons/kubernetes-csi-addons v0.8.0/go.mod h1:dvinzoiXlqdOGDpKkYx8Jxl507BzVEEEO+SI0OmBaRI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -120,6 +122,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/ramendr/ramen/api v0.0.0-20240719134233-210f00a7a0c0 h1:we+4M+jL1ojLo1FPzR1ASIMnrzWqnuBRq6eyCrD4tPA= +github.com/ramendr/ramen/api v0.0.0-20240719134233-210f00a7a0c0/go.mod h1:/g3Jrr9U/itqpRA1NN/bLxSRrKP1ja5U5tfdOtRspGU= 
github.com/red-hat-storage/ocs-operator/v4 v4.0.0-20240422111920-faced96485bc h1:bV/ttKjR3nn9jIrOSt5UOttDE6iQ6l+bzLEFPWw335M= github.com/red-hat-storage/ocs-operator/v4 v4.0.0-20240422111920-faced96485bc/go.mod h1:e4AElguwRgtyGEW7JtfJvphjYbcYG4hlpvwDYrQFGi8= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= @@ -250,8 +254,8 @@ k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2I k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= diff --git a/vendor/github.com/csi-addons/kubernetes-csi-addons/LICENSE b/vendor/github.com/csi-addons/kubernetes-csi-addons/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/csi-addons/kubernetes-csi-addons/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/groupversion_info.go b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..37182b68d --- /dev/null +++ b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2022 The Kubernetes-CSI-Addons Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the replication.storage v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=replication.storage.openshift.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "replication.storage.openshift.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/volumereplication_types.go b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/volumereplication_types.go new file mode 100644 index 000000000..d9fca40ad --- /dev/null +++ b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/volumereplication_types.go @@ -0,0 +1,131 @@ +/* +Copyright 2022 The Kubernetes-CSI-Addons Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + VolumeReplicationNameAnnotation = "replication.storage.openshift.io/volume-replication-name" +) + +// ReplicationState represents the replication operations to be performed on the volume. +// +kubebuilder:validation:Enum=primary;secondary;resync +type ReplicationState string + +const ( + // Primary ReplicationState enables mirroring and promotes the volume to primary. + Primary ReplicationState = "primary" + + // Secondary ReplicationState demotes the volume to secondary and resyncs the volume if out of sync. + Secondary ReplicationState = "secondary" + + // Resync option resyncs the volume. + Resync ReplicationState = "resync" +) + +// State captures the latest state of the replication operation. +type State string + +const ( + // PrimaryState represents the Primary replication state. + PrimaryState State = "Primary" + + // SecondaryState represents the Secondary replication state. + SecondaryState State = "Secondary" + + // UnknownState represents the Unknown replication state. + UnknownState State = "Unknown" +) + +// VolumeReplicationSpec defines the desired state of VolumeReplication. +type VolumeReplicationSpec struct { + // VolumeReplicationClass is the VolumeReplicationClass name for this VolumeReplication resource + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeReplicationClass is immutable" + VolumeReplicationClass string `json:"volumeReplicationClass"` + + // ReplicationState represents the replication operation to be performed on the volume. 
+ // Supported operations are "primary", "secondary" and "resync" + // +kubebuilder:validation:Required + ReplicationState ReplicationState `json:"replicationState"` + + // DataSource represents the object associated with the volume + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dataSource is immutable" + DataSource corev1.TypedLocalObjectReference `json:"dataSource"` + + // AutoResync represents the volume to be auto resynced when + // ReplicationState is "secondary" + // +kubebuilder:default:=false + AutoResync bool `json:"autoResync"` + + // replicationHandle represents an existing (but new) replication id + // +kubebuilder:validation:Optional + ReplicationHandle string `json:"replicationHandle"` +} + +// VolumeReplicationStatus defines the observed state of VolumeReplication. +type VolumeReplicationStatus struct { + State State `json:"state,omitempty"` + Message string `json:"message,omitempty"` + // Conditions are the list of conditions and their status. + Conditions []metav1.Condition `json:"conditions,omitempty"` + // observedGeneration is the last generation change the operator has dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + LastStartTime *metav1.Time `json:"lastStartTime,omitempty"` + LastCompletionTime *metav1.Time `json:"lastCompletionTime,omitempty"` + LastSyncTime *metav1.Time `json:"lastSyncTime,omitempty"` + LastSyncBytes *int64 `json:"lastSyncBytes,omitempty"` + LastSyncDuration *metav1.Duration `json:"lastSyncDuration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date +// +kubebuilder:printcolumn:JSONPath=".spec.volumeReplicationClass",name=volumeReplicationClass,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.dataSource.name",name=pvcName,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.replicationState",name=desiredState,type=string +// +kubebuilder:printcolumn:JSONPath=".status.state",name=currentState,type=string +// +kubebuilder:resource:shortName=vr + +// VolumeReplication is the Schema for the volumereplications API. +type VolumeReplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + Spec VolumeReplicationSpec `json:"spec"` + + Status VolumeReplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VolumeReplicationList contains a list of VolumeReplication. +type VolumeReplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VolumeReplication `json:"items"` +} + +func init() { + SchemeBuilder.Register(&VolumeReplication{}, &VolumeReplicationList{}) +} diff --git a/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/volumereplicationclass_types.go b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/volumereplicationclass_types.go new file mode 100644 index 000000000..25632a049 --- /dev/null +++ b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/volumereplicationclass_types.go @@ -0,0 +1,69 @@ +/* +Copyright 2022 The Kubernetes-CSI-Addons Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VolumeReplicationClassSpec specifies parameters that an underlying storage system uses +// when creating a volume replica. A specific VolumeReplicationClass is used by specifying +// its name in a VolumeReplication object. +// +kubebuilder:validation:XValidation:rule="has(self.parameters) == has(oldSelf.parameters)",message="parameters are immutable" +type VolumeReplicationClassSpec struct { + // Provisioner is the name of storage provisioner + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="provisioner is immutable" + Provisioner string `json:"provisioner"` + // Parameters is a key-value map with storage provisioner specific configurations for + // creating volume replicas + // +kubebuilder:validation:Optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="parameters are immutable" + Parameters map[string]string `json:"parameters,omitempty"` +} + +// VolumeReplicationClassStatus defines the observed state of VolumeReplicationClass. +type VolumeReplicationClassStatus struct{} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=vrc +// +kubebuilder:printcolumn:JSONPath=".spec.provisioner",name=provisioner,type=string + +// VolumeReplicationClass is the Schema for the volumereplicationclasses API. +type VolumeReplicationClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + Spec VolumeReplicationClassSpec `json:"spec"` + + Status VolumeReplicationClassStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VolumeReplicationClassList contains a list of VolumeReplicationClass. +type VolumeReplicationClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VolumeReplicationClass `json:"items"` +} + +func init() { + SchemeBuilder.Register(&VolumeReplicationClass{}, &VolumeReplicationClassList{}) +} diff --git a/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..cd8fe3777 --- /dev/null +++ b/vendor/github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,242 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 The Kubernetes-CSI-Addons Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplication) DeepCopyInto(out *VolumeReplication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplication. +func (in *VolumeReplication) DeepCopy() *VolumeReplication { + if in == nil { + return nil + } + out := new(VolumeReplication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeReplication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationClass) DeepCopyInto(out *VolumeReplicationClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationClass. +func (in *VolumeReplicationClass) DeepCopy() *VolumeReplicationClass { + if in == nil { + return nil + } + out := new(VolumeReplicationClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeReplicationClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationClassList) DeepCopyInto(out *VolumeReplicationClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeReplicationClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationClassList. +func (in *VolumeReplicationClassList) DeepCopy() *VolumeReplicationClassList { + if in == nil { + return nil + } + out := new(VolumeReplicationClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeReplicationClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeReplicationClassSpec) DeepCopyInto(out *VolumeReplicationClassSpec) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationClassSpec. +func (in *VolumeReplicationClassSpec) DeepCopy() *VolumeReplicationClassSpec { + if in == nil { + return nil + } + out := new(VolumeReplicationClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationClassStatus) DeepCopyInto(out *VolumeReplicationClassStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationClassStatus. +func (in *VolumeReplicationClassStatus) DeepCopy() *VolumeReplicationClassStatus { + if in == nil { + return nil + } + out := new(VolumeReplicationClassStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationList) DeepCopyInto(out *VolumeReplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeReplication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationList. +func (in *VolumeReplicationList) DeepCopy() *VolumeReplicationList { + if in == nil { + return nil + } + out := new(VolumeReplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeReplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationSpec) DeepCopyInto(out *VolumeReplicationSpec) { + *out = *in + in.DataSource.DeepCopyInto(&out.DataSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationSpec. +func (in *VolumeReplicationSpec) DeepCopy() *VolumeReplicationSpec { + if in == nil { + return nil + } + out := new(VolumeReplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeReplicationStatus) DeepCopyInto(out *VolumeReplicationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastStartTime != nil { + in, out := &in.LastStartTime, &out.LastStartTime + *out = (*in).DeepCopy() + } + if in.LastCompletionTime != nil { + in, out := &in.LastCompletionTime, &out.LastCompletionTime + *out = (*in).DeepCopy() + } + if in.LastSyncTime != nil { + in, out := &in.LastSyncTime, &out.LastSyncTime + *out = (*in).DeepCopy() + } + if in.LastSyncBytes != nil { + in, out := &in.LastSyncBytes, &out.LastSyncBytes + *out = new(int64) + **out = **in + } + if in.LastSyncDuration != nil { + in, out := &in.LastSyncDuration, &out.LastSyncDuration + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationStatus. +func (in *VolumeReplicationStatus) DeepCopy() *VolumeReplicationStatus { + if in == nil { + return nil + } + out := new(VolumeReplicationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/ramendr/ramen/api/LICENSE b/vendor/github.com/ramendr/ramen/api/LICENSE new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/LICENSE @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
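The controller changes above list csi-addons VolumeReplicationClass objects and watch the Ramen DRClusterConfig type whose vendored definition follows; both newly vendored API groups also have to be registered in the manager's runtime scheme, or those List/Watch calls fail with "no kind is registered" errors. A minimal sketch of that registration, assuming a main.go-style scheme setup (the package aliases and the scheme variable are illustrative, not taken from this patch):

package main

import (
	replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1"
	ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var scheme = runtime.NewScheme()

func init() {
	// Register the ramendr.openshift.io and replication.storage.openshift.io
	// groups so the StorageClaim controller can watch DRClusterConfig and
	// list VolumeReplicationClass objects.
	utilruntime.Must(ramenv1alpha1.AddToScheme(scheme))
	utilruntime.Must(replicationv1alpha1.AddToScheme(scheme))
}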
diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/drcluster_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/drcluster_types.go new file mode 100644 index 000000000..8f40e187b --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/drcluster_types.go @@ -0,0 +1,144 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterFenceState which will be either Unfenced, Fenced, ManuallyFenced or ManuallyUnfenced +// +kubebuilder:validation:Enum=Unfenced;Fenced;ManuallyFenced;ManuallyUnfenced +type ClusterFenceState string + +const ( + ClusterFenceStateUnfenced = ClusterFenceState("Unfenced") + ClusterFenceStateFenced = ClusterFenceState("Fenced") + ClusterFenceStateManuallyFenced = ClusterFenceState("ManuallyFenced") + ClusterFenceStateManuallyUnfenced = ClusterFenceState("ManuallyUnfenced") +) + +type Region string + +// DRClusterSpec defines the desired state of DRCluster +type DRClusterSpec struct { + // CIDRs is a list of CIDR strings. An admin can use this field to indicate + // the CIDRs that are used or could potentially be used for the nodes in + // this managed cluster. These will be used for the cluster fencing + // operation for sync/Metro DR. + CIDRs []string `json:"cidrs,omitempty"` + + // ClusterFence is a string that determines the desired fencing state of the cluster. + ClusterFence ClusterFenceState `json:"clusterFence,omitempty"` + + // Region of a managed cluster determines it DR group. + // All managed clusters in a region are considered to be in a sync group. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="region is immutable" + Region Region `json:"region"` + + // S3 profile name (in Ramen config) to use as a source to restore PV + // related cluster state during recovery or relocate actions of applications + // to this managed cluster; hence, this S3 profile should be available to + // successfully move the workload to this managed cluster. For applications + // that are active on this managed cluster, their PV related cluster state + // is stored to S3 profiles of all other drclusters in the same + // DRPolicy to enable recovery or relocate actions to those managed clusters. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3ProfileName is immutable" + S3ProfileName string `json:"s3ProfileName"` +} + +const ( + // DRCluster has been validated + DRClusterValidated string = `Validated` + + // everything is clean. No fencing CRs present + // in this cluster + DRClusterConditionTypeClean = "Clean" + + // Fencing CR to fence off this cluster + // has been created + DRClusterConditionTypeFenced = "Fenced" +) + +type DRClusterPhase string + +// These are the valid values for DRState +const ( + // Available, state recorded in the DRCluster status to indicate that this + // resource is available. Usually done when there is no fencing state + // provided in the spec and DRCluster just reconciles to validate itself. + Available = DRClusterPhase("Available") + + // Starting, state recorded in the DRCluster status to indicate that this + // is the start of the reconciler. + Starting = DRClusterPhase("Starting") + + // Fencing, state recorded in the DRCluster status to indicate that + // fencing is in progress. 
Fencing means selecting the + // peer cluster and creating a NetworkFence MW for it and waiting for MW + // to be applied in the managed cluster + Fencing = DRClusterPhase("Fencing") + + // Fenced, this is the state that will be recorded in the DRCluster status + // when fencing has been performed successfully + Fenced = DRClusterPhase("Fenced") + + // Unfencing, state recorded in the DRCluster status to indicate that + // unfencing is in progress. Unfencing means selecting the + // peer cluster and creating/updating a NetworkFence MW for it and waiting for MW + // to be applied in the managed cluster + Unfencing = DRClusterPhase("Unfencing") + + // Unfenced, this is the state that will be recorded in the DRCluster status + // when unfencing has been performed successfully + Unfenced = DRClusterPhase("Unfenced") +) + +type ClusterMaintenanceMode struct { + // StorageProvisioner indicates the type of the provisioner + StorageProvisioner string `json:"storageProvisioner"` + + // TargetID indicates the storage or replication instance identifier for the StorageProvisioner + TargetID string `json:"targetID"` + + // State from MaintenanceMode resource created for the StorageProvisioner + State MModeState `json:"state"` + + // Conditions from MaintenanceMode resource created for the StorageProvisioner + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// DRClusterStatus defines the observed state of DRCluster +type DRClusterStatus struct { + Phase DRClusterPhase `json:"phase,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` + MaintenanceModes []ClusterMaintenanceMode `json:"maintenanceModes,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:scope=Cluster + +// DRCluster is the Schema for the drclusters API +type DRCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DRClusterSpec `json:"spec,omitempty"` + Status DRClusterStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DRClusterList contains a list of DRCluster +type DRClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DRCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DRCluster{}, &DRClusterList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/drclusterconfig_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/drclusterconfig_types.go new file mode 100644 index 000000000..eebc817b9 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/drclusterconfig_types.go @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DRClusterConfigSpec defines the desired state of DRClusterConfig +// It carries information regarding the cluster identity as known at the OCM hub cluster. It is also used to +// advertise required replication schedules on the cluster, if an equivalent DRPolicy resource is created for +// the same at the hub cluster. +// It is expected to be watched and used by storage providers that require meta information regarding the cluster +// and to prepare and manage required storage resources. +type DRClusterConfigSpec struct { + // ReplicationSchedules desired from storage providers for replicating Persistent Volume data to a peer cluster. + // Values are in the form <num><m,h,d>.
Where <num> is a number, 'm' indicates minutes, 'h' means hours and + // 'd' stands for days. + // Typically used to generate VolumeReplicationClass resources with the desired schedules by storage + // provider reconcilers + ReplicationSchedules []string `json:"replicationSchedules,omitempty"` + + // ClusterID would carry the ManagedCluster identity from the ManagedCluster claim value for `id.k8s.io` + ClusterID string `json:"clusterID,omitempty"` + + // TODO: PeerClusters []ClusterID; to decide if we really need this! +} + +// DRClusterConfigStatus defines the observed state of DRClusterConfig +type DRClusterConfigStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // TODO: handle no status for this resource, and remove required RBAC/kubebuilder artifacts for the same +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:scope=Cluster + +// DRClusterConfig is the Schema for the drclusterconfigs API +// +//nolint:maligned +type DRClusterConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DRClusterConfigSpec `json:"spec,omitempty"` + Status DRClusterConfigStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DRClusterConfigList contains a list of DRClusterConfig +type DRClusterConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DRClusterConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DRClusterConfig{}, &DRClusterConfigList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/drplacementcontrol_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/drplacementcontrol_types.go new file mode 100644 index 000000000..b4cb8d04a --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/drplacementcontrol_types.go @@ -0,0 +1,272 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DRAction which will be either a Failover or Relocate action +// +kubebuilder:validation:Enum=Failover;Relocate +type DRAction string + +// These are the valid values for DRAction +const ( + // Failover, restore PVs to the TargetCluster + ActionFailover = DRAction("Failover") + + // Relocate, restore PVs to the designated TargetCluster. PreferredCluster will change + // to be the TargetCluster. + ActionRelocate = DRAction("Relocate") +) + +// DRState for keeping track of the DR placement +type DRState string + +// These are the valid values for DRState +const ( + // WaitForUser, state recorded in DRPC status to indicate that we are + // waiting for the user to take an action after hub recover. + WaitForUser = DRState("WaitForUser") + + // Initiating, state recorded in the DRPC status to indicate that this + // action (Deploy/Failover/Relocate) is preparing for execution. There + // is NO follow up state called 'Initiated' + Initiating = DRState("Initiating") + + // Deploying, state recorded in the DRPC status to indicate that the + // initial deployment is in progress.
Deploying means selecting the + // preffered cluster and creating a VRG MW for it and waiting for MW + // to be applied in the managed cluster + Deploying = DRState("Deploying") + + // Deployed, this is the state that will be recorded in the DRPC status + // when initial deplyment has been performed successfully + Deployed = DRState("Deployed") + + // FailingOver, state recorded in the DRPC status when the failover + // is initiated but has not been completed yet + FailingOver = DRState("FailingOver") + + // FailedOver, state recorded in the DRPC status when the failover + // process has completed + FailedOver = DRState("FailedOver") + + // Relocating, state recorded in the DRPC status to indicate that the + // relocation is in progress + Relocating = DRState("Relocating") + + // Relocated, state recorded in + Relocated = DRState("Relocated") + + Deleting = DRState("Deleting") +) + +const ( + // Available condition provides the latest available observation regarding the readiness of the cluster, + // in status.preferredDecision, for workload deployment. + ConditionAvailable = "Available" + + // PeerReady condition provides the latest available observation regarding the readiness of a peer cluster + // to failover or relocate the workload. + ConditionPeerReady = "PeerReady" + + // Protected condition provides the latest available observation regarding the protection status of the workload, + // on the cluster it is expected to be available on. + ConditionProtected = "Protected" +) + +const ( + ReasonProgressing = "Progressing" + ReasonCleaning = "Cleaning" + ReasonSuccess = "Success" + ReasonNotStarted = "NotStarted" + ReasonPaused = "Paused" +) + +const ( + ReasonProtectedUnknown = "Unknown" + ReasonProtectedProgressing = "Progressing" + ReasonProtectedError = "Error" + ReasonProtected = "Protected" +) + +type ProgressionStatus string + +const ( + ProgressionCompleted = ProgressionStatus("Completed") + ProgressionCreatingMW = ProgressionStatus("CreatingMW") + ProgressionUpdatingPlRule = ProgressionStatus("UpdatingPlRule") + ProgressionWaitForReadiness = ProgressionStatus("WaitForReadiness") + ProgressionCleaningUp = ProgressionStatus("Cleaning Up") + ProgressionWaitOnUserToCleanUp = ProgressionStatus("WaitOnUserToCleanUp") + ProgressionCheckingFailoverPrequisites = ProgressionStatus("CheckingFailoverPrequisites") + ProgressionFailingOverToCluster = ProgressionStatus("FailingOverToCluster") + ProgressionWaitForFencing = ProgressionStatus("WaitForFencing") + ProgressionWaitForStorageMaintenanceActivation = ProgressionStatus("WaitForStorageMaintenanceActivation") + ProgressionPreparingFinalSync = ProgressionStatus("PreparingFinalSync") + ProgressionClearingPlacement = ProgressionStatus("ClearingPlacement") + ProgressionRunningFinalSync = ProgressionStatus("RunningFinalSync") + ProgressionFinalSyncComplete = ProgressionStatus("FinalSyncComplete") + ProgressionEnsuringVolumesAreSecondary = ProgressionStatus("EnsuringVolumesAreSecondary") + ProgressionWaitingForResourceRestore = ProgressionStatus("WaitingForResourceRestore") + ProgressionUpdatedPlacement = ProgressionStatus("UpdatedPlacement") + ProgressionEnsuringVolSyncSetup = ProgressionStatus("EnsuringVolSyncSetup") + ProgressionSettingupVolsyncDest = ProgressionStatus("SettingUpVolSyncDest") + ProgressionDeleting = ProgressionStatus("Deleting") + ProgressionDeleted = ProgressionStatus("Deleted") + ProgressionActionPaused = ProgressionStatus("Paused") +) + +// DRPlacementControlSpec defines the desired state of DRPlacementControl +type 
DRPlacementControlSpec struct { + // PlacementRef is the reference to the PlacementRule used by DRPC + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="placementRef is immutable" + PlacementRef v1.ObjectReference `json:"placementRef"` + + // ProtectedNamespaces is a list of namespaces that are protected by the DRPC. + // Omitting this field means resources are only protected in the namespace controlled by the PlacementRef. + // If this field is set, the PlacementRef and the DRPC must be in the RamenOpsNamespace as set in the Ramen Config. + // If this field is set, the protected namespace resources are treated as unmanaged. + // You can use a recipe to filter and coordinate the order of the resources that are protected. + // +kubebuilder:validation:Optional + ProtectedNamespaces *[]string `json:"protectedNamespaces,omitempty"` + + // DRPolicyRef is the reference to the DRPolicy participating in the DR replication for this DRPC + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="drPolicyRef is immutable" + DRPolicyRef v1.ObjectReference `json:"drPolicyRef"` + + // PreferredCluster is the cluster name that the user preferred to run the application on + PreferredCluster string `json:"preferredCluster,omitempty"` + + // FailoverCluster is the cluster name that the user wants to failover the application to. + // If not sepcified, then the DRPC will select the surviving cluster from the DRPolicy + FailoverCluster string `json:"failoverCluster,omitempty"` + + // Label selector to identify all the PVCs that need DR protection. + // This selector is assumed to be the same for all subscriptions that + // need DR protection. It will be passed in to the VRG when it is created + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="pvcSelector is immutable" + PVCSelector metav1.LabelSelector `json:"pvcSelector"` + + // Action is either Failover or Relocate operation + Action DRAction `json:"action,omitempty"` + + // +optional + KubeObjectProtection *KubeObjectProtectionSpec `json:"kubeObjectProtection,omitempty"` +} + +// PlacementDecision defines the decision made by controller +type PlacementDecision struct { + ClusterName string `json:"clusterName,omitempty"` + ClusterNamespace string `json:"clusterNamespace,omitempty"` +} + +// VRGResourceMeta represents the VRG resource. +type VRGResourceMeta struct { + // Kind is the kind of the Kubernetes resource. + Kind string `json:"kind"` + + // Name is the name of the Kubernetes resource. + Name string `json:"name"` + + // Namespace is the namespace of the Kubernetes resource. + Namespace string `json:"namespace"` + + // A sequence number representing a specific generation of the desired state. + Generation int64 `json:"generation"` + + // List of PVCs that are protected by the VRG resource + //+optional + ProtectedPVCs []string `json:"protectedpvcs,omitempty"` + + // ResourceVersion is a value used to identify the version of the + // VRG resource object + //+optional + ResourceVersion string `json:"resourceVersion,omitempty"` +} + +// VRGConditions represents the conditions of the resources deployed on a +// managed cluster. +type VRGConditions struct { + // ResourceMeta represents the VRG resoure. + // +required + ResourceMeta VRGResourceMeta `json:"resourceMeta,omitempty"` + + // Conditions represents the conditions of this resource on a managed cluster. 
+ // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// DRPlacementControlStatus defines the observed state of DRPlacementControl +type DRPlacementControlStatus struct { + Phase DRState `json:"phase,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ActionStartTime *metav1.Time `json:"actionStartTime,omitempty"` + ActionDuration *metav1.Duration `json:"actionDuration,omitempty"` + Progression ProgressionStatus `json:"progression,omitempty"` + PreferredDecision PlacementDecision `json:"preferredDecision,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` + ResourceConditions VRGConditions `json:"resourceConditions,omitempty"` + + // LastUpdateTime is when was the last time a condition or the overall status was updated + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + + // lastGroupSyncTime is the time of the most recent successful synchronization of all PVCs + //+optional + LastGroupSyncTime *metav1.Time `json:"lastGroupSyncTime,omitempty"` + + // lastGroupSyncDuration is the longest time taken to sync + // from the most recent successful synchronization of all PVCs + //+optional + LastGroupSyncDuration *metav1.Duration `json:"lastGroupSyncDuration,omitempty"` + + // lastGroupSyncBytes is the total bytes transferred from the most recent + // successful synchronization of all PVCs + //+optional + LastGroupSyncBytes *int64 `json:"lastGroupSyncBytes,omitempty"` + + // lastKubeObjectProtectionTime is the time of the most recent successful kube object protection + //+optional + LastKubeObjectProtectionTime *metav1.Time `json:"lastKubeObjectProtectionTime,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date +// +kubebuilder:printcolumn:JSONPath=".spec.preferredCluster",name=preferredCluster,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.failoverCluster",name=failoverCluster,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.action",name=desiredState,type=string +// +kubebuilder:printcolumn:JSONPath=".status.phase",name=currentState,type=string +// +kubebuilder:printcolumn:JSONPath=".status.progression",name=progression,type=string,priority=2 +// +kubebuilder:printcolumn:JSONPath=".status.actionStartTime",name=start time,type=string,priority=2 +// +kubebuilder:printcolumn:JSONPath=".status.actionDuration",name=duration,type=string,priority=2 +// +kubebuilder:printcolumn:JSONPath=".status.conditions[1].status",name=peer ready,type=string,priority=2 +// +kubebuilder:resource:shortName=drpc + +// DRPlacementControl is the Schema for the drplacementcontrols API +type DRPlacementControl struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DRPlacementControlSpec `json:"spec,omitempty"` + Status DRPlacementControlStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DRPlacementControlList contains a list of DRPlacementControl +type DRPlacementControlList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DRPlacementControl `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DRPlacementControl{}, &DRPlacementControlList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/drpolicy_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/drpolicy_types.go new file mode 100644 index 000000000..3685b8101 --- /dev/null +++ 
b/vendor/github.com/ramendr/ramen/api/v1alpha1/drpolicy_types.go @@ -0,0 +1,81 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DRPolicySpec defines the desired state of DRPolicy +// +kubebuilder:validation:XValidation:rule="has(oldSelf.replicationClassSelector) == has(self.replicationClassSelector)", message="replicationClassSelector is immutable" +// +kubebuilder:validation:XValidation:rule="has(oldSelf.volumeSnapshotClassSelector) == has(self.volumeSnapshotClassSelector)", message="volumeSnapshotClassSelector is immutable" +type DRPolicySpec struct { + // scheduling Interval for replicating Persistent Volume + // data to a peer cluster. Interval is typically in the + // form <num><m,h,d>. Here <num> is a number, 'm' means + // minutes, 'h' means hours and 'd' stands for days. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(|\d+[mhd])$` + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="schedulingInterval is immutable" + SchedulingInterval string `json:"schedulingInterval"` + + // Label selector to identify all the VolumeReplicationClasses. + // This selector is assumed to be the same for all subscriptions that + // need DR protection. It will be passed in to the VRG when it is created + //+optional + // +kubebuilder:validation:Optional + // +kubebuilder:default:={} + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="replicationClassSelector is immutable" + ReplicationClassSelector metav1.LabelSelector `json:"replicationClassSelector"` + + // Label selector to identify all the VolumeSnapshotClasses. + // This selector is assumed to be the same for all subscriptions that + // need DR protection.
It will be passed in to the VRG when it is created + //+optional + // +kubebuilder:validation:Optional + // +kubebuilder:default:={} + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="volumeSnapshotClassSelector is immutable" + VolumeSnapshotClassSelector metav1.LabelSelector `json:"volumeSnapshotClassSelector"` + + // List of DRCluster resources that are governed by this policy + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="size(self) == 2", message="drClusters requires a list of 2 clusters" + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="drClusters is immutable" + DRClusters []string `json:"drClusters"` +} + +// DRPolicyStatus defines the observed state of DRPolicy +type DRPolicyStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +const ( + DRPolicyValidated string = `Validated` +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster + +// DRPolicy is the Schema for the drpolicies API +type DRPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DRPolicySpec `json:"spec,omitempty"` + Status DRPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DRPolicyList contains a list of DRPolicy +type DRPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DRPolicy `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DRPolicy{}, &DRPolicyList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/groupversion_info.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..b8cece354 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/groupversion_info.go @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +// Package v1alpha1 contains API Schema definitions for the ramendr v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=ramendr.openshift.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "ramendr.openshift.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
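For example, a consumer such as a storage provider reconciler registers this group via AddToScheme and can then read the replication schedules advertised by the cluster-scoped DRClusterConfig resources. A minimal sketch, not part of the vendored API (the helper name is illustrative, and in practice AddToScheme is invoked once during manager setup rather than per call):

package example

import (
	"context"
	"fmt"

	ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listReplicationSchedules adds the ramendr.openshift.io/v1alpha1 types to the
// scheme and collects the ReplicationSchedules advertised by DRClusterConfig
// resources on the cluster.
func listReplicationSchedules(ctx context.Context, scheme *runtime.Scheme, c client.Client) ([]string, error) {
	if err := ramenv1alpha1.AddToScheme(scheme); err != nil {
		return nil, fmt.Errorf("failed to add ramen types to scheme: %w", err)
	}

	drClusterConfigs := &ramenv1alpha1.DRClusterConfigList{}
	if err := c.List(ctx, drClusterConfigs); err != nil {
		return nil, fmt.Errorf("failed to list DRClusterConfig resources: %w", err)
	}

	schedules := []string{}
	for idx := range drClusterConfigs.Items {
		schedules = append(schedules, drClusterConfigs.Items[idx].Spec.ReplicationSchedules...)
	}
	return schedules, nil
}

A provider reconciler would typically create one VolumeReplicationClass per returned schedule, as noted in the DRClusterConfigSpec comment.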
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/maintenancemode_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/maintenancemode_types.go new file mode 100644 index 000000000..d99df594d --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/maintenancemode_types.go @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MMode defines a maintenance mode, that a storage backend may be requested to act on, based on the DR orchestration +// in progress for one or more workloads whose PVCs use the specific storage provisioner +// +kubebuilder:validation:Enum=Failover +type MMode string + +// Supported maintenance modes +const ( + MModeFailover = MMode("Failover") +) + +// MaintenanceModeSpec defines the desired state of MaintenanceMode for a StorageProvisioner +// If a storage or replication backend desires specific maintenance modes to be activated prior to certain +// Ramen actions (for e.g notify backend of ANY failover operation for internal storage preparation), it presents +// its requirements via specific Ramen labels on the appropriate StorageClass or ReplicationClass as detailed +// in the VolumeReplicationGroup status.ProtectedPVCs.StorageIdentifiers fields. +// Ramen orchestration would create required MaintenanceMode resources based on these labels, for the storage +// backed to reconcile and provide its readiness status for the action. +// NOTE: Ramen only creates the MaintenanceMode resource, it is expected to be reconciled by the storage drivers +// by matching the provisioner and the targetID, that is specific to its instance, and update status as detailed +// for Ramen to proceed with its actions +type MaintenanceModeSpec struct { + // StorageProvisioner indicates the type of the provisioner, and is matched with provisioner string present in the + // StorageClass and/or VolumeReplicationClass for PVCs that are DR protected + StorageProvisioner string `json:"storageProvisioner"` + + // TargetID indicates the storage or replication instance identifier for the StorageProvisioner that needs to handle + // the requested maintenance modes. 
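As a sketch of the driver-side handling described above (illustrative only: the function and condition reason are assumptions, the state and condition constants are defined further below in this file, and the actual storage preparation is driver specific and omitted), a provisioner that owns a MaintenanceMode matches it on provisioner and target ID and reports completion through the status:

package example

import (
	ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ackFailoverMaintenanceMode skips MaintenanceMode resources that belong to
// other provisioners or targets, and otherwise marks the Failover mode as
// activated so that Ramen can proceed with its action.
func ackFailoverMaintenanceMode(mm *ramenv1alpha1.MaintenanceMode, myProvisioner, myTargetID string) bool {
	if mm.Spec.StorageProvisioner != myProvisioner || mm.Spec.TargetID != myTargetID {
		return false // not ours to reconcile
	}

	meta.SetStatusCondition(&mm.Status.Conditions, metav1.Condition{
		Type:               string(ramenv1alpha1.MModeConditionFailoverActivated),
		Status:             metav1.ConditionTrue,
		Reason:             "Activated",
		ObservedGeneration: mm.Generation,
		Message:            "failover maintenance mode activated",
	})
	mm.Status.State = ramenv1alpha1.MModeStateCompleted
	mm.Status.ObservedGeneration = mm.Generation
	return true
}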
It is read using ramen specific labels on the StorageClass or + // the VolumeReplicationClass as set by the storage provisioner + TargetID string `json:"targetID,omitempty"` + + // Modes are the desired maintenance modes that the storage provisioner needs to act on + Modes []MMode `json:"modes,omitempty"` +} + +// MModeState defines the state of the system as per the desired spec, at a given generation of the spec (which is noted +// in status.observedGeneration) +// +kubebuilder:validation:Enum=Unknown;Error;Progressing;Completed +type MModeState string + +// Valid values for MModeState +const ( + MModeStateUnknown = MModeState("Unknown") + MModeStateError = MModeState("Error") + MModeStateProgressing = MModeState("Progressing") + MModeStateCompleted = MModeState("Completed") +) + +// MModeStatusConditionType defines an expected condition type +// +kubebuilder:validation:Enum=FailoverActivated +type MModeStatusConditionType string + +// Valid MModeStatusConditionType types (condition types) +const ( + MModeConditionFailoverActivated = MModeStatusConditionType("FailoverActivated") +) + +// MaintenanceModeStatus defines the observed state of MaintenanceMode +type MaintenanceModeStatus struct { + State MModeState `json:"state,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:scope=Cluster + +// MaintenanceMode is the Schema for the maintenancemodes API +type MaintenanceMode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MaintenanceModeSpec `json:"spec,omitempty"` + Status MaintenanceModeStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// MaintenanceModeList contains a list of MaintenanceMode +type MaintenanceModeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MaintenanceMode `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MaintenanceMode{}, &MaintenanceModeList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/protectedvolumereplicationgrouplist_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/protectedvolumereplicationgrouplist_types.go new file mode 100644 index 000000000..276e28a89 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/protectedvolumereplicationgrouplist_types.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ProtectedVolumeReplicationGroupListSpec defines the desired state of ProtectedVolumeReplicationGroupList +type ProtectedVolumeReplicationGroupListSpec struct { + // ProfileName is the name of the S3 profile in the Ramen operator config map + // specifying the store to be queried + S3ProfileName string `json:"s3ProfileName"` +} + +// ProtectedVolumeReplicationGroupListStatus defines the observed state of ProtectedVolumeReplicationGroupList +type ProtectedVolumeReplicationGroupListStatus struct { + // SampleTime is a timestamp representing the node time when the specified + // store was last queried. It is represented in RFC3339 form and is in UTC. + SampleTime metav1.Time `json:"sampleTime,omitempty"` + + // Items is a list of VolumeReplicationGroup objects represented in + // the specified store when it was last queried. 
+ Items []VolumeReplicationGroup `json:"items,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:scope=Cluster + +// ProtectedVolumeReplicationGroupList is the Schema for the protectedvolumereplicationgrouplists API +type ProtectedVolumeReplicationGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ProtectedVolumeReplicationGroupListSpec `json:"spec,omitempty"` + // +optional + Status *ProtectedVolumeReplicationGroupListStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// ProtectedVolumeReplicationGroupListList contains a list of ProtectedVolumeReplicationGroupList +type ProtectedVolumeReplicationGroupListList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProtectedVolumeReplicationGroupList `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ProtectedVolumeReplicationGroupList{}, &ProtectedVolumeReplicationGroupListList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/ramenconfig_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/ramenconfig_types.go new file mode 100644 index 000000000..0c860bfd6 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/ramenconfig_types.go @@ -0,0 +1,198 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +// ControllerType is the type of controller to run +// +kubebuilder:validation:Enum=dr-hub;dr-cluster +type ControllerType string + +const ( + // DRClusterType operates as the DR cluster controller on a peer cluster + DRClusterType ControllerType = "dr-cluster" + + // DRHubType operates as the DR hub controller on a cluster managing DR across peer clusters + DRHubType ControllerType = "dr-hub" +) + +// When naming a S3 bucket, follow the bucket naming rules at: +// https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html +// - Bucket names must be between 3 and 63 characters long. +// - Bucket names can consist only of lowercase letters, numbers, dots (.), and hyphens (-). +// - Bucket names must begin and end with a letter or number. +// - Bucket names must not be formatted as an IP address (for example, 192.168.5.4). +// - Bucket names must be unique within a partition. A partition is a grouping of Regions. +// - Buckets used with Amazon S3 Transfer Acceleration can't have dots (.) in their names. + +// Profile of a S3 compatible store to replicate the relevant Kubernetes cluster +// state (in etcd), such as PV state, across clusters protected by Ramen. +// - DRProtectionControl and VolumeReplicationGroup objects specify the S3 +// profile that should be used to protect the cluster state of the relevant +// PVs. +// - A single S3 store profile can be used by multiple DRProtectionControl and +// VolumeReplicationGroup objects. +// - See DRPolicy type for additional details about S3 configuration options +type S3StoreProfile struct { + // Name of this S3 profile + S3ProfileName string `json:"s3ProfileName"` + + // Name of the S3 bucket to protect and recover PV related cluster-data of + // subscriptions protected by this DR policy. This S3 bucket name is used + // across all DR policies that use this S3 profile. 
Objects deposited in + // this bucket are prefixed with the namespace-qualified name of the VRG to + // uniquely identify objects of a particular subscription (an instance of an + // application). A single S3 bucket at a given endpoint may be shared by + // multiple DR placements that are concurrently active in a given hub. + // However, sharing an S3 bucket across multiple hub clusters can cause + // object key name conflicts of cluster data uploaded to the bucket, + // resulting in undefined and undesired side-effects. Hence, do not share an + // S3 bucket at a given S3 endpoint across multiple hub clusters. Bucket + // name should follow AWS bucket naming rules: + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html + S3Bucket string `json:"s3Bucket"` + + // S3 compatible endpoint of the object store of this S3 profile + S3CompatibleEndpoint string `json:"s3CompatibleEndpoint"` + + // S3 Region; the AWS go client SDK does not have a default region; hence, + // this is a mandatory field. + // https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html + S3Region string `json:"s3Region"` + + // Reference to the secret that contains the S3 access key id and s3 secret + // access key with the keys AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY + // respectively. + S3SecretRef v1.SecretReference `json:"s3SecretRef"` + //+optional + VeleroNamespaceSecretKeyRef *v1.SecretKeySelector `json:"veleroNamespaceSecretKeyRef,omitempty"` + // A CA bundle to use when verifying TLS connections to the provider + //+optional + CACertificates []byte `json:"caCertificates,omitempty"` +} + +// ControllerMetrics defines the controller metrics configuration +type ControllerMetrics struct { + // BindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // +optional + BindAddress string `json:"bindAddress,omitempty"` +} + +// ControllerHealth defines the health configs. +type ControllerHealth struct { + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // It can be set to "0" or "" to disable serving the health probe. 
+ // +optional + HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` + + // ReadinessEndpointName, defaults to "readyz" + // +optional + ReadinessEndpointName string `json:"readinessEndpointName,omitempty"` + + // LivenessEndpointName, defaults to "healthz" + // +optional + LivenessEndpointName string `json:"livenessEndpointName,omitempty"` +} + +//+kubebuilder:object:root=true + +// RamenConfig is the Schema for the ramenconfig API +type RamenConfig struct { + metav1.TypeMeta `json:",inline"` + + // LeaderElection is the LeaderElection config to be used when configuring + // the manager.Manager leader election + // +optional + LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"` + + // Metrics contains the controller metrics configuration + // +optional + Metrics ControllerMetrics `json:"metrics,omitempty"` + + // Health contains the controller health configuration + // +optional + Health ControllerHealth `json:"health,omitempty"` + // RamenControllerType defines the type of controller to run + RamenControllerType ControllerType `json:"ramenControllerType"` + + // Map of S3 store profiles + S3StoreProfiles []S3StoreProfile `json:"s3StoreProfiles,omitempty"` + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. + // Defaults to 1. + MaxConcurrentReconciles int `json:"maxConcurrentReconciles,omitempty"` + + // dr-cluster operator deployment/undeployment automation configuration + DrClusterOperator struct { + // dr-cluster operator deployment/undeployment automation enabled + DeploymentAutomationEnabled bool `json:"deploymentAutomationEnabled,omitempty"` + + // Enable s3 secret distribution and management across dr-clusters + S3SecretDistributionEnabled bool `json:"s3SecretDistributionEnabled,omitempty"` + + // channel name + ChannelName string `json:"channelName,omitempty"` + + // package name + PackageName string `json:"packageName,omitempty"` + + // namespace name + NamespaceName string `json:"namespaceName,omitempty"` + + // catalog source name + CatalogSourceName string `json:"catalogSourceName,omitempty"` + + // catalog source namespace name + CatalogSourceNamespaceName string `json:"catalogSourceNamespaceName,omitempty"` + + // cluster service version name + ClusterServiceVersionName string `json:"clusterServiceVersionName,omitempty"` + } `json:"drClusterOperator,omitempty"` + + // VolSync configuration + VolSync struct { + // Disabled is used to disable VolSync usage in Ramen. Defaults to false. + Disabled bool `json:"disabled,omitempty"` + + // Default cephFS CSIDriver name used to enable ROX volumes. If this name matches + // the PVC's storageclass provisioner, a new storageclass will be created and the + // name of it passed to VolSync alongside the readOnly flag access mode. + CephFSCSIDriverName string `json:"cephFSCSIDriverName,omitempty"` + + // destinationCopyMethod indicates the method that should be used when syncing + // from source to destination. Should be Snapshot/Direct + // default: Snapshot + DestinationCopyMethod string `json:"destinationCopyMethod,omitempty"` + } `json:"volSync,omitempty"` + + KubeObjectProtection struct { + // Disabled is used to disable KubeObjectProtection usage in Ramen. 
Disabled bool `json:"disabled,omitempty"` + // Velero namespace input + VeleroNamespaceName string `json:"veleroNamespaceName,omitempty"` + } `json:"kubeObjectProtection,omitempty"` + + MultiNamespace struct { + // Enables feature to protect resources in namespaces other than VRG's + FeatureEnabled bool `json:"FeatureEnabled,omitempty"` + VolsyncSupported bool `json:"volsyncSupported,omitempty"` + } `json:"multiNamespace,omitempty"` + + // Unprotect deleted or deselected PVCs + VolumeUnprotectionEnabled bool `json:"volumeUnprotectionEnabled,omitempty"` + + // RamenOpsNamespace is the namespace where resources for unmanaged apps are created + RamenOpsNamespace string `json:"ramenOpsNamespace,omitempty"` +} + +func init() { + SchemeBuilder.Register(&RamenConfig{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/volumereplicationgroup_types.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/volumereplicationgroup_types.go new file mode 100644 index 000000000..18d27c705 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/volumereplicationgroup_types.go @@ -0,0 +1,364 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ReplicationState represents the replication operations to be performed on the volume +type ReplicationState string + +const ( + // Promote the protected PVCs to primary + Primary ReplicationState = "primary" + + // Demote the protected PVCs to secondary + Secondary ReplicationState = "secondary" +) + +// State captures the latest state of the replication operation +type State string + +const ( + // PrimaryState represents the Primary replication state + PrimaryState State = "Primary" + + // SecondaryState represents the Secondary replication state + SecondaryState State = "Secondary" + + // UnknownState represents the Unknown replication state + UnknownState State = "Unknown" +) + +// VRGAsyncSpec has the parameters associated with RegionalDR +type VRGAsyncSpec struct { + // Label selector to identify the VolumeReplicationClass resources + // that are scanned to select an appropriate VolumeReplicationClass + // for the VolumeReplication resource. + //+optional + ReplicationClassSelector metav1.LabelSelector `json:"replicationClassSelector,omitempty"` + + // Label selector to identify the VolumeSnapshotClass resources + // that are scanned to select an appropriate VolumeSnapshotClass + // for the VolumeReplication resource when using VolSync. + //+optional + VolumeSnapshotClassSelector metav1.LabelSelector `json:"volumeSnapshotClassSelector,omitempty"` + + // scheduling Interval for replicating Persistent Volume + // data to a peer cluster. Interval is typically in the + // form <num><m,h,d>. Here <num> is a number, 'm' means + // minutes, 'h' means hours and 'd' stands for days.
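Since Go's time.ParseDuration does not accept the 'd' (days) unit described above, consumers of SchedulingInterval usually convert such values themselves. A minimal sketch, with an illustrative helper name:

package example

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseSchedulingInterval converts a Ramen scheduling interval such as
// "5m", "2h" or "7d" into a time.Duration. The "d" suffix is expanded to
// hours because time.ParseDuration has no day unit.
func parseSchedulingInterval(interval string) (time.Duration, error) {
	if strings.HasSuffix(interval, "d") {
		days, err := strconv.Atoi(strings.TrimSuffix(interval, "d"))
		if err != nil {
			return 0, fmt.Errorf("invalid scheduling interval %q: %w", interval, err)
		}
		return time.Duration(days) * 24 * time.Hour, nil
	}
	return time.ParseDuration(interval)
}

For instance, parseSchedulingInterval("7d") yields 168h0m0s, while "5m" and "2h" are handled directly by time.ParseDuration.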
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^\d+[mhd]$` + SchedulingInterval string `json:"schedulingInterval"` +} + +// VRGSyncSpec has the parameters associated with MetroDR +type VRGSyncSpec struct{} + +// VolSyncReplicationDestinationSpec defines the configuration for the VolSync +// protected PVC to be used by the destination cluster (Secondary) +type VolSyncReplicationDestinationSpec struct { + // protectedPVC contains the information about the PVC to be protected by VolSync + //+optional + ProtectedPVC ProtectedPVC `json:"protectedPVC,omitempty"` +} + +// VolSyncReplicationSourceSpec defines the configuration for the VolSync +// protected PVC to be used by the source cluster (Primary) +type VolSyncReplicationSourceSpec struct { + // protectedPVC contains the information about the PVC to be protected by VolSync + //+optional + ProtectedPVC ProtectedPVC `json:"protectedPVC,omitempty"` +} + +// VolSynccSpec defines the ReplicationDestination specs for the Secondary VRG, or +// the ReplicationSource specs for the Primary VRG +type VolSyncSpec struct { + // rdSpec array contains the PVCs information that will/are be/being protected by VolSync + //+optional + RDSpec []VolSyncReplicationDestinationSpec `json:"rdSpec,omitempty"` + + // disabled when set, all the VolSync code is bypassed. Default is 'false' + Disabled bool `json:"disabled,omitempty"` +} + +// VRGAction which will be either a Failover or Relocate +// +kubebuilder:validation:Enum=Failover;Relocate +type VRGAction string + +// These are the valid values for VRGAction +const ( + // Failover, VRG was failed over to/from this cluster, + // the to/from is determined by VRG spec.ReplicationState values of Primary/Secondary respectively + VRGActionFailover = VRGAction("Failover") + + // Relocate, VRG was relocated to/from this cluster, + // the to/from is determined by VRG spec.ReplicationState values of Primary/Secondary respectively + VRGActionRelocate = VRGAction("Relocate") +) + +const ReservedBackupName = "use-backup-not-restore" + +type KubeObjectProtectionSpec struct { + // Preferred time between captures + //+optional + //+kubebuilder:validation:Format=duration + CaptureInterval *metav1.Duration `json:"captureInterval,omitempty"` + + // Name of the Recipe to reference for capture and recovery workflows and volume selection. + //+optional + RecipeRef *RecipeRef `json:"recipeRef,omitempty"` + + // Recipe parameter definitions + //+optional + RecipeParameters map[string][]string `json:"recipeParameters,omitempty"` + + // Label selector to identify all the kube objects that need DR protection. + // +optional + KubeObjectSelector *metav1.LabelSelector `json:"kubeObjectSelector,omitempty"` +} + +type RecipeRef struct { + // Name of namespace recipe is in + //+optional + Namespace string `json:"namespace,omitempty"` + + // Name of recipe + //+optional + Name string `json:"name,omitempty"` +} + +const KubeObjectProtectionCaptureIntervalDefault = 5 * time.Minute + +// VolumeReplicationGroup (VRG) spec declares the desired schedule for data +// replication and replication state of all PVCs identified via the given +// PVC label selector. For each such PVC, the VRG will do the following: +// - Create a VolumeReplication (VR) CR to enable storage level replication +// of volume data and set the desired replication state (primary, secondary, +// etc). +// - Take the corresponding PV cluster data in Kubernetes etcd and deposit it in +// the S3 store. 
The url, access key and access id required to access the +// S3 store is specified via environment variables of the VRG operator POD, +// which is obtained from a secret resource. +// - Manage the lifecycle of VR CR and S3 data according to CUD operations on +// the PVC and the VRG CR. +type VolumeReplicationGroupSpec struct { + // Label selector to identify all the PVCs that are in this group + // that needs to be replicated to the peer cluster. + PVCSelector metav1.LabelSelector `json:"pvcSelector"` + + // Desired state of all volumes [primary or secondary] in this replication group; + // this value is propagated to children VolumeReplication CRs + ReplicationState ReplicationState `json:"replicationState"` + + // List of unique S3 profiles in RamenConfig that should be used to store + // and forward PV related cluster state to peer DR clusters. + S3Profiles []string `json:"s3Profiles"` + + //+optional + Async *VRGAsyncSpec `json:"async,omitempty"` + //+optional + Sync *VRGSyncSpec `json:"sync,omitempty"` + + // volsync defines the configuration when using VolSync plugin for replication. + //+optional + VolSync VolSyncSpec `json:"volSync,omitempty"` + + // PrepareForFinalSync when set, it tells VRG to prepare for the final sync from source to destination + // cluster. Final sync is needed for relocation only, and for VolSync only + //+optional + PrepareForFinalSync bool `json:"prepareForFinalSync,omitempty"` + + // runFinalSync used to indicate whether final sync is needed. Final sync is needed for + // relocation only, and for VolSync only + //+optional + RunFinalSync bool `json:"runFinalSync,omitempty"` + + // Action is either Failover or Relocate + //+optional + Action VRGAction `json:"action,omitempty"` + //+optional + KubeObjectProtection *KubeObjectProtectionSpec `json:"kubeObjectProtection,omitempty"` + + // ProtectedNamespaces is a list of namespaces that are considered for protection by the VRG. + // Omitting this field means resources are only protected in the namespace where VRG is. + // If this field is set, the VRG must be in the Ramen Ops Namespace as configured in the Ramen Config. + // If this field is set, the protected namespace resources are treated as unmanaged. + // You can use a recipe to filter and coordinate the order of the resources that are protected. + //+optional + ProtectedNamespaces *[]string `json:"protectedNamespaces,omitempty"` +} + +type Identifier struct { + // ID contains the globally unique storage identifier that identifies + // the storage or replication backend + ID string `json:"id"` + + // Modes is a list of maintenance modes that need to be activated on the storage + // backend, prior to various Ramen related orchestration. This is read from the label + // "ramendr.openshift.io/maintenancemodes" on the StorageClass or VolumeReplicationClass, + // the value for which is a comma separated list of maintenance modes. + //+optional + Modes []MMode `json:"modes,omitempty"` +} + +// StorageIdentifiers carries various identifiers that help correlate the identify of a storage instance +// that is backing a PVC across kubernetes clusters. 
+type StorageIdentifiers struct { + // StorageProvisioners contains the provisioner name of the CSI driver used to provision this + // PVC (extracted from the storageClass that was used for provisioning) + //+optional + StorageProvisioner string `json:"csiProvisioner,omitempty"` + + // StorageID contains the globally unique storage identifier, as reported by the storage backend + // on the StorageClass as the value for the label "ramendr.openshift.io/storageid", that identifies + // the storage backend that was used to provision the volume. It is used to label different StorageClasses + // across different kubernetes clusters, that potentially share the same storage backend. + // It also contains any maintenance modes that the storage backend requires during vaious Ramen actions + //+optional + StorageID Identifier `json:"storageID,omitempty"` + + // ReplicationID contains the globally unique replication identifier, as reported by the storage backend + // on the VolumeReplicationClass as the value for the label "ramendr.openshift.io/replicationid", that + // identifies the storage backends across 2 (or more) storage instances where the volume is replicated + // It also contains any maintenance modes that the replication backend requires during vaious Ramen actions + //+optional + ReplicationID Identifier `json:"replicationID,omitempty"` +} + +type ProtectedPVC struct { + // Name of the namespace the PVC is in + //+optional + Namespace string `json:"namespace,omitempty"` + + // Name of the VolRep/PVC resource + //+optional + Name string `json:"name,omitempty"` + + // VolSyncPVC can be used to denote whether this PVC is protected by VolSync. Defaults to "false". + //+optional + ProtectedByVolSync bool `json:"protectedByVolSync,omitempty"` + + //+optional + StorageIdentifiers `json:",inline,omitempty"` + + // Name of the StorageClass required by the claim. 
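The label keys quoted above ("ramendr.openshift.io/storageid", "ramendr.openshift.io/replicationid", "ramendr.openshift.io/maintenancemodes") are set by the storage provisioner on the classes it owns so that Ramen can populate these identifiers. A hypothetical illustration for a StorageClass (names and values are invented; a VolumeReplicationClass would carry the replicationid label analogously):

package example

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleStorageClass sketches the labels a provisioner could set so that
// Ramen can derive StorageID and the required maintenance modes for PVCs
// provisioned from this class.
func exampleStorageClass() *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-rbd",
			Labels: map[string]string{
				"ramendr.openshift.io/storageid":        "storage-instance-1",
				"ramendr.openshift.io/maintenancemodes": "Failover",
			},
		},
		Provisioner: "example.rbd.csi.ceph.com",
	}
}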
+ //+optional + StorageClassName *string `json:"storageClassName,omitempty"` + + // Annotations for the PVC + //+optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels for the PVC + //+optional + Labels map[string]string `json:"labels,omitempty"` + + // AccessModes set in the claim to be replicated + //+optional + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + + // Resources set in the claim to be replicated + //+optional + Resources corev1.VolumeResourceRequirements `json:"resources,omitempty"` + + // Conditions for this protected pvc + //+optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Time of the most recent successful synchronization for the PVC, if + // protected in the async or volsync mode + //+optional + LastSyncTime *metav1.Time `json:"lastSyncTime,omitempty"` + + // Duration of recent synchronization for PVC, if + // protected in the async or volsync mode + //+optional + LastSyncDuration *metav1.Duration `json:"lastSyncDuration,omitempty"` + + // Bytes transferred per sync, if protected in async mode only + LastSyncBytes *int64 `json:"lastSyncBytes,omitempty"` +} + +type KubeObjectsCaptureIdentifier struct { + Number int64 `json:"number"` + //+nullable + StartTime metav1.Time `json:"startTime,omitempty"` + //+nullable + EndTime metav1.Time `json:"endTime,omitempty"` + StartGeneration int64 `json:"startGeneration,omitempty"` +} + +type KubeObjectProtectionStatus struct { + //+optional + CaptureToRecoverFrom *KubeObjectsCaptureIdentifier `json:"captureToRecoverFrom,omitempty"` +} + +// VolumeReplicationGroupStatus defines the observed state of VolumeReplicationGroup +type VolumeReplicationGroupStatus struct { + State State `json:"state,omitempty"` + + // All the protected pvcs + ProtectedPVCs []ProtectedPVC `json:"protectedPVCs,omitempty"` + + // Conditions are the list of VRG's summary conditions and their status. 
+ Conditions []metav1.Condition `json:"conditions,omitempty"` + + // observedGeneration is the last generation change the operator has dealt with + //+optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + //+nullable + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + //+optional + KubeObjectProtection KubeObjectProtectionStatus `json:"kubeObjectProtection,omitempty"` + + PrepareForFinalSyncComplete bool `json:"prepareForFinalSyncComplete,omitempty"` + FinalSyncComplete bool `json:"finalSyncComplete,omitempty"` + + // lastGroupSyncTime is the time of the most recent successful synchronization of all PVCs + //+optional + LastGroupSyncTime *metav1.Time `json:"lastGroupSyncTime,omitempty"` + + // lastGroupSyncDuration is the max time from all the successful synced PVCs + //+optional + LastGroupSyncDuration *metav1.Duration `json:"lastGroupSyncDuration,omitempty"` + + // lastGroupSyncBytes is the total bytes transferred from the most recent + // successful synchronization of all PVCs + //+optional + LastGroupSyncBytes *int64 `json:"lastGroupSyncBytes,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=vrg +// +kubebuilder:printcolumn:JSONPath=".spec.replicationState",name=desiredState,type=string +// +kubebuilder:printcolumn:JSONPath=".status.state",name=currentState,type=string + +// VolumeReplicationGroup is the Schema for the volumereplicationgroups API +type VolumeReplicationGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VolumeReplicationGroupSpec `json:"spec,omitempty"` + Status VolumeReplicationGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VolumeReplicationGroupList contains a list of VolumeReplicationGroup +type VolumeReplicationGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VolumeReplicationGroup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&VolumeReplicationGroup{}, &VolumeReplicationGroupList{}) +} diff --git a/vendor/github.com/ramendr/ramen/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/ramendr/ramen/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4186c11d2 --- /dev/null +++ b/vendor/github.com/ramendr/ramen/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1279 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaintenanceMode) DeepCopyInto(out *ClusterMaintenanceMode) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaintenanceMode. 
+func (in *ClusterMaintenanceMode) DeepCopy() *ClusterMaintenanceMode { + if in == nil { + return nil + } + out := new(ClusterMaintenanceMode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth. +func (in *ControllerHealth) DeepCopy() *ControllerHealth { + if in == nil { + return nil + } + out := new(ControllerHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics. +func (in *ControllerMetrics) DeepCopy() *ControllerMetrics { + if in == nil { + return nil + } + out := new(ControllerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRCluster) DeepCopyInto(out *DRCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRCluster. +func (in *DRCluster) DeepCopy() *DRCluster { + if in == nil { + return nil + } + out := new(DRCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRClusterConfig) DeepCopyInto(out *DRClusterConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterConfig. +func (in *DRClusterConfig) DeepCopy() *DRClusterConfig { + if in == nil { + return nil + } + out := new(DRClusterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRClusterConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRClusterConfigList) DeepCopyInto(out *DRClusterConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRClusterConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterConfigList. 
+func (in *DRClusterConfigList) DeepCopy() *DRClusterConfigList { + if in == nil { + return nil + } + out := new(DRClusterConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRClusterConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRClusterConfigSpec) DeepCopyInto(out *DRClusterConfigSpec) { + *out = *in + if in.ReplicationSchedules != nil { + in, out := &in.ReplicationSchedules, &out.ReplicationSchedules + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterConfigSpec. +func (in *DRClusterConfigSpec) DeepCopy() *DRClusterConfigSpec { + if in == nil { + return nil + } + out := new(DRClusterConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRClusterConfigStatus) DeepCopyInto(out *DRClusterConfigStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterConfigStatus. +func (in *DRClusterConfigStatus) DeepCopy() *DRClusterConfigStatus { + if in == nil { + return nil + } + out := new(DRClusterConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRClusterList) DeepCopyInto(out *DRClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterList. +func (in *DRClusterList) DeepCopy() *DRClusterList { + if in == nil { + return nil + } + out := new(DRClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRClusterSpec) DeepCopyInto(out *DRClusterSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterSpec. +func (in *DRClusterSpec) DeepCopy() *DRClusterSpec { + if in == nil { + return nil + } + out := new(DRClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DRClusterStatus) DeepCopyInto(out *DRClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceModes != nil { + in, out := &in.MaintenanceModes, &out.MaintenanceModes + *out = make([]ClusterMaintenanceMode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRClusterStatus. +func (in *DRClusterStatus) DeepCopy() *DRClusterStatus { + if in == nil { + return nil + } + out := new(DRClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPlacementControl) DeepCopyInto(out *DRPlacementControl) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPlacementControl. +func (in *DRPlacementControl) DeepCopy() *DRPlacementControl { + if in == nil { + return nil + } + out := new(DRPlacementControl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRPlacementControl) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPlacementControlList) DeepCopyInto(out *DRPlacementControlList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRPlacementControl, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPlacementControlList. +func (in *DRPlacementControlList) DeepCopy() *DRPlacementControlList { + if in == nil { + return nil + } + out := new(DRPlacementControlList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRPlacementControlList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPlacementControlSpec) DeepCopyInto(out *DRPlacementControlSpec) { + *out = *in + out.PlacementRef = in.PlacementRef + if in.ProtectedNamespaces != nil { + in, out := &in.ProtectedNamespaces, &out.ProtectedNamespaces + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + out.DRPolicyRef = in.DRPolicyRef + in.PVCSelector.DeepCopyInto(&out.PVCSelector) + if in.KubeObjectProtection != nil { + in, out := &in.KubeObjectProtection, &out.KubeObjectProtection + *out = new(KubeObjectProtectionSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPlacementControlSpec. 
+func (in *DRPlacementControlSpec) DeepCopy() *DRPlacementControlSpec { + if in == nil { + return nil + } + out := new(DRPlacementControlSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPlacementControlStatus) DeepCopyInto(out *DRPlacementControlStatus) { + *out = *in + if in.ActionStartTime != nil { + in, out := &in.ActionStartTime, &out.ActionStartTime + *out = (*in).DeepCopy() + } + if in.ActionDuration != nil { + in, out := &in.ActionDuration, &out.ActionDuration + *out = new(v1.Duration) + **out = **in + } + out.PreferredDecision = in.PreferredDecision + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ResourceConditions.DeepCopyInto(&out.ResourceConditions) + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastGroupSyncTime != nil { + in, out := &in.LastGroupSyncTime, &out.LastGroupSyncTime + *out = (*in).DeepCopy() + } + if in.LastGroupSyncDuration != nil { + in, out := &in.LastGroupSyncDuration, &out.LastGroupSyncDuration + *out = new(v1.Duration) + **out = **in + } + if in.LastGroupSyncBytes != nil { + in, out := &in.LastGroupSyncBytes, &out.LastGroupSyncBytes + *out = new(int64) + **out = **in + } + if in.LastKubeObjectProtectionTime != nil { + in, out := &in.LastKubeObjectProtectionTime, &out.LastKubeObjectProtectionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPlacementControlStatus. +func (in *DRPlacementControlStatus) DeepCopy() *DRPlacementControlStatus { + if in == nil { + return nil + } + out := new(DRPlacementControlStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPolicy) DeepCopyInto(out *DRPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPolicy. +func (in *DRPolicy) DeepCopy() *DRPolicy { + if in == nil { + return nil + } + out := new(DRPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPolicyList) DeepCopyInto(out *DRPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPolicyList. +func (in *DRPolicyList) DeepCopy() *DRPolicyList { + if in == nil { + return nil + } + out := new(DRPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DRPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPolicySpec) DeepCopyInto(out *DRPolicySpec) { + *out = *in + in.ReplicationClassSelector.DeepCopyInto(&out.ReplicationClassSelector) + in.VolumeSnapshotClassSelector.DeepCopyInto(&out.VolumeSnapshotClassSelector) + if in.DRClusters != nil { + in, out := &in.DRClusters, &out.DRClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPolicySpec. +func (in *DRPolicySpec) DeepCopy() *DRPolicySpec { + if in == nil { + return nil + } + out := new(DRPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRPolicyStatus) DeepCopyInto(out *DRPolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPolicyStatus. +func (in *DRPolicyStatus) DeepCopy() *DRPolicyStatus { + if in == nil { + return nil + } + out := new(DRPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Identifier) DeepCopyInto(out *Identifier) { + *out = *in + if in.Modes != nil { + in, out := &in.Modes, &out.Modes + *out = make([]MMode, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Identifier. +func (in *Identifier) DeepCopy() *Identifier { + if in == nil { + return nil + } + out := new(Identifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeObjectProtectionSpec) DeepCopyInto(out *KubeObjectProtectionSpec) { + *out = *in + if in.CaptureInterval != nil { + in, out := &in.CaptureInterval, &out.CaptureInterval + *out = new(v1.Duration) + **out = **in + } + if in.RecipeRef != nil { + in, out := &in.RecipeRef, &out.RecipeRef + *out = new(RecipeRef) + **out = **in + } + if in.RecipeParameters != nil { + in, out := &in.RecipeParameters, &out.RecipeParameters + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.KubeObjectSelector != nil { + in, out := &in.KubeObjectSelector, &out.KubeObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeObjectProtectionSpec. +func (in *KubeObjectProtectionSpec) DeepCopy() *KubeObjectProtectionSpec { + if in == nil { + return nil + } + out := new(KubeObjectProtectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeObjectProtectionStatus) DeepCopyInto(out *KubeObjectProtectionStatus) { + *out = *in + if in.CaptureToRecoverFrom != nil { + in, out := &in.CaptureToRecoverFrom, &out.CaptureToRecoverFrom + *out = new(KubeObjectsCaptureIdentifier) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeObjectProtectionStatus. +func (in *KubeObjectProtectionStatus) DeepCopy() *KubeObjectProtectionStatus { + if in == nil { + return nil + } + out := new(KubeObjectProtectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeObjectsCaptureIdentifier) DeepCopyInto(out *KubeObjectsCaptureIdentifier) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + in.EndTime.DeepCopyInto(&out.EndTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeObjectsCaptureIdentifier. +func (in *KubeObjectsCaptureIdentifier) DeepCopy() *KubeObjectsCaptureIdentifier { + if in == nil { + return nil + } + out := new(KubeObjectsCaptureIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceMode) DeepCopyInto(out *MaintenanceMode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceMode. +func (in *MaintenanceMode) DeepCopy() *MaintenanceMode { + if in == nil { + return nil + } + out := new(MaintenanceMode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MaintenanceMode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceModeList) DeepCopyInto(out *MaintenanceModeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MaintenanceMode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceModeList. +func (in *MaintenanceModeList) DeepCopy() *MaintenanceModeList { + if in == nil { + return nil + } + out := new(MaintenanceModeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MaintenanceModeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceModeSpec) DeepCopyInto(out *MaintenanceModeSpec) { + *out = *in + if in.Modes != nil { + in, out := &in.Modes, &out.Modes + *out = make([]MMode, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceModeSpec. 
+func (in *MaintenanceModeSpec) DeepCopy() *MaintenanceModeSpec { + if in == nil { + return nil + } + out := new(MaintenanceModeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceModeStatus) DeepCopyInto(out *MaintenanceModeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceModeStatus. +func (in *MaintenanceModeStatus) DeepCopy() *MaintenanceModeStatus { + if in == nil { + return nil + } + out := new(MaintenanceModeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementDecision) DeepCopyInto(out *PlacementDecision) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementDecision. +func (in *PlacementDecision) DeepCopy() *PlacementDecision { + if in == nil { + return nil + } + out := new(PlacementDecision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedPVC) DeepCopyInto(out *ProtectedPVC) { + *out = *in + in.StorageIdentifiers.DeepCopyInto(&out.StorageIdentifiers) + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]corev1.PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastSyncTime != nil { + in, out := &in.LastSyncTime, &out.LastSyncTime + *out = (*in).DeepCopy() + } + if in.LastSyncDuration != nil { + in, out := &in.LastSyncDuration, &out.LastSyncDuration + *out = new(v1.Duration) + **out = **in + } + if in.LastSyncBytes != nil { + in, out := &in.LastSyncBytes, &out.LastSyncBytes + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedPVC. +func (in *ProtectedPVC) DeepCopy() *ProtectedPVC { + if in == nil { + return nil + } + out := new(ProtectedPVC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectedVolumeReplicationGroupList) DeepCopyInto(out *ProtectedVolumeReplicationGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ProtectedVolumeReplicationGroupListStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedVolumeReplicationGroupList. +func (in *ProtectedVolumeReplicationGroupList) DeepCopy() *ProtectedVolumeReplicationGroupList { + if in == nil { + return nil + } + out := new(ProtectedVolumeReplicationGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProtectedVolumeReplicationGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedVolumeReplicationGroupListList) DeepCopyInto(out *ProtectedVolumeReplicationGroupListList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProtectedVolumeReplicationGroupList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedVolumeReplicationGroupListList. +func (in *ProtectedVolumeReplicationGroupListList) DeepCopy() *ProtectedVolumeReplicationGroupListList { + if in == nil { + return nil + } + out := new(ProtectedVolumeReplicationGroupListList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProtectedVolumeReplicationGroupListList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedVolumeReplicationGroupListSpec) DeepCopyInto(out *ProtectedVolumeReplicationGroupListSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedVolumeReplicationGroupListSpec. +func (in *ProtectedVolumeReplicationGroupListSpec) DeepCopy() *ProtectedVolumeReplicationGroupListSpec { + if in == nil { + return nil + } + out := new(ProtectedVolumeReplicationGroupListSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedVolumeReplicationGroupListStatus) DeepCopyInto(out *ProtectedVolumeReplicationGroupListStatus) { + *out = *in + in.SampleTime.DeepCopyInto(&out.SampleTime) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeReplicationGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedVolumeReplicationGroupListStatus. 
+func (in *ProtectedVolumeReplicationGroupListStatus) DeepCopy() *ProtectedVolumeReplicationGroupListStatus { + if in == nil { + return nil + } + out := new(ProtectedVolumeReplicationGroupListStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RamenConfig) DeepCopyInto(out *RamenConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(configv1alpha1.LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + out.Metrics = in.Metrics + out.Health = in.Health + if in.S3StoreProfiles != nil { + in, out := &in.S3StoreProfiles, &out.S3StoreProfiles + *out = make([]S3StoreProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.DrClusterOperator = in.DrClusterOperator + out.VolSync = in.VolSync + out.KubeObjectProtection = in.KubeObjectProtection + out.MultiNamespace = in.MultiNamespace +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RamenConfig. +func (in *RamenConfig) DeepCopy() *RamenConfig { + if in == nil { + return nil + } + out := new(RamenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RamenConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecipeRef) DeepCopyInto(out *RecipeRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecipeRef. +func (in *RecipeRef) DeepCopy() *RecipeRef { + if in == nil { + return nil + } + out := new(RecipeRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3StoreProfile) DeepCopyInto(out *S3StoreProfile) { + *out = *in + out.S3SecretRef = in.S3SecretRef + if in.VeleroNamespaceSecretKeyRef != nil { + in, out := &in.VeleroNamespaceSecretKeyRef, &out.VeleroNamespaceSecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.CACertificates != nil { + in, out := &in.CACertificates, &out.CACertificates + *out = make([]byte, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StoreProfile. +func (in *S3StoreProfile) DeepCopy() *S3StoreProfile { + if in == nil { + return nil + } + out := new(S3StoreProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageIdentifiers) DeepCopyInto(out *StorageIdentifiers) { + *out = *in + in.StorageID.DeepCopyInto(&out.StorageID) + in.ReplicationID.DeepCopyInto(&out.ReplicationID) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageIdentifiers. +func (in *StorageIdentifiers) DeepCopy() *StorageIdentifiers { + if in == nil { + return nil + } + out := new(StorageIdentifiers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VRGAsyncSpec) DeepCopyInto(out *VRGAsyncSpec) { + *out = *in + in.ReplicationClassSelector.DeepCopyInto(&out.ReplicationClassSelector) + in.VolumeSnapshotClassSelector.DeepCopyInto(&out.VolumeSnapshotClassSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VRGAsyncSpec. +func (in *VRGAsyncSpec) DeepCopy() *VRGAsyncSpec { + if in == nil { + return nil + } + out := new(VRGAsyncSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VRGConditions) DeepCopyInto(out *VRGConditions) { + *out = *in + in.ResourceMeta.DeepCopyInto(&out.ResourceMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VRGConditions. +func (in *VRGConditions) DeepCopy() *VRGConditions { + if in == nil { + return nil + } + out := new(VRGConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VRGResourceMeta) DeepCopyInto(out *VRGResourceMeta) { + *out = *in + if in.ProtectedPVCs != nil { + in, out := &in.ProtectedPVCs, &out.ProtectedPVCs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VRGResourceMeta. +func (in *VRGResourceMeta) DeepCopy() *VRGResourceMeta { + if in == nil { + return nil + } + out := new(VRGResourceMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VRGSyncSpec) DeepCopyInto(out *VRGSyncSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VRGSyncSpec. +func (in *VRGSyncSpec) DeepCopy() *VRGSyncSpec { + if in == nil { + return nil + } + out := new(VRGSyncSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolSyncReplicationDestinationSpec) DeepCopyInto(out *VolSyncReplicationDestinationSpec) { + *out = *in + in.ProtectedPVC.DeepCopyInto(&out.ProtectedPVC) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolSyncReplicationDestinationSpec. +func (in *VolSyncReplicationDestinationSpec) DeepCopy() *VolSyncReplicationDestinationSpec { + if in == nil { + return nil + } + out := new(VolSyncReplicationDestinationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolSyncReplicationSourceSpec) DeepCopyInto(out *VolSyncReplicationSourceSpec) { + *out = *in + in.ProtectedPVC.DeepCopyInto(&out.ProtectedPVC) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolSyncReplicationSourceSpec. +func (in *VolSyncReplicationSourceSpec) DeepCopy() *VolSyncReplicationSourceSpec { + if in == nil { + return nil + } + out := new(VolSyncReplicationSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolSyncSpec) DeepCopyInto(out *VolSyncSpec) { + *out = *in + if in.RDSpec != nil { + in, out := &in.RDSpec, &out.RDSpec + *out = make([]VolSyncReplicationDestinationSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolSyncSpec. +func (in *VolSyncSpec) DeepCopy() *VolSyncSpec { + if in == nil { + return nil + } + out := new(VolSyncSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationGroup) DeepCopyInto(out *VolumeReplicationGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationGroup. +func (in *VolumeReplicationGroup) DeepCopy() *VolumeReplicationGroup { + if in == nil { + return nil + } + out := new(VolumeReplicationGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeReplicationGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationGroupList) DeepCopyInto(out *VolumeReplicationGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeReplicationGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationGroupList. +func (in *VolumeReplicationGroupList) DeepCopy() *VolumeReplicationGroupList { + if in == nil { + return nil + } + out := new(VolumeReplicationGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeReplicationGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeReplicationGroupSpec) DeepCopyInto(out *VolumeReplicationGroupSpec) { + *out = *in + in.PVCSelector.DeepCopyInto(&out.PVCSelector) + if in.S3Profiles != nil { + in, out := &in.S3Profiles, &out.S3Profiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Async != nil { + in, out := &in.Async, &out.Async + *out = new(VRGAsyncSpec) + (*in).DeepCopyInto(*out) + } + if in.Sync != nil { + in, out := &in.Sync, &out.Sync + *out = new(VRGSyncSpec) + **out = **in + } + in.VolSync.DeepCopyInto(&out.VolSync) + if in.KubeObjectProtection != nil { + in, out := &in.KubeObjectProtection, &out.KubeObjectProtection + *out = new(KubeObjectProtectionSpec) + (*in).DeepCopyInto(*out) + } + if in.ProtectedNamespaces != nil { + in, out := &in.ProtectedNamespaces, &out.ProtectedNamespaces + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationGroupSpec. +func (in *VolumeReplicationGroupSpec) DeepCopy() *VolumeReplicationGroupSpec { + if in == nil { + return nil + } + out := new(VolumeReplicationGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeReplicationGroupStatus) DeepCopyInto(out *VolumeReplicationGroupStatus) { + *out = *in + if in.ProtectedPVCs != nil { + in, out := &in.ProtectedPVCs, &out.ProtectedPVCs + *out = make([]ProtectedPVC, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.KubeObjectProtection.DeepCopyInto(&out.KubeObjectProtection) + if in.LastGroupSyncTime != nil { + in, out := &in.LastGroupSyncTime, &out.LastGroupSyncTime + *out = (*in).DeepCopy() + } + if in.LastGroupSyncDuration != nil { + in, out := &in.LastGroupSyncDuration, &out.LastGroupSyncDuration + *out = new(v1.Duration) + **out = **in + } + if in.LastGroupSyncBytes != nil { + in, out := &in.LastGroupSyncBytes, &out.LastGroupSyncBytes + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationGroupStatus. 
+func (in *VolumeReplicationGroupStatus) DeepCopy() *VolumeReplicationGroupStatus { + if in == nil { + return nil + } + out := new(VolumeReplicationGroupStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a8bb4d3fc..acc65ebdd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,6 +7,9 @@ github.com/blang/semver/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/csi-addons/kubernetes-csi-addons v0.8.0 +## explicit; go 1.20 +github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew @@ -182,6 +185,9 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/ramendr/ramen/api v0.0.0-20240719134233-210f00a7a0c0 +## explicit; go 1.21 +github.com/ramendr/ramen/api/v1alpha1 # github.com/red-hat-storage/ocs-operator/v4 v4.0.0-20240422111920-faced96485bc ## explicit; go 1.21 github.com/red-hat-storage/ocs-operator/v4/services/provider/client @@ -497,7 +503,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.29.3 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.29.0 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1