diff --git a/apis/osis/v1beta1/zz_generated.conversion_hubs.go b/apis/osis/v1beta1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..0d1b5797a2 --- /dev/null +++ b/apis/osis/v1beta1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +// Hub marks this type as a conversion hub. +func (tr *Pipeline) Hub() {} diff --git a/apis/osis/v1beta1/zz_generated.deepcopy.go b/apis/osis/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..52a316823b --- /dev/null +++ b/apis/osis/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,758 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BufferOptionsInitParameters) DeepCopyInto(out *BufferOptionsInitParameters) { + *out = *in + if in.PersistentBufferEnabled != nil { + in, out := &in.PersistentBufferEnabled, &out.PersistentBufferEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BufferOptionsInitParameters. +func (in *BufferOptionsInitParameters) DeepCopy() *BufferOptionsInitParameters { + if in == nil { + return nil + } + out := new(BufferOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BufferOptionsObservation) DeepCopyInto(out *BufferOptionsObservation) { + *out = *in + if in.PersistentBufferEnabled != nil { + in, out := &in.PersistentBufferEnabled, &out.PersistentBufferEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BufferOptionsObservation. +func (in *BufferOptionsObservation) DeepCopy() *BufferOptionsObservation { + if in == nil { + return nil + } + out := new(BufferOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BufferOptionsParameters) DeepCopyInto(out *BufferOptionsParameters) { + *out = *in + if in.PersistentBufferEnabled != nil { + in, out := &in.PersistentBufferEnabled, &out.PersistentBufferEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BufferOptionsParameters. +func (in *BufferOptionsParameters) DeepCopy() *BufferOptionsParameters { + if in == nil { + return nil + } + out := new(BufferOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogDestinationInitParameters) DeepCopyInto(out *CloudwatchLogDestinationInitParameters) { + *out = *in + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogDestinationInitParameters. 
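// Note: every generated DeepCopy body in this file follows one idiom for
// pointer fields: alias the field, allocate fresh storage, then copy through a
// double dereference so the clone shares no memory with the receiver. A
// minimal sketch, using the PersistentBufferEnabled field from above:
//
//	if in.PersistentBufferEnabled != nil {
//		in, out := &in.PersistentBufferEnabled, &out.PersistentBufferEnabled
//		*out = new(bool) // fresh allocation
//		**out = **in     // copies the pointed-to value, not the pointer
//	}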
+func (in *CloudwatchLogDestinationInitParameters) DeepCopy() *CloudwatchLogDestinationInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogDestinationObservation) DeepCopyInto(out *CloudwatchLogDestinationObservation) { + *out = *in + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogDestinationObservation. +func (in *CloudwatchLogDestinationObservation) DeepCopy() *CloudwatchLogDestinationObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogDestinationParameters) DeepCopyInto(out *CloudwatchLogDestinationParameters) { + *out = *in + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogDestinationParameters. +func (in *CloudwatchLogDestinationParameters) DeepCopy() *CloudwatchLogDestinationParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionAtRestOptionsInitParameters) DeepCopyInto(out *EncryptionAtRestOptionsInitParameters) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAtRestOptionsInitParameters. +func (in *EncryptionAtRestOptionsInitParameters) DeepCopy() *EncryptionAtRestOptionsInitParameters { + if in == nil { + return nil + } + out := new(EncryptionAtRestOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionAtRestOptionsObservation) DeepCopyInto(out *EncryptionAtRestOptionsObservation) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAtRestOptionsObservation. +func (in *EncryptionAtRestOptionsObservation) DeepCopy() *EncryptionAtRestOptionsObservation { + if in == nil { + return nil + } + out := new(EncryptionAtRestOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionAtRestOptionsParameters) DeepCopyInto(out *EncryptionAtRestOptionsParameters) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAtRestOptionsParameters. 
+func (in *EncryptionAtRestOptionsParameters) DeepCopy() *EncryptionAtRestOptionsParameters { + if in == nil { + return nil + } + out := new(EncryptionAtRestOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsInitParameters) DeepCopyInto(out *LogPublishingOptionsInitParameters) { + *out = *in + if in.CloudwatchLogDestination != nil { + in, out := &in.CloudwatchLogDestination, &out.CloudwatchLogDestination + *out = make([]CloudwatchLogDestinationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsLoggingEnabled != nil { + in, out := &in.IsLoggingEnabled, &out.IsLoggingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsInitParameters. +func (in *LogPublishingOptionsInitParameters) DeepCopy() *LogPublishingOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogPublishingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsObservation) DeepCopyInto(out *LogPublishingOptionsObservation) { + *out = *in + if in.CloudwatchLogDestination != nil { + in, out := &in.CloudwatchLogDestination, &out.CloudwatchLogDestination + *out = make([]CloudwatchLogDestinationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsLoggingEnabled != nil { + in, out := &in.IsLoggingEnabled, &out.IsLoggingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsObservation. +func (in *LogPublishingOptionsObservation) DeepCopy() *LogPublishingOptionsObservation { + if in == nil { + return nil + } + out := new(LogPublishingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsParameters) DeepCopyInto(out *LogPublishingOptionsParameters) { + *out = *in + if in.CloudwatchLogDestination != nil { + in, out := &in.CloudwatchLogDestination, &out.CloudwatchLogDestination + *out = make([]CloudwatchLogDestinationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsLoggingEnabled != nil { + in, out := &in.IsLoggingEnabled, &out.IsLoggingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsParameters. +func (in *LogPublishingOptionsParameters) DeepCopy() *LogPublishingOptionsParameters { + if in == nil { + return nil + } + out := new(LogPublishingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pipeline) DeepCopyInto(out *Pipeline) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline. 
+func (in *Pipeline) DeepCopy() *Pipeline { + if in == nil { + return nil + } + out := new(Pipeline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pipeline) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineInitParameters) DeepCopyInto(out *PipelineInitParameters) { + *out = *in + if in.BufferOptions != nil { + in, out := &in.BufferOptions, &out.BufferOptions + *out = make([]BufferOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionAtRestOptions != nil { + in, out := &in.EncryptionAtRestOptions, &out.EncryptionAtRestOptions + *out = make([]EncryptionAtRestOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxUnits != nil { + in, out := &in.MaxUnits, &out.MaxUnits + *out = new(float64) + **out = **in + } + if in.MinUnits != nil { + in, out := &in.MinUnits, &out.MinUnits + *out = new(float64) + **out = **in + } + if in.PipelineConfigurationBody != nil { + in, out := &in.PipelineConfigurationBody, &out.PipelineConfigurationBody + *out = new(string) + **out = **in + } + if in.PipelineName != nil { + in, out := &in.PipelineName, &out.PipelineName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = make([]VPCOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineInitParameters. +func (in *PipelineInitParameters) DeepCopy() *PipelineInitParameters { + if in == nil { + return nil + } + out := new(PipelineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineList) DeepCopyInto(out *PipelineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pipeline, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList. +func (in *PipelineList) DeepCopy() *PipelineList { + if in == nil { + return nil + } + out := new(PipelineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
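// DeepCopyObject, here and on Pipeline above, is what satisfies the
// runtime.Object interface from k8s.io/apimachinery; client-go informers and
// caches rely on it to hand out copies so callers never mutate shared state.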
+func (in *PipelineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineObservation) DeepCopyInto(out *PipelineObservation) { + *out = *in + if in.BufferOptions != nil { + in, out := &in.BufferOptions, &out.BufferOptions + *out = make([]BufferOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionAtRestOptions != nil { + in, out := &in.EncryptionAtRestOptions, &out.EncryptionAtRestOptions + *out = make([]EncryptionAtRestOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IngestEndpointUrls != nil { + in, out := &in.IngestEndpointUrls, &out.IngestEndpointUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxUnits != nil { + in, out := &in.MaxUnits, &out.MaxUnits + *out = new(float64) + **out = **in + } + if in.MinUnits != nil { + in, out := &in.MinUnits, &out.MinUnits + *out = new(float64) + **out = **in + } + if in.PipelineArn != nil { + in, out := &in.PipelineArn, &out.PipelineArn + *out = new(string) + **out = **in + } + if in.PipelineConfigurationBody != nil { + in, out := &in.PipelineConfigurationBody, &out.PipelineConfigurationBody + *out = new(string) + **out = **in + } + if in.PipelineName != nil { + in, out := &in.PipelineName, &out.PipelineName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = make([]VPCOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineObservation. +func (in *PipelineObservation) DeepCopy() *PipelineObservation { + if in == nil { + return nil + } + out := new(PipelineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineParameters) DeepCopyInto(out *PipelineParameters) { + *out = *in + if in.BufferOptions != nil { + in, out := &in.BufferOptions, &out.BufferOptions + *out = make([]BufferOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionAtRestOptions != nil { + in, out := &in.EncryptionAtRestOptions, &out.EncryptionAtRestOptions + *out = make([]EncryptionAtRestOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxUnits != nil { + in, out := &in.MaxUnits, &out.MaxUnits + *out = new(float64) + **out = **in + } + if in.MinUnits != nil { + in, out := &in.MinUnits, &out.MinUnits + *out = new(float64) + **out = **in + } + if in.PipelineConfigurationBody != nil { + in, out := &in.PipelineConfigurationBody, &out.PipelineConfigurationBody + *out = new(string) + **out = **in + } + if in.PipelineName != nil { + in, out := &in.PipelineName, &out.PipelineName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = make([]VPCOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameters. +func (in *PipelineParameters) DeepCopy() *PipelineParameters { + if in == nil { + return nil + } + out := new(PipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec. +func (in *PipelineSpec) DeepCopy() *PipelineSpec { + if in == nil { + return nil + } + out := new(PipelineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus. +func (in *PipelineStatus) DeepCopy() *PipelineStatus { + if in == nil { + return nil + } + out := new(PipelineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCOptionsInitParameters) DeepCopyInto(out *VPCOptionsInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsInitParameters. +func (in *VPCOptionsInitParameters) DeepCopy() *VPCOptionsInitParameters { + if in == nil { + return nil + } + out := new(VPCOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCOptionsObservation) DeepCopyInto(out *VPCOptionsObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsObservation. +func (in *VPCOptionsObservation) DeepCopy() *VPCOptionsObservation { + if in == nil { + return nil + } + out := new(VPCOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCOptionsParameters) DeepCopyInto(out *VPCOptionsParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsParameters. +func (in *VPCOptionsParameters) DeepCopy() *VPCOptionsParameters { + if in == nil { + return nil + } + out := new(VPCOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/osis/v1beta1/zz_generated.managed.go b/apis/osis/v1beta1/zz_generated.managed.go new file mode 100644 index 0000000000..396746bef5 --- /dev/null +++ b/apis/osis/v1beta1/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Pipeline. +func (mg *Pipeline) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Pipeline. 
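// The getter/setter pairs in this file are what let Pipeline satisfy
// crossplane-runtime's resource.Managed interface, so the generic managed
// reconciler can drive the resource without knowing its concrete type. A
// compile-time assertion of that relationship would look like this
// (illustrative, not part of the generated file):
//
//	var _ resource.Managed = &Pipeline{}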
+func (mg *Pipeline) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Pipeline. +func (mg *Pipeline) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Pipeline. +func (mg *Pipeline) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Pipeline. +func (mg *Pipeline) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Pipeline. +func (mg *Pipeline) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Pipeline. +func (mg *Pipeline) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Pipeline. +func (mg *Pipeline) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Pipeline. +func (mg *Pipeline) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Pipeline. +func (mg *Pipeline) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Pipeline. +func (mg *Pipeline) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Pipeline. +func (mg *Pipeline) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/osis/v1beta1/zz_generated.managedlist.go b/apis/osis/v1beta1/zz_generated.managedlist.go new file mode 100644 index 0000000000..ea2a371fed --- /dev/null +++ b/apis/osis/v1beta1/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PipelineList. +func (l *PipelineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/osis/v1beta1/zz_groupversion_info.go b/apis/osis/v1beta1/zz_groupversion_info.go new file mode 100755 index 0000000000..dbf2f62bde --- /dev/null +++ b/apis/osis/v1beta1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=osis.aws.upbound.io +// +versionName=v1beta1 +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "osis.aws.upbound.io" + CRDVersion = "v1beta1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
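// Consumers compose AddToScheme into a runtime scheme; in this provider that
// happens via apis.AddToScheme (see the apis/zz_register.go hunk below), which
// cmd/provider/osis/zz_main.go ultimately calls with the manager's scheme.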
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/osis/v1beta1/zz_pipeline_terraformed.go b/apis/osis/v1beta1/zz_pipeline_terraformed.go
new file mode 100755
index 0000000000..3f1deb2ad5
--- /dev/null
+++ b/apis/osis/v1beta1/zz_pipeline_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Pipeline
+func (mg *Pipeline) GetTerraformResourceType() string {
+	return "aws_osis_pipeline"
+}
+
+// GetConnectionDetailsMapping for this Pipeline
+func (tr *Pipeline) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Pipeline
+func (tr *Pipeline) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Pipeline
+func (tr *Pipeline) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Pipeline
+func (tr *Pipeline) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Pipeline
+func (tr *Pipeline) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Pipeline
+func (tr *Pipeline) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Pipeline
+func (tr *Pipeline) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Pipeline
+func (tr *Pipeline) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
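	// Illustration of the intended semantics, with hypothetical values: given
	// params = {"min_units": 1} and initParams = {"min_units": 2, "tags": {...}},
	// the merge keeps min_units = 1 (forProvider wins once Overwrite is reset
	// to false) and fills in tags from initParams.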
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Pipeline using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Pipeline) LateInitialize(attrs []byte) (bool, error) { + params := &PipelineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Pipeline) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/osis/v1beta1/zz_pipeline_types.go b/apis/osis/v1beta1/zz_pipeline_types.go new file mode 100755 index 0000000000..2312ef86fd --- /dev/null +++ b/apis/osis/v1beta1/zz_pipeline_types.go @@ -0,0 +1,320 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BufferOptionsInitParameters struct { + + // Whether persistent buffering should be enabled. + PersistentBufferEnabled *bool `json:"persistentBufferEnabled,omitempty" tf:"persistent_buffer_enabled,omitempty"` +} + +type BufferOptionsObservation struct { + + // Whether persistent buffering should be enabled. + PersistentBufferEnabled *bool `json:"persistentBufferEnabled,omitempty" tf:"persistent_buffer_enabled,omitempty"` +} + +type BufferOptionsParameters struct { + + // Whether persistent buffering should be enabled. + // +kubebuilder:validation:Optional + PersistentBufferEnabled *bool `json:"persistentBufferEnabled" tf:"persistent_buffer_enabled,omitempty"` +} + +type CloudwatchLogDestinationInitParameters struct { + + // The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type CloudwatchLogDestinationObservation struct { + + // The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type CloudwatchLogDestinationParameters struct { + + // The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + // +kubebuilder:validation:Optional + LogGroup *string `json:"logGroup" tf:"log_group,omitempty"` +} + +type EncryptionAtRestOptionsInitParameters struct { + + // The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key. 
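	// An illustrative (not real) value:
	// arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab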
+ KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type EncryptionAtRestOptionsObservation struct { + + // The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type EncryptionAtRestOptionsParameters struct { + + // The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn" tf:"kms_key_arn,omitempty"` +} + +type LogPublishingOptionsInitParameters struct { + + // The destination for OpenSearch Ingestion logs sent to Amazon CloudWatch Logs. This parameter is required if IsLoggingEnabled is set to true. See cloudwatch_log_destination below. + CloudwatchLogDestination []CloudwatchLogDestinationInitParameters `json:"cloudwatchLogDestination,omitempty" tf:"cloudwatch_log_destination,omitempty"` + + // Whether logs should be published. + IsLoggingEnabled *bool `json:"isLoggingEnabled,omitempty" tf:"is_logging_enabled,omitempty"` +} + +type LogPublishingOptionsObservation struct { + + // The destination for OpenSearch Ingestion logs sent to Amazon CloudWatch Logs. This parameter is required if IsLoggingEnabled is set to true. See cloudwatch_log_destination below. + CloudwatchLogDestination []CloudwatchLogDestinationObservation `json:"cloudwatchLogDestination,omitempty" tf:"cloudwatch_log_destination,omitempty"` + + // Whether logs should be published. + IsLoggingEnabled *bool `json:"isLoggingEnabled,omitempty" tf:"is_logging_enabled,omitempty"` +} + +type LogPublishingOptionsParameters struct { + + // The destination for OpenSearch Ingestion logs sent to Amazon CloudWatch Logs. This parameter is required if IsLoggingEnabled is set to true. See cloudwatch_log_destination below. + // +kubebuilder:validation:Optional + CloudwatchLogDestination []CloudwatchLogDestinationParameters `json:"cloudwatchLogDestination,omitempty" tf:"cloudwatch_log_destination,omitempty"` + + // Whether logs should be published. + // +kubebuilder:validation:Optional + IsLoggingEnabled *bool `json:"isLoggingEnabled,omitempty" tf:"is_logging_enabled,omitempty"` +} + +type PipelineInitParameters struct { + + // Key-value pairs to configure persistent buffering for the pipeline. See buffer_options below. + BufferOptions []BufferOptionsInitParameters `json:"bufferOptions,omitempty" tf:"buffer_options,omitempty"` + + // Key-value pairs to configure encryption for data that is written to a persistent buffer. See encryption_at_rest_options below. + EncryptionAtRestOptions []EncryptionAtRestOptionsInitParameters `json:"encryptionAtRestOptions,omitempty" tf:"encryption_at_rest_options,omitempty"` + + // Key-value pairs to configure log publishing. See log_publishing_options below. + LogPublishingOptions []LogPublishingOptionsInitParameters `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // The maximum pipeline capacity, in Ingestion Compute Units (ICUs). + MaxUnits *float64 `json:"maxUnits,omitempty" tf:"max_units,omitempty"` + + // The minimum pipeline capacity, in Ingestion Compute Units (ICUs). + MinUnits *float64 `json:"minUnits,omitempty" tf:"min_units,omitempty"` + + // The pipeline configuration in YAML format. This argument accepts the pipeline configuration as a string or within a .yaml file. 
If you provide the configuration as a string, each new line must be escaped with \n. + PipelineConfigurationBody *string `json:"pipelineConfigurationBody,omitempty" tf:"pipeline_configuration_body,omitempty"` + + // The name of the OpenSearch Ingestion pipeline to create. Pipeline names are unique across the pipelines owned by an account within an AWS Region. + PipelineName *string `json:"pipelineName,omitempty" tf:"pipeline_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Container for the values required to configure VPC access for the pipeline. If you don't specify these values, OpenSearch Ingestion creates the pipeline with a public endpoint. See vpc_options below. + VPCOptions []VPCOptionsInitParameters `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type PipelineObservation struct { + + // Key-value pairs to configure persistent buffering for the pipeline. See buffer_options below. + BufferOptions []BufferOptionsObservation `json:"bufferOptions,omitempty" tf:"buffer_options,omitempty"` + + // Key-value pairs to configure encryption for data that is written to a persistent buffer. See encryption_at_rest_options below. + EncryptionAtRestOptions []EncryptionAtRestOptionsObservation `json:"encryptionAtRestOptions,omitempty" tf:"encryption_at_rest_options,omitempty"` + + // Unique identifier for the pipeline. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The list of ingestion endpoints for the pipeline, which you can send data to. + // +listType=set + IngestEndpointUrls []*string `json:"ingestEndpointUrls,omitempty" tf:"ingest_endpoint_urls,omitempty"` + + // Key-value pairs to configure log publishing. See log_publishing_options below. + LogPublishingOptions []LogPublishingOptionsObservation `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // The maximum pipeline capacity, in Ingestion Compute Units (ICUs). + MaxUnits *float64 `json:"maxUnits,omitempty" tf:"max_units,omitempty"` + + // The minimum pipeline capacity, in Ingestion Compute Units (ICUs). + MinUnits *float64 `json:"minUnits,omitempty" tf:"min_units,omitempty"` + + // Amazon Resource Name (ARN) of the pipeline. + PipelineArn *string `json:"pipelineArn,omitempty" tf:"pipeline_arn,omitempty"` + + // The pipeline configuration in YAML format. This argument accepts the pipeline configuration as a string or within a .yaml file. If you provide the configuration as a string, each new line must be escaped with \n. + PipelineConfigurationBody *string `json:"pipelineConfigurationBody,omitempty" tf:"pipeline_configuration_body,omitempty"` + + // The name of the OpenSearch Ingestion pipeline to create. Pipeline names are unique across the pipelines owned by an account within an AWS Region. + PipelineName *string `json:"pipelineName,omitempty" tf:"pipeline_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Container for the values required to configure VPC access for the pipeline. If you don't specify these values, OpenSearch Ingestion creates the pipeline with a public endpoint. See vpc_options below. 
+ VPCOptions []VPCOptionsObservation `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type PipelineParameters struct { + + // Key-value pairs to configure persistent buffering for the pipeline. See buffer_options below. + // +kubebuilder:validation:Optional + BufferOptions []BufferOptionsParameters `json:"bufferOptions,omitempty" tf:"buffer_options,omitempty"` + + // Key-value pairs to configure encryption for data that is written to a persistent buffer. See encryption_at_rest_options below. + // +kubebuilder:validation:Optional + EncryptionAtRestOptions []EncryptionAtRestOptionsParameters `json:"encryptionAtRestOptions,omitempty" tf:"encryption_at_rest_options,omitempty"` + + // Key-value pairs to configure log publishing. See log_publishing_options below. + // +kubebuilder:validation:Optional + LogPublishingOptions []LogPublishingOptionsParameters `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // The maximum pipeline capacity, in Ingestion Compute Units (ICUs). + // +kubebuilder:validation:Optional + MaxUnits *float64 `json:"maxUnits,omitempty" tf:"max_units,omitempty"` + + // The minimum pipeline capacity, in Ingestion Compute Units (ICUs). + // +kubebuilder:validation:Optional + MinUnits *float64 `json:"minUnits,omitempty" tf:"min_units,omitempty"` + + // The pipeline configuration in YAML format. This argument accepts the pipeline configuration as a string or within a .yaml file. If you provide the configuration as a string, each new line must be escaped with \n. + // +kubebuilder:validation:Optional + PipelineConfigurationBody *string `json:"pipelineConfigurationBody,omitempty" tf:"pipeline_configuration_body,omitempty"` + + // The name of the OpenSearch Ingestion pipeline to create. Pipeline names are unique across the pipelines owned by an account within an AWS Region. + // +kubebuilder:validation:Optional + PipelineName *string `json:"pipelineName,omitempty" tf:"pipeline_name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Container for the values required to configure VPC access for the pipeline. If you don't specify these values, OpenSearch Ingestion creates the pipeline with a public endpoint. See vpc_options below. + // +kubebuilder:validation:Optional + VPCOptions []VPCOptionsParameters `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type VPCOptionsInitParameters struct { + + // A list of security groups associated with the VPC endpoint. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs associated with the VPC endpoint. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCOptionsObservation struct { + + // A list of security groups associated with the VPC endpoint. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs associated with the VPC endpoint. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCOptionsParameters struct { + + // A list of security groups associated with the VPC endpoint. 
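	// Illustrative values: sg-0123456789abcdef0 for a security group,
	// subnet-0123456789abcdef0 for a subnet.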
+ // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs associated with the VPC endpoint. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +// PipelineSpec defines the desired state of Pipeline +type PipelineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PipelineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PipelineInitParameters `json:"initProvider,omitempty"` +} + +// PipelineStatus defines the observed state of Pipeline. +type PipelineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PipelineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Pipeline is the Schema for the Pipelines API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Pipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.maxUnits) || (has(self.initProvider) && has(self.initProvider.maxUnits))",message="spec.forProvider.maxUnits is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.minUnits) || (has(self.initProvider) && has(self.initProvider.minUnits))",message="spec.forProvider.minUnits is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pipelineConfigurationBody) || (has(self.initProvider) && has(self.initProvider.pipelineConfigurationBody))",message="spec.forProvider.pipelineConfigurationBody is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pipelineName) || (has(self.initProvider) && has(self.initProvider.pipelineName))",message="spec.forProvider.pipelineName is a required 
parameter" + Spec PipelineSpec `json:"spec"` + Status PipelineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PipelineList contains a list of Pipelines +type PipelineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Pipeline `json:"items"` +} + +// Repository type metadata. +var ( + Pipeline_Kind = "Pipeline" + Pipeline_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Pipeline_Kind}.String() + Pipeline_KindAPIVersion = Pipeline_Kind + "." + CRDGroupVersion.String() + Pipeline_GroupVersionKind = CRDGroupVersion.WithKind(Pipeline_Kind) +) + +func init() { + SchemeBuilder.Register(&Pipeline{}, &PipelineList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go index 56436b57db..8088402d5e 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -222,6 +222,7 @@ import ( v1beta1opsworks "github.com/upbound/provider-aws/apis/opsworks/v1beta1" v1beta2opsworks "github.com/upbound/provider-aws/apis/opsworks/v1beta2" v1beta1organizations "github.com/upbound/provider-aws/apis/organizations/v1beta1" + v1beta1osis "github.com/upbound/provider-aws/apis/osis/v1beta1" v1beta1pinpoint "github.com/upbound/provider-aws/apis/pinpoint/v1beta1" v1beta2pinpoint "github.com/upbound/provider-aws/apis/pinpoint/v1beta2" v1beta1pipes "github.com/upbound/provider-aws/apis/pipes/v1beta1" @@ -517,6 +518,7 @@ func init() { v1beta1opsworks.SchemeBuilder.AddToScheme, v1beta2opsworks.SchemeBuilder.AddToScheme, v1beta1organizations.SchemeBuilder.AddToScheme, + v1beta1osis.SchemeBuilder.AddToScheme, v1beta1pinpoint.SchemeBuilder.AddToScheme, v1beta2pinpoint.SchemeBuilder.AddToScheme, v1beta1pipes.SchemeBuilder.AddToScheme, diff --git a/cmd/provider/osis/zz_main.go b/cmd/provider/osis/zz_main.go new file mode 100644 index 0000000000..748e990800 --- /dev/null +++ b/cmd/provider/osis/zz_main.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "fmt" + "io" + "log" + "os" + "path/filepath" + "time" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/certificates" + xpcontroller "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane-runtime/pkg/feature" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" + "gopkg.in/alecthomas/kingpin.v2" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection/resourcelock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "github.com/upbound/provider-aws/apis" + "github.com/upbound/provider-aws/apis/v1alpha1" + "github.com/upbound/provider-aws/config" + resolverapis "github.com/upbound/provider-aws/internal/apis" + "github.com/upbound/provider-aws/internal/clients" + "github.com/upbound/provider-aws/internal/controller" + 
"github.com/upbound/provider-aws/internal/features" +) + +const ( + webhookTLSCertDirEnvVar = "WEBHOOK_TLS_CERT_DIR" + tlsServerCertDirEnvVar = "TLS_SERVER_CERTS_DIR" + certsDirEnvVar = "CERTS_DIR" + tlsServerCertDir = "/tls/server" +) + +func deprecationAction(flagName string) kingpin.Action { + return func(c *kingpin.ParseContext) error { + _, err := fmt.Fprintf(os.Stderr, "warning: Command-line flag %q is deprecated and no longer used. It will be removed in a future release. Please remove it from all of your configurations (ControllerConfigs, etc.).\n", flagName) + kingpin.FatalIfError(err, "Failed to print the deprecation notice.") + return nil + } +} + +func main() { + var ( + app = kingpin.New(filepath.Base(os.Args[0]), "AWS support for Crossplane.").DefaultEnvars() + debug = app.Flag("debug", "Run with debug logging.").Short('d').Bool() + syncInterval = app.Flag("sync", "Sync interval controls how often all resources will be double checked for drift.").Short('s').Default("1h").Duration() + pollInterval = app.Flag("poll", "Poll interval controls how often an individual resource should be checked for drift.").Default("10m").Duration() + pollStateMetricInterval = app.Flag("poll-state-metric", "State metric recording interval").Default("5s").Duration() + leaderElection = app.Flag("leader-election", "Use leader election for the controller manager.").Short('l').Default("false").OverrideDefaultFromEnvar("LEADER_ELECTION").Bool() + maxReconcileRate = app.Flag("max-reconcile-rate", "The global maximum rate per second at which resources may be checked for drift from the desired state.").Default("100").Int() + + namespace = app.Flag("namespace", "Namespace used to set as default scope in default secret store config.").Default("crossplane-system").Envar("POD_NAMESPACE").String() + enableExternalSecretStores = app.Flag("enable-external-secret-stores", "Enable support for ExternalSecretStores.").Default("false").Envar("ENABLE_EXTERNAL_SECRET_STORES").Bool() + essTLSCertsPath = app.Flag("ess-tls-cert-dir", "Path of ESS TLS certificates.").Envar("ESS_TLS_CERTS_DIR").String() + enableManagementPolicies = app.Flag("enable-management-policies", "Enable support for Management Policies.").Default("true").Envar("ENABLE_MANAGEMENT_POLICIES").Bool() + + certsDirSet = false + // we record whether the command-line option "--certs-dir" was supplied + // in the registered PreAction for the flag. + certsDir = app.Flag("certs-dir", "The directory that contains the server key and certificate.").Default(tlsServerCertDir).Envar(certsDirEnvVar).PreAction(func(_ *kingpin.ParseContext) error { + certsDirSet = true + return nil + }).String() + + // now deprecated command-line arguments with the Terraform SDK-based upjet architecture + _ = app.Flag("provider-ttl", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] TTL for the native plugin processes before they are replaced. Changing the default may increase memory consumption.").Hidden().Action(deprecationAction("provider-ttl")).Int() + _ = app.Flag("terraform-version", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform version.").Envar("TERRAFORM_VERSION").Hidden().Action(deprecationAction("terraform-version")).String() + _ = app.Flag("terraform-provider-version", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] 
Terraform provider version.").Envar("TERRAFORM_PROVIDER_VERSION").Hidden().Action(deprecationAction("terraform-provider-version")).String() + _ = app.Flag("terraform-native-provider-path", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform native provider path for shared execution.").Envar("TERRAFORM_NATIVE_PROVIDER_PATH").Hidden().Action(deprecationAction("terraform-native-provider-path")).String() + _ = app.Flag("terraform-provider-source", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform provider source.").Envar("TERRAFORM_PROVIDER_SOURCE").Hidden().Action(deprecationAction("terraform-provider-source")).String() + ) + kingpin.MustParse(app.Parse(os.Args[1:])) + log.Default().SetOutput(io.Discard) + ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) + + zl := zap.New(zap.UseDevMode(*debug)) + logr := logging.NewLogrLogger(zl.WithName("provider-aws")) + if *debug { + // The controller-runtime runs with a no-op logger by default. It is + // *very* verbose even at info level, so we only provide it a real + // logger when we're running in debug mode. + ctrl.SetLogger(zl) + } + + // currently, we configure the jitter to be the 5% of the poll interval + pollJitter := time.Duration(float64(*pollInterval) * 0.05) + logr.Debug("Starting", "sync-interval", syncInterval.String(), + "poll-interval", pollInterval.String(), "poll-jitter", pollJitter, "max-reconcile-rate", *maxReconcileRate) + + cfg, err := ctrl.GetConfig() + kingpin.FatalIfError(err, "Cannot get API server rest config") + + // Get the TLS certs directory from the environment variables set by + // Crossplane if they're available. + // In older XP versions we used WEBHOOK_TLS_CERT_DIR, in newer versions + // we use TLS_SERVER_CERTS_DIR. If an explicit certs dir is not supplied + // via the command-line options, then these environment variables are used + // instead. + if !certsDirSet { + // backwards-compatibility concerns + xpCertsDir := os.Getenv(certsDirEnvVar) + if xpCertsDir == "" { + xpCertsDir = os.Getenv(tlsServerCertDirEnvVar) + } + if xpCertsDir == "" { + xpCertsDir = os.Getenv(webhookTLSCertDirEnvVar) + } + // we probably don't need this condition but just to be on the + // safe side, if we are missing any kingpin machinery details... 
+ if xpCertsDir != "" { + *certsDir = xpCertsDir + } + } + + mgr, err := ctrl.NewManager(ratelimiter.LimitRESTConfig(cfg, *maxReconcileRate), ctrl.Options{ + LeaderElection: *leaderElection, + LeaderElectionID: "crossplane-leader-election-provider-aws-osis", + Cache: cache.Options{ + SyncPeriod: syncInterval, + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + CertDir: *certsDir, + }), + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaseDuration: func() *time.Duration { d := 60 * time.Second; return &d }(), + RenewDeadline: func() *time.Duration { d := 50 * time.Second; return &d }(), + }) + kingpin.FatalIfError(err, "Cannot create controller manager") + kingpin.FatalIfError(apis.AddToScheme(mgr.GetScheme()), "Cannot add AWS APIs to scheme") + kingpin.FatalIfError(resolverapis.BuildScheme(apis.AddToSchemes), "Cannot register the AWS APIs with the API resolver's runtime scheme") + + metricRecorder := managed.NewMRMetricRecorder() + stateMetrics := statemetrics.NewMRStateMetrics() + + metrics.Registry.MustRegister(metricRecorder) + metrics.Registry.MustRegister(stateMetrics) + + ctx := context.Background() + provider, err := config.GetProvider(ctx, false) + kingpin.FatalIfError(err, "Cannot initialize the provider configuration") + setupConfig := &clients.SetupConfig{ + Logger: logr, + TerraformProvider: provider.TerraformProvider, + } + o := tjcontroller.Options{ + Options: xpcontroller.Options{ + Logger: logr, + GlobalRateLimiter: ratelimiter.NewGlobal(*maxReconcileRate), + PollInterval: *pollInterval, + MaxConcurrentReconciles: *maxReconcileRate, + Features: &feature.Flags{}, + MetricOptions: &xpcontroller.MetricOptions{ + PollStateMetricInterval: *pollStateMetricInterval, + MRMetrics: metricRecorder, + MRStateMetrics: stateMetrics, + }, + }, + Provider: provider, + SetupFn: clients.SelectTerraformSetup(setupConfig), + PollJitter: pollJitter, + OperationTrackerStore: tjcontroller.NewOperationStore(logr), + StartWebhooks: *certsDir != "", + } + + if *enableManagementPolicies { + o.Features.Enable(features.EnableBetaManagementPolicies) + logr.Info("Beta feature enabled", "flag", features.EnableBetaManagementPolicies) + } + + if *enableExternalSecretStores { + o.SecretStoreConfigGVK = &v1alpha1.StoreConfigGroupVersionKind + logr.Info("Alpha feature enabled", "flag", features.EnableAlphaExternalSecretStores) + + o.ESSOptions = &tjcontroller.ESSOptions{} + if *essTLSCertsPath != "" { + logr.Info("ESS TLS certificates path is set. Loading mTLS configuration.") + tCfg, err := certificates.LoadMTLSConfig(filepath.Join(*essTLSCertsPath, "ca.crt"), filepath.Join(*essTLSCertsPath, "tls.crt"), filepath.Join(*essTLSCertsPath, "tls.key"), false) + kingpin.FatalIfError(err, "Cannot load ESS TLS config.") + + o.ESSOptions.TLSConfig = tCfg + } + + // Ensure default store config exists. + kingpin.FatalIfError(resource.Ignore(kerrors.IsAlreadyExists, mgr.GetClient().Create(ctx, &v1alpha1.StoreConfig{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: v1alpha1.StoreConfigSpec{ + // NOTE(turkenh): We only set required spec and expect optional + // ones to properly be initialized with CRD level default values. 
+ SecretStoreConfig: xpv1.SecretStoreConfig{ + DefaultScope: *namespace, + }, + }, + Status: v1alpha1.StoreConfigStatus{}, + })), "Cannot create default store config") + } + + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") + kingpin.FatalIfError(controller.Setup_osis(mgr, o), "Cannot set up AWS controllers") + kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") +} diff --git a/config/externalname.go b/config/externalname.go index 94b0907122..ede2bb8896 100644 --- a/config/externalname.go +++ b/config/externalname.go @@ -80,6 +80,11 @@ var TerraformPluginFrameworkExternalNameConfigs = map[string]config.ExternalName // VPCEndpoint can be imported using the AWS-assigned VPC Endpoint ID, i.e. vpce-0a957ae9ed5aee308 "aws_opensearchserverless_vpc_endpoint": opensearchserverlessVpcEndpoint(), + // osis + // + // OSIS Pipeline can be imported using the pipeline name + "aws_osis_pipeline": config.NameAsIdentifier, + // s3 // // S3 directory bucket can be imported using the full id: [bucket_name]--[azid]--x-s3 diff --git a/config/generated.lst b/config/generated.lst index ace2118fdd..3ff419f287 100644 --- a/config/generated.lst +++ b/config/generated.lst @@ -664,6 +664,7 @@ "aws_organizations_organizational_unit", "aws_organizations_policy", "aws_organizations_policy_attachment", +"aws_osis_pipeline", "aws_pinpoint_app", "aws_pinpoint_sms_channel", "aws_pipes_pipe", diff --git a/config/osis/config.go b/config/osis/config.go new file mode 100644 index 0000000000..cdb65245fd --- /dev/null +++ b/config/osis/config.go @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: CC0-1.0 + +package osis + +import ( + "github.com/crossplane/upjet/pkg/config" + + "github.com/upbound/provider-aws/config/common" +) + +// Configure adds configurations for the osis group.
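+// Each config.Reference below associates a Terraform attribute path with +// the managed resource kind that satisfies it; upjet uses these entries to +// generate the named *Refs/*Selector fields (e.g. SecurityGroupIDRefs and +// SecurityGroupIDSelector) on the Pipeline kind, so users can wire up +// security groups, subnets, the KMS key and the log group by reference or +// label selector instead of hard-coding identifiers.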
+func Configure(p *config.Provider) { + p.AddResourceConfigurator("aws_osis_pipeline", func(r *config.Resource) { + r.References["vpc_options.security_group_ids"] = config.Reference{ + TerraformName: "aws_security_group", + RefFieldName: "SecurityGroupIDRefs", + SelectorFieldName: "SecurityGroupIDSelector", + } + + r.References["vpc_options.subnet_ids"] = config.Reference{ + TerraformName: "aws_subnet", + RefFieldName: "SubnetIDRefs", + SelectorFieldName: "SubnetIDSelector", + } + + r.References["encrypt_at_rest.kms_key_arn"] = config.Reference{ + // it's the KMS key ARN in the AWS API + TerraformName: "aws_kms_key", + Extractor: common.PathARNExtractor, + } + + r.References["log_publishing_options.cloudwatch_log_destination.log_group"] = config.Reference{ + TerraformName: "aws_cloudwatch_log_group", + } + + r.UseAsync = true + }) +} diff --git a/examples-generated/osis/v1beta1/pipeline.yaml b/examples-generated/osis/v1beta1/pipeline.yaml new file mode 100644 index 0000000000..c374069596 --- /dev/null +++ b/examples-generated/osis/v1beta1/pipeline.yaml @@ -0,0 +1,57 @@ +apiVersion: osis.aws.upbound.io/v1beta1 +kind: Pipeline +metadata: + annotations: + meta.upbound.io/example-id: osis/v1beta1/pipeline + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + maxUnits: 1 + minUnits: 1 + pipelineConfigurationBody: | + version: "2" + example-pipeline: + source: + http: + path: "/example" + sink: + - s3: + aws: + sts_role_arn: "${aws_iam_role.example.arn}" + region: "${data.aws_region.current.name}" + bucket: "example" + threshold: + event_collect_timeout: "60s" + codec: + ndjson: + pipelineName: example + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: osis/v1beta1/pipeline + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: |- + ${jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "" + Principal = { + Service = "osis-pipelines.amazonaws.com" + } + }, + ] + })} diff --git a/examples/osis/v1beta1/pipeline.yaml b/examples/osis/v1beta1/pipeline.yaml new file mode 100644 index 0000000000..26123e2661 --- /dev/null +++ b/examples/osis/v1beta1/pipeline.yaml @@ -0,0 +1,73 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: CC0-1.0 + +apiVersion: osis.aws.upbound.io/v1beta1 +kind: Pipeline +metadata: + annotations: + meta.upbound.io/example-id: osis/v1beta1/pipeline + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + maxUnits: 1 + minUnits: 1 + pipelineConfigurationBody: | + version: "2" + example-pipeline: + source: + http: + path: "/example" + sink: + - s3: + aws: + sts_role_arn: "arn:aws:iam::609897127049:role/example-osi-pipeline-role" + region: "us-west-1" + bucket: "example-upbound-osis-pipeline-example" + threshold: + event_collect_timeout: "60s" + codec: + ndjson: + pipelineName: example + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: osis/v1beta1/pipeline + labels: + testing.upbound.io/example-name: example + name: example-osi-pipeline-role +spec: + forProvider: + assumeRolePolicy: |- + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "osis-pipelines.amazonaws.com" + } + } + ] + } +--- + +apiVersion:
s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + meta.upbound.io/example-id: osis/v1beta1/pipeline + labels: + testing.upbound.io/example-name: example + name: example-upbound-osis-pipeline-example +spec: + forProvider: + region: us-west-1 \ No newline at end of file diff --git a/internal/controller/osis/pipeline/zz_controller.go b/internal/controller/osis/pipeline/zz_controller.go new file mode 100755 index 0000000000..ef50a6945e --- /dev/null +++ b/internal/controller/osis/pipeline/zz_controller.go @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package pipeline + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-aws/apis/osis/v1beta1" + features "github.com/upbound/provider-aws/internal/features" +) + +// Setup adds a controller that reconciles Pipeline managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.Pipeline_GroupVersionKind.String()) + var initializers managed.InitializerChain + for _, i := range o.Provider.Resources["aws_osis_pipeline"].InitializerFns { + initializers = append(initializers, i(mgr.GetClient())) + } + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.Pipeline_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Pipeline_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["aws_osis_pipeline"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.Pipeline_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), 
managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1beta1.Pipeline + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta1.Pipeline{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1beta1.Pipeline") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.PipelineList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.PipelineList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.Pipeline_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1beta1.Pipeline{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_monolith_setup.go b/internal/controller/zz_monolith_setup.go index cb6428f3b6..402e0611c1 100755 --- a/internal/controller/zz_monolith_setup.go +++ b/internal/controller/zz_monolith_setup.go @@ -703,6 +703,7 @@ import ( organizationalunit "github.com/upbound/provider-aws/internal/controller/organizations/organizationalunit" policyorganizations "github.com/upbound/provider-aws/internal/controller/organizations/policy" policyattachmentorganizations "github.com/upbound/provider-aws/internal/controller/organizations/policyattachment" + pipelineosis "github.com/upbound/provider-aws/internal/controller/osis/pipeline" apppinpoint "github.com/upbound/provider-aws/internal/controller/pinpoint/app" smschannel "github.com/upbound/provider-aws/internal/controller/pinpoint/smschannel" pipe "github.com/upbound/provider-aws/internal/controller/pipes/pipe" @@ -1663,6 +1664,7 @@ func Setup_monolith(mgr ctrl.Manager, o controller.Options) error { organizationalunit.Setup, policyorganizations.Setup, policyattachmentorganizations.Setup, + pipelineosis.Setup, apppinpoint.Setup, smschannel.Setup, pipe.Setup, diff --git a/internal/controller/zz_osis_setup.go b/internal/controller/zz_osis_setup.go new file mode 100755 index 0000000000..c952470775 --- /dev/null +++ b/internal/controller/zz_osis_setup.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/crossplane/upjet/pkg/controller" + + pipeline "github.com/upbound/provider-aws/internal/controller/osis/pipeline" +) + +// Setup_osis creates all controllers with the supplied logger and adds them to +// the supplied manager. 
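+// The provider family binary's main() invokes it once the manager and +// options are built, i.e. kingpin.FatalIfError(controller.Setup_osis(mgr, o), +// "Cannot set up AWS controllers"), and each per-kind Setup function in the +// list registers its controller with the supplied manager.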
+func Setup_osis(mgr ctrl.Manager, o controller.Options) error { + for _, setup := range []func(ctrl.Manager, controller.Options) error{ + pipeline.Setup, + } { + if err := setup(mgr, o); err != nil { + return err + } + } + return nil +} diff --git a/package/crds/osis.aws.upbound.io_pipelines.yaml b/package/crds/osis.aws.upbound.io_pipelines.yaml new file mode 100644 index 0000000000..38abd2265a --- /dev/null +++ b/package/crds/osis.aws.upbound.io_pipelines.yaml @@ -0,0 +1,649 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: pipelines.osis.aws.upbound.io +spec: + group: osis.aws.upbound.io + names: + categories: + - crossplane + - managed + - aws + kind: Pipeline + listKind: PipelineList + plural: pipelines + singular: pipeline + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Pipeline is the Schema for the Pipelines API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PipelineSpec defines the desired state of Pipeline + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external resource + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bufferOptions: + description: Key-value pairs to configure persistent buffering + for the pipeline. See buffer_options below. + items: + properties: + persistentBufferEnabled: + description: Whether persistent buffering should be enabled. + type: boolean + type: object + type: array + encryptionAtRestOptions: + description: Key-value pairs to configure encryption for data + that is written to a persistent buffer. See encryption_at_rest_options + below. + items: + properties: + kmsKeyArn: + description: The ARN of the KMS key used to encrypt data-at-rest + in OpenSearch Ingestion. By default, data is encrypted + using an AWS owned key.
+ type: string + type: object + type: array + logPublishingOptions: + description: Key-value pairs to configure log publishing. See + log_publishing_options below. + items: + properties: + cloudwatchLogDestination: + description: The destination for OpenSearch Ingestion logs + sent to Amazon CloudWatch Logs. This parameter is required + if IsLoggingEnabled is set to true. See cloudwatch_log_destination + below. + items: + properties: + logGroup: + description: The name of the CloudWatch Logs group + to send pipeline logs to. You can specify an existing + log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + type: string + type: object + type: array + isLoggingEnabled: + description: Whether logs should be published. + type: boolean + type: object + type: array + maxUnits: + description: The maximum pipeline capacity, in Ingestion Compute + Units (ICUs). + type: number + minUnits: + description: The minimum pipeline capacity, in Ingestion Compute + Units (ICUs). + type: number + pipelineConfigurationBody: + description: The pipeline configuration in YAML format. This argument + accepts the pipeline configuration as a string or within a .yaml + file. If you provide the configuration as a string, each new + line must be escaped with \n. + type: string + pipelineName: + description: The name of the OpenSearch Ingestion pipeline to + create. Pipeline names are unique across the pipelines owned + by an account within an AWS Region. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Container for the values required to configure VPC + access for the pipeline. If you don't specify these values, + OpenSearch Ingestion creates the pipeline with a public endpoint. + See vpc_options below. + items: + properties: + securityGroupIds: + description: A list of security groups associated with the + VPC endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs associated with the VPC + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, like an + autoscaler. + properties: + bufferOptions: + description: Key-value pairs to configure persistent buffering + for the pipeline. See buffer_options below. + items: + properties: + persistentBufferEnabled: + description: Whether persistent buffering should be enabled. + type: boolean + type: object + type: array + encryptionAtRestOptions: + description: Key-value pairs to configure encryption for data + that is written to a persistent buffer. See encryption_at_rest_options + below.
+ items: + properties: + kmsKeyArn: + description: The ARN of the KMS key used to encrypt data-at-rest + in OpenSearch Ingestion. By default, data is encrypted + using an AWS owned key. + type: string + type: object + type: array + logPublishingOptions: + description: Key-value pairs to configure log publishing. See + log_publishing_options below. + items: + properties: + cloudwatchLogDestination: + description: The destination for OpenSearch Ingestion logs + sent to Amazon CloudWatch Logs. This parameter is required + if IsLoggingEnabled is set to true. See cloudwatch_log_destination + below. + items: + properties: + logGroup: + description: The name of the CloudWatch Logs group + to send pipeline logs to. You can specify an existing + log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + type: string + type: object + type: array + isLoggingEnabled: + description: Whether logs should be published. + type: boolean + type: object + type: array + maxUnits: + description: The maximum pipeline capacity, in Ingestion Compute + Units (ICUs). + type: number + minUnits: + description: The minimum pipeline capacity, in Ingestion Compute + Units (ICUs). + type: number + pipelineConfigurationBody: + description: The pipeline configuration in YAML format. This argument + accepts the pipeline configuration as a string or within a .yaml + file. If you provide the configuration as a string, each new + line must be escaped with \n. + type: string + pipelineName: + description: The name of the OpenSearch Ingestion pipeline to + create. Pipeline names are unique across the pipelines owned + by an account within an AWS Region. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Container for the values required to configure VPC + access for the pipeline. If you don't specify these values, + OpenSearch Ingestion creates the pipeline with a public endpoint. + See vpc_options below. + items: + properties: + securityGroupIds: + description: A list of security groups associated with the + VPC endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs associated with the VPC + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for the connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to the connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to the Secret Store implementation for other store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to the connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to the Secret Store implementation for other store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret.
+ type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.maxUnits is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.maxUnits) + || (has(self.initProvider) && has(self.initProvider.maxUnits))' + - message: spec.forProvider.minUnits is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.minUnits) + || (has(self.initProvider) && has(self.initProvider.minUnits))' + - message: spec.forProvider.pipelineConfigurationBody is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pipelineConfigurationBody) + || (has(self.initProvider) && has(self.initProvider.pipelineConfigurationBody))' + - message: spec.forProvider.pipelineName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pipelineName) + || (has(self.initProvider) && has(self.initProvider.pipelineName))' + status: + description: PipelineStatus defines the observed state of Pipeline. + properties: + atProvider: + properties: + bufferOptions: + description: Key-value pairs to configure persistent buffering + for the pipeline. See buffer_options below. + items: + properties: + persistentBufferEnabled: + description: Whether persistent buffering should be enabled. + type: boolean + type: object + type: array + encryptionAtRestOptions: + description: Key-value pairs to configure encryption for data + that is written to a persistent buffer. See encryption_at_rest_options + below. + items: + properties: + kmsKeyArn: + description: The ARN of the KMS key used to encrypt data-at-rest + in OpenSearch Ingestion. By default, data is encrypted + using an AWS owned key. + type: string + type: object + type: array + id: + description: Unique identifier for the pipeline. + type: string + ingestEndpointUrls: + description: The list of ingestion endpoints for the pipeline, + which you can send data to. + items: + type: string + type: array + x-kubernetes-list-type: set + logPublishingOptions: + description: Key-value pairs to configure log publishing. See + log_publishing_options below. + items: + properties: + cloudwatchLogDestination: + description: The destination for OpenSearch Ingestion logs + sent to Amazon CloudWatch Logs. This parameter is required + if IsLoggingEnabled is set to true. See cloudwatch_log_destination + below. 
+ items: + properties: + logGroup: + description: The name of the CloudWatch Logs group + to send pipeline logs to. You can specify an existing + log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + type: string + type: object + type: array + isLoggingEnabled: + description: Whether logs should be published. + type: boolean + type: object + type: array + maxUnits: + description: The maximum pipeline capacity, in Ingestion Compute + Units (ICUs). + type: number + minUnits: + description: The minimum pipeline capacity, in Ingestion Compute + Units (ICUs). + type: number + pipelineArn: + description: Amazon Resource Name (ARN) of the pipeline. + type: string + pipelineConfigurationBody: + description: The pipeline configuration in YAML format. This argument + accepts the pipeline configuration as a string or within a .yaml + file. If you provide the configuration as a string, each new + line must be escaped with \n. + type: string + pipelineName: + description: The name of the OpenSearch Ingestion pipeline to + create. Pipeline names are unique across the pipelines owned + by an account within an AWS Region. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Container for the values required to configure VPC + access for the pipeline. If you don't specify these values, + OpenSearch Ingestion creates the pipeline with a public endpoint. + See vpc_options below. + items: + properties: + securityGroupIds: + description: A list of security groups associated with the + VPC endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs associated with the VPC + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or a stalled state due to an error + it cannot recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
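Reviewer note: because the external-name configuration is config.NameAsIdentifier, the crossplane.io/external-name annotation carries the OSIS pipeline name itself, and the CEL rules above require maxUnits, minUnits, pipelineConfigurationBody and pipelineName only when the management policies include '*', Create, or Update. An observe-only import of an existing pipeline should therefore pass validation with just a region and the annotation. A minimal sketch (the pipeline name below is hypothetical):

apiVersion: osis.aws.upbound.io/v1beta1
kind: Pipeline
metadata:
  name: observed
  annotations:
    # Hypothetical name of a pipeline that already exists in AWS.
    crossplane.io/external-name: my-existing-pipeline
spec:
  managementPolicies: ["Observe"]
  forProvider:
    region: us-west-1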