Documentation ¶
Index ¶
- func NewApiEventSource_Override(a ApiEventSource, method *string, path *string, ...)
- func NewDynamoEventSource_Override(d DynamoEventSource, table awsdynamodb.ITable, props *DynamoEventSourceProps)
- func NewKinesisEventSource_Override(k KinesisEventSource, stream awskinesis.IStream, ...)
- func NewManagedKafkaEventSource_Override(m ManagedKafkaEventSource, props *ManagedKafkaEventSourceProps)
- func NewS3EventSource_Override(s S3EventSource, bucket awss3.Bucket, props *S3EventSourceProps)
- func NewSelfManagedKafkaEventSource_Override(s SelfManagedKafkaEventSource, props *SelfManagedKafkaEventSourceProps)
- func NewSnsDlq_Override(s SnsDlq, topic awssns.ITopic)
- func NewSnsEventSource_Override(s SnsEventSource, topic awssns.ITopic, props *SnsEventSourceProps)
- func NewSqsDlq_Override(s SqsDlq, queue awssqs.IQueue)
- func NewSqsEventSource_Override(s SqsEventSource, queue awssqs.IQueue, props *SqsEventSourceProps)
- func NewStreamEventSource_Override(s StreamEventSource, props *StreamEventSourceProps)
- type ApiEventSource
- type AuthenticationMethod
- type BaseStreamEventSourceProps
- type DynamoEventSource
- type DynamoEventSourceProps
- type KafkaEventSourceProps
- type KinesisEventSource
- type KinesisEventSourceProps
- type ManagedKafkaEventSource
- type ManagedKafkaEventSourceProps
- type S3EventSource
- type S3EventSourceProps
- type SelfManagedKafkaEventSource
- type SelfManagedKafkaEventSourceProps
- type SnsDlq
- type SnsEventSource
- type SnsEventSourceProps
- type SqsDlq
- type SqsEventSource
- type SqsEventSourceProps
- type StreamEventSource
- type StreamEventSourceProps
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func NewApiEventSource_Override ¶
func NewApiEventSource_Override(a ApiEventSource, method *string, path *string, options *awsapigateway.MethodOptions)
func NewDynamoEventSource_Override ¶
func NewDynamoEventSource_Override(d DynamoEventSource, table awsdynamodb.ITable, props *DynamoEventSourceProps)
func NewKinesisEventSource_Override ¶
func NewKinesisEventSource_Override(k KinesisEventSource, stream awskinesis.IStream, props *KinesisEventSourceProps)
func NewManagedKafkaEventSource_Override ¶
func NewManagedKafkaEventSource_Override(m ManagedKafkaEventSource, props *ManagedKafkaEventSourceProps)
func NewS3EventSource_Override ¶
func NewS3EventSource_Override(s S3EventSource, bucket awss3.Bucket, props *S3EventSourceProps)
func NewSelfManagedKafkaEventSource_Override ¶
func NewSelfManagedKafkaEventSource_Override(s SelfManagedKafkaEventSource, props *SelfManagedKafkaEventSourceProps)
func NewSnsDlq_Override ¶
func NewSnsDlq_Override(s SnsDlq, topic awssns.ITopic)
func NewSnsEventSource_Override ¶
func NewSnsEventSource_Override(s SnsEventSource, topic awssns.ITopic, props *SnsEventSourceProps)
func NewSqsDlq_Override ¶
func NewSqsDlq_Override(s SqsDlq, queue awssqs.IQueue)
func NewSqsEventSource_Override ¶
func NewSqsEventSource_Override(s SqsEventSource, queue awssqs.IQueue, props *SqsEventSourceProps)
func NewStreamEventSource_Override ¶
func NewStreamEventSource_Override(s StreamEventSource, props *StreamEventSourceProps)
Types ¶
type ApiEventSource ¶
type ApiEventSource interface {
	awslambda.IEventSource
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
}
Example:
// The code below shows an example of how to instantiate this type.
// The values are placeholders you should change.
import (
	apigateway "github.com/aws/aws-cdk-go/awscdk/v2/awsapigateway"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var authorizer apigateway.IAuthorizer
var model apigateway.IModel
var requestValidator apigateway.IRequestValidator

apiEventSource := eventsources.NewApiEventSource(jsii.String("method"), jsii.String("path"), &apigateway.MethodOptions{
	ApiKeyRequired: jsii.Boolean(false),
	AuthorizationScopes: &[]*string{
		jsii.String("authorizationScopes"),
	},
	AuthorizationType: apigateway.AuthorizationType_NONE,
	Authorizer:        authorizer,
	MethodResponses: &[]*apigateway.MethodResponse{
		{
			StatusCode: jsii.String("statusCode"),
			// the properties below are optional
			ResponseModels: &map[string]apigateway.IModel{
				"responseModelsKey": model,
			},
			ResponseParameters: &map[string]*bool{
				"responseParametersKey": jsii.Boolean(false),
			},
		},
	},
	OperationName: jsii.String("operationName"),
	RequestModels: &map[string]apigateway.IModel{
		"requestModelsKey": model,
	},
	RequestParameters: &map[string]*bool{
		"requestParametersKey": jsii.Boolean(false),
	},
	RequestValidator: requestValidator,
	RequestValidatorOptions: &apigateway.RequestValidatorOptions{
		RequestValidatorName:      jsii.String("requestValidatorName"),
		ValidateRequestBody:       jsii.Boolean(false),
		ValidateRequestParameters: jsii.Boolean(false),
	},
})
func NewApiEventSource ¶
func NewApiEventSource(method *string, path *string, options *awsapigateway.MethodOptions) ApiEventSource
type AuthenticationMethod ¶
type AuthenticationMethod string
The authentication method to use with SelfManagedKafkaEventSource.
const (
	// SASL_SCRAM_512_AUTH authentication method for your Kafka cluster.
	AuthenticationMethod_SASL_SCRAM_512_AUTH AuthenticationMethod = "SASL_SCRAM_512_AUTH"
	// SASL_SCRAM_256_AUTH authentication method for your Kafka cluster.
	AuthenticationMethod_SASL_SCRAM_256_AUTH AuthenticationMethod = "SASL_SCRAM_256_AUTH"
	// BASIC_AUTH (SASL/PLAIN) authentication method for your Kafka cluster.
	AuthenticationMethod_BASIC_AUTH AuthenticationMethod = "BASIC_AUTH"
	// CLIENT_CERTIFICATE_TLS_AUTH (mTLS) authentication method for your Kafka cluster.
	AuthenticationMethod_CLIENT_CERTIFICATE_TLS_AUTH AuthenticationMethod = "CLIENT_CERTIFICATE_TLS_AUTH"
)
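For example, to have Lambda authenticate to a self-managed cluster with SASL/SCRAM-256, set authenticationMethod on the SelfManagedKafkaEventSourceProps. A minimal sketch; the broker address, topic and secret below are placeholders:

import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	secretsmanager "github.com/aws/aws-cdk-go/awscdk/v2/awssecretsmanager"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var fn awslambda.Function
// Secret holding credentials that match the chosen authentication method (placeholder).
var secret secretsmanager.Secret

fn.AddEventSource(eventsources.NewSelfManagedKafkaEventSource(&eventsources.SelfManagedKafkaEventSourceProps{
	BootstrapServers:     &[]*string{jsii.String("kafka-broker:9092")},
	Topic:                jsii.String("some-topic"),
	Secret:               secret,
	AuthenticationMethod: eventsources.AuthenticationMethod_SASL_SCRAM_256_AUTH,
	StartingPosition:     awslambda.StartingPosition_TRIM_HORIZON,
}))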
type BaseStreamEventSourceProps ¶ added in v2.7.0
type BaseStreamEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
}
The set of properties for streaming event sources shared by Dynamo, Kinesis and Kafka.
Example:
// The code below shows an example of how to instantiate this type.
// The values are placeholders you should change.
import (
	cdk "github.com/aws/aws-cdk-go/awscdk/v2"
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

baseStreamEventSourceProps := &eventsources.BaseStreamEventSourceProps{
	StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
	// the properties below are optional
	BatchSize:         jsii.Number(123),
	Enabled:           jsii.Boolean(false),
	MaxBatchingWindow: cdk.Duration_Minutes(jsii.Number(30)),
}
type DynamoEventSource ¶
type DynamoEventSource interface {
	StreamEventSource
	// The identifier for this EventSourceMapping.
	EventSourceMappingId() *string
	Props() *StreamEventSourceProps
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
	EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use an Amazon DynamoDB stream as an event source for AWS Lambda.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	dynamodb "github.com/aws/aws-cdk-go/awscdk/v2/awsdynamodb"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var fn awslambda.Function

table := dynamodb.NewTable(this, jsii.String("Table"), &dynamodb.TableProps{
	PartitionKey: &dynamodb.Attribute{
		Name: jsii.String("id"),
		Type: dynamodb.AttributeType_STRING,
	},
	Stream: dynamodb.StreamViewType_NEW_IMAGE,
})
fn.AddEventSource(eventsources.NewDynamoEventSource(table, &eventsources.DynamoEventSourceProps{
	StartingPosition: awslambda.StartingPosition_LATEST,
	Filters: &[]*map[string]interface{}{
		{
			"eventName": awslambda.FilterRule_IsEqual(jsii.String("INSERT")),
		},
	},
}))
func NewDynamoEventSource ¶
func NewDynamoEventSource(table awsdynamodb.ITable, props *DynamoEventSourceProps) DynamoEventSource
type DynamoEventSourceProps ¶
type DynamoEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// If the function returns an error, split the batch in two and retry.
	BisectBatchOnError *bool `field:"optional" json:"bisectBatchOnError" yaml:"bisectBatchOnError"`
	// Add filter criteria option.
	Filters *[]*map[string]interface{} `field:"optional" json:"filters" yaml:"filters"`
	// The maximum age of a record that Lambda sends to a function for processing.
	//
	// Valid Range:
	// * Minimum value of 60 seconds
	// * Maximum value of 7 days.
	MaxRecordAge awscdk.Duration `field:"optional" json:"maxRecordAge" yaml:"maxRecordAge"`
	// An Amazon SQS queue or Amazon SNS topic destination for discarded records.
	OnFailure awslambda.IEventSourceDlq `field:"optional" json:"onFailure" yaml:"onFailure"`
	// The number of batches to process from each shard concurrently.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of 10.
	ParallelizationFactor *float64 `field:"optional" json:"parallelizationFactor" yaml:"parallelizationFactor"`
	// Allow functions to return partially successful responses for a batch of records.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
	ReportBatchItemFailures *bool `field:"optional" json:"reportBatchItemFailures" yaml:"reportBatchItemFailures"`
	// Maximum number of retry attempts.
	//
	// Valid Range:
	// * Minimum value of 0
	// * Maximum value of 10000.
	RetryAttempts *float64 `field:"optional" json:"retryAttempts" yaml:"retryAttempts"`
	// The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
	//
	// Valid Range: 0 - 15 minutes.
	TumblingWindow awscdk.Duration `field:"optional" json:"tumblingWindow" yaml:"tumblingWindow"`
}
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	dynamodb "github.com/aws/aws-cdk-go/awscdk/v2/awsdynamodb"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var fn awslambda.Function

table := dynamodb.NewTable(this, jsii.String("Table"), &dynamodb.TableProps{
	PartitionKey: &dynamodb.Attribute{
		Name: jsii.String("id"),
		Type: dynamodb.AttributeType_STRING,
	},
	Stream: dynamodb.StreamViewType_NEW_IMAGE,
})
fn.AddEventSource(eventsources.NewDynamoEventSource(table, &eventsources.DynamoEventSourceProps{
	StartingPosition: awslambda.StartingPosition_LATEST,
	Filters: &[]*map[string]interface{}{
		{
			"eventName": awslambda.FilterRule_IsEqual(jsii.String("INSERT")),
		},
	},
}))
type KafkaEventSourceProps ¶
type KafkaEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// The Kafka topic to subscribe to.
	Topic *string `field:"required" json:"topic" yaml:"topic"`
	// The identifier for the Kafka consumer group to join.
	//
	// The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. The value must have a length between 1 and 200 and match the pattern '[a-zA-Z0-9-\/*:_+=.@-]*'.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-consumer-group-id
	ConsumerGroupId *string `field:"optional" json:"consumerGroupId" yaml:"consumerGroupId"`
	// The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details.
	//
	// This field is required if your Kafka brokers are accessed over the Internet.
	Secret awssecretsmanager.ISecret `field:"optional" json:"secret" yaml:"secret"`
}
Properties for a Kafka event source.
Example:
// The code below shows an example of how to instantiate this type.
// The values are placeholders you should change.
import (
	cdk "github.com/aws/aws-cdk-go/awscdk/v2"
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	secretsmanager "github.com/aws/aws-cdk-go/awscdk/v2/awssecretsmanager"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var secret secretsmanager.Secret

kafkaEventSourceProps := &eventsources.KafkaEventSourceProps{
	StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
	Topic:            jsii.String("topic"),
	// the properties below are optional
	BatchSize:         jsii.Number(123),
	ConsumerGroupId:   jsii.String("consumerGroupId"),
	Enabled:           jsii.Boolean(false),
	MaxBatchingWindow: cdk.Duration_Minutes(jsii.Number(30)),
	Secret:            secret,
}
type KinesisEventSource ¶
type KinesisEventSource interface {
	StreamEventSource
	// The identifier for this EventSourceMapping.
	EventSourceMappingId() *string
	Props() *StreamEventSourceProps
	Stream() awskinesis.IStream
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
	EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use an Amazon Kinesis stream as an event source for AWS Lambda.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	kinesis "github.com/aws/aws-cdk-go/awscdk/v2/awskinesis"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var myFunction awslambda.Function

stream := kinesis.NewStream(this, jsii.String("MyStream"), nil)
myFunction.AddEventSource(eventsources.NewKinesisEventSource(stream, &eventsources.KinesisEventSourceProps{
	BatchSize:        jsii.Number(100), // default
	StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
}))
func NewKinesisEventSource ¶
func NewKinesisEventSource(stream awskinesis.IStream, props *KinesisEventSourceProps) KinesisEventSource
type KinesisEventSourceProps ¶
type KinesisEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// If the function returns an error, split the batch in two and retry.
	BisectBatchOnError *bool `field:"optional" json:"bisectBatchOnError" yaml:"bisectBatchOnError"`
	// Add filter criteria option.
	Filters *[]*map[string]interface{} `field:"optional" json:"filters" yaml:"filters"`
	// The maximum age of a record that Lambda sends to a function for processing.
	//
	// Valid Range:
	// * Minimum value of 60 seconds
	// * Maximum value of 7 days.
	MaxRecordAge awscdk.Duration `field:"optional" json:"maxRecordAge" yaml:"maxRecordAge"`
	// An Amazon SQS queue or Amazon SNS topic destination for discarded records.
	OnFailure awslambda.IEventSourceDlq `field:"optional" json:"onFailure" yaml:"onFailure"`
	// The number of batches to process from each shard concurrently.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of 10.
	ParallelizationFactor *float64 `field:"optional" json:"parallelizationFactor" yaml:"parallelizationFactor"`
	// Allow functions to return partially successful responses for a batch of records.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
	ReportBatchItemFailures *bool `field:"optional" json:"reportBatchItemFailures" yaml:"reportBatchItemFailures"`
	// Maximum number of retry attempts.
	//
	// Valid Range:
	// * Minimum value of 0
	// * Maximum value of 10000.
	RetryAttempts *float64 `field:"optional" json:"retryAttempts" yaml:"retryAttempts"`
	// The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
	//
	// Valid Range: 0 - 15 minutes.
	TumblingWindow awscdk.Duration `field:"optional" json:"tumblingWindow" yaml:"tumblingWindow"`
	// The time from which to start reading, in Unix time seconds.
	StartingPositionTimestamp *float64 `field:"optional" json:"startingPositionTimestamp" yaml:"startingPositionTimestamp"`
}
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	kinesis "github.com/aws/aws-cdk-go/awscdk/v2/awskinesis"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var myFunction awslambda.Function

stream := kinesis.NewStream(this, jsii.String("MyStream"), nil)
myFunction.AddEventSource(eventsources.NewKinesisEventSource(stream, &eventsources.KinesisEventSourceProps{
	BatchSize:        jsii.Number(100), // default
	StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
}))
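The startingPositionTimestamp property only takes effect together with the AT_TIMESTAMP starting position. A minimal sketch, assuming an existing stream and function; the timestamp value is a placeholder:

import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	kinesis "github.com/aws/aws-cdk-go/awscdk/v2/awskinesis"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var myFunction awslambda.Function
var stream kinesis.Stream

// Start reading at a fixed point in time, given as Unix time in seconds (placeholder value).
myFunction.AddEventSource(eventsources.NewKinesisEventSource(stream, &eventsources.KinesisEventSourceProps{
	StartingPosition:          awslambda.StartingPosition_AT_TIMESTAMP,
	StartingPositionTimestamp: jsii.Number(1655237653),
}))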
type ManagedKafkaEventSource ¶
type ManagedKafkaEventSource interface {
	StreamEventSource
	// The identifier for this EventSourceMapping.
	EventSourceMappingId() *string
	Props() *StreamEventSourceProps
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
	EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use an MSK cluster as a streaming source for AWS Lambda.
Example:
import "github.com/aws/aws-cdk-go/awscdk" import "github.com/aws/aws-cdk-go/awscdk" var myFunction function // Your MSK cluster arn clusterArn := "arn:aws:kafka:us-east-1:0123456789019:cluster/SalesCluster/abcd1234-abcd-cafe-abab-9876543210ab-4" // The Kafka topic you want to subscribe to topic := "some-cool-topic" // The secret that allows access to your MSK cluster // You still have to make sure that it is associated with your cluster as described in the documentation secret := awscdk.NewSecret(this, jsii.String("Secret"), &secretProps{ secretName: jsii.String("AmazonMSK_KafkaSecret"), }) myFunction.addEventSource(awscdk.NewManagedKafkaEventSource(&managedKafkaEventSourceProps{ clusterArn: jsii.String(clusterArn), topic: topic, secret: secret, batchSize: jsii.Number(100), // default startingPosition: lambda.startingPosition_TRIM_HORIZON, }))
func NewManagedKafkaEventSource ¶
func NewManagedKafkaEventSource(props *ManagedKafkaEventSourceProps) ManagedKafkaEventSource
type ManagedKafkaEventSourceProps ¶
type ManagedKafkaEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// The Kafka topic to subscribe to.
	Topic *string `field:"required" json:"topic" yaml:"topic"`
	// The identifier for the Kafka consumer group to join.
	//
	// The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. The value must have a length between 1 and 200 and match the pattern '[a-zA-Z0-9-\/*:_+=.@-]*'.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-consumer-group-id
	ConsumerGroupId *string `field:"optional" json:"consumerGroupId" yaml:"consumerGroupId"`
	// The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details.
	//
	// This field is required if your Kafka brokers are accessed over the Internet.
	Secret awssecretsmanager.ISecret `field:"optional" json:"secret" yaml:"secret"`
	// The ARN of your MSK cluster.
	ClusterArn *string `field:"required" json:"clusterArn" yaml:"clusterArn"`
}
Properties for an MSK event source.
Example:
import "github.com/aws/aws-cdk-go/awscdk" import "github.com/aws/aws-cdk-go/awscdk" var myFunction function // Your MSK cluster arn clusterArn := "arn:aws:kafka:us-east-1:0123456789019:cluster/SalesCluster/abcd1234-abcd-cafe-abab-9876543210ab-4" // The Kafka topic you want to subscribe to topic := "some-cool-topic" // The secret that allows access to your MSK cluster // You still have to make sure that it is associated with your cluster as described in the documentation secret := awscdk.NewSecret(this, jsii.String("Secret"), &secretProps{ secretName: jsii.String("AmazonMSK_KafkaSecret"), }) myFunction.addEventSource(awscdk.NewManagedKafkaEventSource(&managedKafkaEventSourceProps{ clusterArn: jsii.String(clusterArn), topic: topic, secret: secret, batchSize: jsii.Number(100), // default startingPosition: lambda.startingPosition_TRIM_HORIZON, }))
type S3EventSource ¶
type S3EventSource interface {
	awslambda.IEventSource
	Bucket() awss3.Bucket
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
}
Use S3 bucket notifications as an event source for AWS Lambda.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	s3 "github.com/aws/aws-cdk-go/awscdk/v2/awss3"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var fn awslambda.Function

bucket := s3.NewBucket(this, jsii.String("Bucket"), nil)
fn.AddEventSource(eventsources.NewS3EventSource(bucket, &eventsources.S3EventSourceProps{
	Events: &[]s3.EventType{
		s3.EventType_OBJECT_CREATED,
		s3.EventType_OBJECT_REMOVED,
	},
	Filters: &[]*s3.NotificationKeyFilter{
		{
			Prefix: jsii.String("subdir/"),
		},
	},
}))
func NewS3EventSource ¶
func NewS3EventSource(bucket awss3.Bucket, props *S3EventSourceProps) S3EventSource
type S3EventSourceProps ¶
type S3EventSourceProps struct {
	// The S3 event types that will trigger the notification.
	Events *[]awss3.EventType `field:"required" json:"events" yaml:"events"`
	// S3 object key filter rules to determine which objects trigger this event.
	//
	// Each filter must include a `prefix` and/or `suffix` that will be matched
	// against the S3 object key. Refer to the S3 Developer Guide for details
	// about allowed filter rules.
	Filters *[]*awss3.NotificationKeyFilter `field:"optional" json:"filters" yaml:"filters"`
}
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	s3 "github.com/aws/aws-cdk-go/awscdk/v2/awss3"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var fn awslambda.Function

bucket := s3.NewBucket(this, jsii.String("Bucket"), nil)
fn.AddEventSource(eventsources.NewS3EventSource(bucket, &eventsources.S3EventSourceProps{
	Events: &[]s3.EventType{
		s3.EventType_OBJECT_CREATED,
		s3.EventType_OBJECT_REMOVED,
	},
	Filters: &[]*s3.NotificationKeyFilter{
		{
			Prefix: jsii.String("subdir/"),
		},
	},
}))
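Filter rules can also match on an object key suffix. A minimal sketch, assuming an existing bucket and function; the ".png" suffix is a placeholder:

import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	s3 "github.com/aws/aws-cdk-go/awscdk/v2/awss3"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var fn awslambda.Function
var bucket s3.Bucket

// Only objects whose key ends in ".png" trigger the function.
fn.AddEventSource(eventsources.NewS3EventSource(bucket, &eventsources.S3EventSourceProps{
	Events: &[]s3.EventType{s3.EventType_OBJECT_CREATED},
	Filters: &[]*s3.NotificationKeyFilter{
		{Suffix: jsii.String(".png")},
	},
}))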
type SelfManagedKafkaEventSource ¶
type SelfManagedKafkaEventSource interface {
	StreamEventSource
	Props() *StreamEventSourceProps
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
	EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use a self-hosted Kafka installation as a streaming source for AWS Lambda.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	secretsmanager "github.com/aws/aws-cdk-go/awscdk/v2/awssecretsmanager"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

// The secret that allows access to your self-hosted Kafka cluster
var secret secretsmanager.Secret

// (Optional) The secret containing the root CA certificate that your Kafka brokers use for TLS encryption
var rootCACertificate secretsmanager.Secret

var myFunction awslambda.Function

// The list of Kafka brokers
bootstrapServers := &[]*string{
	jsii.String("kafka-broker:9092"),
}

// The Kafka topic you want to subscribe to
topic := "some-cool-topic"

// (Optional) The consumer group id to use when connecting to the Kafka broker. If omitted the UUID of the event source mapping will be used.
var consumerGroupId string

myFunction.AddEventSource(eventsources.NewSelfManagedKafkaEventSource(&eventsources.SelfManagedKafkaEventSourceProps{
	BootstrapServers:  bootstrapServers,
	Topic:             jsii.String(topic),
	ConsumerGroupId:   jsii.String(consumerGroupId),
	Secret:            secret,
	BatchSize:         jsii.Number(100), // default
	StartingPosition:  awslambda.StartingPosition_TRIM_HORIZON,
	RootCACertificate: rootCACertificate,
}))
func NewSelfManagedKafkaEventSource ¶
func NewSelfManagedKafkaEventSource(props *SelfManagedKafkaEventSourceProps) SelfManagedKafkaEventSource
type SelfManagedKafkaEventSourceProps ¶
type SelfManagedKafkaEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// The Kafka topic to subscribe to.
	Topic *string `field:"required" json:"topic" yaml:"topic"`
	// The identifier for the Kafka consumer group to join.
	//
	// The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. The value must have a length between 1 and 200 and match the pattern '[a-zA-Z0-9-\/*:_+=.@-]*'.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-consumer-group-id
	ConsumerGroupId *string `field:"optional" json:"consumerGroupId" yaml:"consumerGroupId"`
	// The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details.
	//
	// This field is required if your Kafka brokers are accessed over the Internet.
	Secret awssecretsmanager.ISecret `field:"optional" json:"secret" yaml:"secret"`
	// The list of host and port pairs that are the addresses of the Kafka brokers in a "bootstrap" Kafka cluster that a Kafka client connects to initially to bootstrap itself.
	//
	// They are in the format `abc.xyz.com:xxxx`.
	BootstrapServers *[]*string `field:"required" json:"bootstrapServers" yaml:"bootstrapServers"`
	// The authentication method for your Kafka cluster.
	AuthenticationMethod AuthenticationMethod `field:"optional" json:"authenticationMethod" yaml:"authenticationMethod"`
	// The secret with the root CA certificate used by your Kafka brokers for TLS encryption.
	//
	// This field is required if your Kafka brokers use certificates signed by a private CA.
	RootCACertificate awssecretsmanager.ISecret `field:"optional" json:"rootCACertificate" yaml:"rootCACertificate"`
	// If your Kafka brokers are only reachable via VPC, provide the security group here.
	SecurityGroup awsec2.ISecurityGroup `field:"optional" json:"securityGroup" yaml:"securityGroup"`
	// If your Kafka brokers are only reachable via VPC, provide the VPC here.
	Vpc awsec2.IVpc `field:"optional" json:"vpc" yaml:"vpc"`
	// If your Kafka brokers are only reachable via VPC, provide the subnets selection here.
	VpcSubnets *awsec2.SubnetSelection `field:"optional" json:"vpcSubnets" yaml:"vpcSubnets"`
}
Properties for a self-managed Kafka cluster event source.
If your Kafka cluster is only reachable via VPC, make sure to configure the vpc, vpcSubnets and securityGroup properties accordingly.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	secretsmanager "github.com/aws/aws-cdk-go/awscdk/v2/awssecretsmanager"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

// The secret that allows access to your self-hosted Kafka cluster
var secret secretsmanager.Secret

// (Optional) The secret containing the root CA certificate that your Kafka brokers use for TLS encryption
var rootCACertificate secretsmanager.Secret

var myFunction awslambda.Function

// The list of Kafka brokers
bootstrapServers := &[]*string{
	jsii.String("kafka-broker:9092"),
}

// The Kafka topic you want to subscribe to
topic := "some-cool-topic"

// (Optional) The consumer group id to use when connecting to the Kafka broker. If omitted the UUID of the event source mapping will be used.
var consumerGroupId string

myFunction.AddEventSource(eventsources.NewSelfManagedKafkaEventSource(&eventsources.SelfManagedKafkaEventSourceProps{
	BootstrapServers:  bootstrapServers,
	Topic:             jsii.String(topic),
	ConsumerGroupId:   jsii.String(consumerGroupId),
	Secret:            secret,
	BatchSize:         jsii.Number(100), // default
	StartingPosition:  awslambda.StartingPosition_TRIM_HORIZON,
	RootCACertificate: rootCACertificate,
}))
type SnsDlq ¶
type SnsDlq interface {
	awslambda.IEventSourceDlq
	// Returns a destination configuration for the DLQ.
	Bind(_target awslambda.IEventSourceMapping, targetHandler awslambda.IFunction) *awslambda.DlqDestinationConfig
}
An SNS dead letter queue destination configuration for a Lambda event source.
Example:
// The code below shows an example of how to instantiate this type.
// The values are placeholders you should change.
import (
	sns "github.com/aws/aws-cdk-go/awscdk/v2/awssns"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
)

var topic sns.Topic

snsDlq := eventsources.NewSnsDlq(topic)
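Because SnsDlq implements awslambda.IEventSourceDlq, it can be passed as the onFailure destination of a stream event source. A minimal sketch, assuming an existing function, an SNS topic, and a DynamoDB table with streams enabled; all names are placeholders:

import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	dynamodb "github.com/aws/aws-cdk-go/awscdk/v2/awsdynamodb"
	sns "github.com/aws/aws-cdk-go/awscdk/v2/awssns"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
)

var fn awslambda.Function
var table dynamodb.Table // must have a stream enabled
var deadLetterTopic sns.Topic

// Discarded records are published to the SNS topic.
fn.AddEventSource(eventsources.NewDynamoEventSource(table, &eventsources.DynamoEventSourceProps{
	StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
	OnFailure:        eventsources.NewSnsDlq(deadLetterTopic),
}))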
type SnsEventSource ¶
type SnsEventSource interface {
	awslambda.IEventSource
	Topic() awssns.ITopic
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
}
Use an Amazon SNS topic as an event source for AWS Lambda.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	sns "github.com/aws/aws-cdk-go/awscdk/v2/awssns"
	sqs "github.com/aws/aws-cdk-go/awscdk/v2/awssqs"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var topic sns.Topic
var fn awslambda.Function

deadLetterQueue := sqs.NewQueue(this, jsii.String("deadLetterQueue"), nil)
fn.AddEventSource(eventsources.NewSnsEventSource(topic, &eventsources.SnsEventSourceProps{
	FilterPolicy:    &map[string]sns.SubscriptionFilter{},
	DeadLetterQueue: deadLetterQueue,
}))
func NewSnsEventSource ¶
func NewSnsEventSource(topic awssns.ITopic, props *SnsEventSourceProps) SnsEventSource
type SnsEventSourceProps ¶
type SnsEventSourceProps struct {
	// Queue to be used as dead letter queue.
	//
	// If not passed, no dead letter queue is enabled.
	DeadLetterQueue awssqs.IQueue `field:"optional" json:"deadLetterQueue" yaml:"deadLetterQueue"`
	// The filter policy.
	FilterPolicy *map[string]awssns.SubscriptionFilter `field:"optional" json:"filterPolicy" yaml:"filterPolicy"`
}
Properties forwarded to the Lambda Subscription.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	sns "github.com/aws/aws-cdk-go/awscdk/v2/awssns"
	sqs "github.com/aws/aws-cdk-go/awscdk/v2/awssqs"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var topic sns.Topic
var fn awslambda.Function

deadLetterQueue := sqs.NewQueue(this, jsii.String("deadLetterQueue"), nil)
fn.AddEventSource(eventsources.NewSnsEventSource(topic, &eventsources.SnsEventSourceProps{
	FilterPolicy:    &map[string]sns.SubscriptionFilter{},
	DeadLetterQueue: deadLetterQueue,
}))
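A non-empty filter policy restricts which messages are delivered to the function. A minimal sketch, assuming published messages carry a "color" message attribute; the attribute name and values are placeholders:

import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	sns "github.com/aws/aws-cdk-go/awscdk/v2/awssns"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var topic sns.Topic
var fn awslambda.Function

// Only messages whose "color" attribute is "red" or "orange" are delivered.
fn.AddEventSource(eventsources.NewSnsEventSource(topic, &eventsources.SnsEventSourceProps{
	FilterPolicy: &map[string]sns.SubscriptionFilter{
		"color": sns.SubscriptionFilter_StringFilter(&sns.StringConditions{
			Allowlist: &[]*string{jsii.String("red"), jsii.String("orange")},
		}),
	},
}))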
type SqsDlq ¶
type SqsDlq interface {
	awslambda.IEventSourceDlq
	// Returns a destination configuration for the DLQ.
	Bind(_target awslambda.IEventSourceMapping, targetHandler awslambda.IFunction) *awslambda.DlqDestinationConfig
}
An SQS dead letter queue destination configuration for a Lambda event source.
Example:
import (
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	dynamodb "github.com/aws/aws-cdk-go/awscdk/v2/awsdynamodb"
	sqs "github.com/aws/aws-cdk-go/awscdk/v2/awssqs"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var table dynamodb.Table
var fn awslambda.Function

deadLetterQueue := sqs.NewQueue(this, jsii.String("deadLetterQueue"), nil)
fn.AddEventSource(eventsources.NewDynamoEventSource(table, &eventsources.DynamoEventSourceProps{
	StartingPosition:   awslambda.StartingPosition_TRIM_HORIZON,
	BatchSize:          jsii.Number(5),
	BisectBatchOnError: jsii.Boolean(true),
	OnFailure:          eventsources.NewSqsDlq(deadLetterQueue),
	RetryAttempts:      jsii.Number(10),
}))
type SqsEventSource ¶
type SqsEventSource interface {
	awslambda.IEventSource
	// The identifier for this EventSourceMapping.
	EventSourceMappingId() *string
	Queue() awssqs.IQueue
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(target awslambda.IFunction)
}
Use an Amazon SQS queue as an event source for AWS Lambda.
Example:
import "github.com/aws/aws-cdk-go/awscdk" var fn function queue := sqs.NewQueue(this, jsii.String("MyQueue")) eventSource := awscdk.NewSqsEventSource(queue) fn.addEventSource(eventSource) eventSourceId := eventSource.eventSourceMappingId
func NewSqsEventSource ¶
func NewSqsEventSource(queue awssqs.IQueue, props *SqsEventSourceProps) SqsEventSource
type SqsEventSourceProps ¶
type SqsEventSourceProps struct {
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range: Minimum value of 1. Maximum value of 10.
	// If `maxBatchingWindow` is configured, this value can go up to 10,000.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the SQS event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// Add filter criteria option.
	Filters *[]*map[string]interface{} `field:"optional" json:"filters" yaml:"filters"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Valid Range: Minimum value of 0 minutes. Maximum value of 5 minutes.
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// Allow functions to return partially successful responses for a batch of records.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting
	ReportBatchItemFailures *bool `field:"optional" json:"reportBatchItemFailures" yaml:"reportBatchItemFailures"`
}
Example:
import "github.com/aws/aws-cdk-go/awscdk" var fn function queue := sqs.NewQueue(this, jsii.String("MyQueue"), &queueProps{ visibilityTimeout: awscdk.Duration.seconds(jsii.Number(30)), // default, receiveMessageWaitTime: awscdk.Duration.seconds(jsii.Number(20)), }) fn.addEventSource(awscdk.NewSqsEventSource(queue, &sqsEventSourceProps{ batchSize: jsii.Number(10), // default maxBatchingWindow: awscdk.Duration.minutes(jsii.Number(5)), reportBatchItemFailures: jsii.Boolean(true), }))
type StreamEventSource ¶
type StreamEventSource interface {
	awslambda.IEventSource
	Props() *StreamEventSourceProps
	// Called by `lambda.addEventSource` to allow the event source to bind to this function.
	Bind(_target awslambda.IFunction)
	EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use a stream as an event source for AWS Lambda.
type StreamEventSourceProps ¶
type StreamEventSourceProps struct {
	// Where to begin consuming the stream.
	StartingPosition awslambda.StartingPosition `field:"required" json:"startingPosition" yaml:"startingPosition"`
	// The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
	//
	// Your function receives an
	// event with all the retrieved records.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of:
	//   * 1000 for {@link DynamoEventSource}
	//   * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource}.
	BatchSize *float64 `field:"optional" json:"batchSize" yaml:"batchSize"`
	// If the stream event source mapping should be enabled.
	Enabled *bool `field:"optional" json:"enabled" yaml:"enabled"`
	// The maximum amount of time to gather records before invoking the function.
	//
	// Maximum of Duration.minutes(5).
	// See: https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
	MaxBatchingWindow awscdk.Duration `field:"optional" json:"maxBatchingWindow" yaml:"maxBatchingWindow"`
	// If the function returns an error, split the batch in two and retry.
	BisectBatchOnError *bool `field:"optional" json:"bisectBatchOnError" yaml:"bisectBatchOnError"`
	// Add filter criteria option.
	Filters *[]*map[string]interface{} `field:"optional" json:"filters" yaml:"filters"`
	// The maximum age of a record that Lambda sends to a function for processing.
	//
	// Valid Range:
	// * Minimum value of 60 seconds
	// * Maximum value of 7 days.
	MaxRecordAge awscdk.Duration `field:"optional" json:"maxRecordAge" yaml:"maxRecordAge"`
	// An Amazon SQS queue or Amazon SNS topic destination for discarded records.
	OnFailure awslambda.IEventSourceDlq `field:"optional" json:"onFailure" yaml:"onFailure"`
	// The number of batches to process from each shard concurrently.
	//
	// Valid Range:
	// * Minimum value of 1
	// * Maximum value of 10.
	ParallelizationFactor *float64 `field:"optional" json:"parallelizationFactor" yaml:"parallelizationFactor"`
	// Allow functions to return partially successful responses for a batch of records.
	// See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
	ReportBatchItemFailures *bool `field:"optional" json:"reportBatchItemFailures" yaml:"reportBatchItemFailures"`
	// Maximum number of retry attempts.
	//
	// Valid Range:
	// * Minimum value of 0
	// * Maximum value of 10000.
	RetryAttempts *float64 `field:"optional" json:"retryAttempts" yaml:"retryAttempts"`
	// The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
	//
	// Valid Range: 0 - 15 minutes.
	TumblingWindow awscdk.Duration `field:"optional" json:"tumblingWindow" yaml:"tumblingWindow"`
}
The set of properties for streaming event sources shared by Dynamo and Kinesis.
Example:
// The code below shows an example of how to instantiate this type.
// The values are placeholders you should change.
import (
	cdk "github.com/aws/aws-cdk-go/awscdk/v2"
	awslambda "github.com/aws/aws-cdk-go/awscdk/v2/awslambda"
	eventsources "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
	"github.com/aws/jsii-runtime-go"
)

var eventSourceDlq awslambda.IEventSourceDlq
var filters interface{}

streamEventSourceProps := &eventsources.StreamEventSourceProps{
	StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
	// the properties below are optional
	BatchSize:          jsii.Number(123),
	BisectBatchOnError: jsii.Boolean(false),
	Enabled:            jsii.Boolean(false),
	Filters: &[]*map[string]interface{}{
		{
			"filtersKey": filters,
		},
	},
	MaxBatchingWindow:       cdk.Duration_Minutes(jsii.Number(30)),
	MaxRecordAge:            cdk.Duration_Minutes(jsii.Number(30)),
	OnFailure:               eventSourceDlq,
	ParallelizationFactor:   jsii.Number(123),
	ReportBatchItemFailures: jsii.Boolean(false),
	RetryAttempts:           jsii.Number(123),
	TumblingWindow:          cdk.Duration_Minutes(jsii.Number(30)),
}
Source Files ¶
- awslambdaeventsources.go
- awslambdaeventsources_ApiEventSource.go
- awslambdaeventsources_ApiEventSource__runtime_type_checks.go
- awslambdaeventsources_AuthenticationMethod.go
- awslambdaeventsources_BaseStreamEventSourceProps.go
- awslambdaeventsources_DynamoEventSource.go
- awslambdaeventsources_DynamoEventSourceProps.go
- awslambdaeventsources_DynamoEventSource__runtime_type_checks.go
- awslambdaeventsources_KafkaEventSourceProps.go
- awslambdaeventsources_KinesisEventSource.go
- awslambdaeventsources_KinesisEventSourceProps.go
- awslambdaeventsources_KinesisEventSource__runtime_type_checks.go
- awslambdaeventsources_ManagedKafkaEventSource.go
- awslambdaeventsources_ManagedKafkaEventSourceProps.go
- awslambdaeventsources_ManagedKafkaEventSource__runtime_type_checks.go
- awslambdaeventsources_S3EventSource.go
- awslambdaeventsources_S3EventSourceProps.go
- awslambdaeventsources_S3EventSource__runtime_type_checks.go
- awslambdaeventsources_SelfManagedKafkaEventSource.go
- awslambdaeventsources_SelfManagedKafkaEventSourceProps.go
- awslambdaeventsources_SelfManagedKafkaEventSource__runtime_type_checks.go
- awslambdaeventsources_SnsDlq.go
- awslambdaeventsources_SnsDlq__runtime_type_checks.go
- awslambdaeventsources_SnsEventSource.go
- awslambdaeventsources_SnsEventSourceProps.go
- awslambdaeventsources_SnsEventSource__runtime_type_checks.go
- awslambdaeventsources_SqsDlq.go
- awslambdaeventsources_SqsDlq__runtime_type_checks.go
- awslambdaeventsources_SqsEventSource.go
- awslambdaeventsources_SqsEventSourceProps.go
- awslambdaeventsources_SqsEventSource__runtime_type_checks.go
- awslambdaeventsources_StreamEventSource.go
- awslambdaeventsources_StreamEventSourceProps.go
- awslambdaeventsources_StreamEventSource__runtime_type_checks.go