Documentation ¶
Index ¶
- func NewApiEventSource_Override(a ApiEventSource, method *string, path *string, ...)
- func NewDynamoEventSource_Override(d DynamoEventSource, table awsdynamodb.ITable, props *DynamoEventSourceProps)
- func NewKinesisEventSource_Override(k KinesisEventSource, stream awskinesis.IStream, ...)
- func NewManagedKafkaEventSource_Override(m ManagedKafkaEventSource, props *ManagedKafkaEventSourceProps)
- func NewS3EventSource_Override(s S3EventSource, bucket awss3.Bucket, props *S3EventSourceProps)
- func NewSelfManagedKafkaEventSource_Override(s SelfManagedKafkaEventSource, props *SelfManagedKafkaEventSourceProps)
- func NewSnsDlq_Override(s SnsDlq, topic awssns.ITopic)
- func NewSnsEventSource_Override(s SnsEventSource, topic awssns.ITopic, props *SnsEventSourceProps)
- func NewSqsDlq_Override(s SqsDlq, queue awssqs.IQueue)
- func NewSqsEventSource_Override(s SqsEventSource, queue awssqs.IQueue, props *SqsEventSourceProps)
- func NewStreamEventSource_Override(s StreamEventSource, props *StreamEventSourceProps)
- type ApiEventSource
- type AuthenticationMethod
- type DynamoEventSource
- type DynamoEventSourceProps
- type KafkaEventSourceProps
- type KinesisEventSource
- type KinesisEventSourceProps
- type ManagedKafkaEventSource
- type ManagedKafkaEventSourceProps
- type S3EventSource
- type S3EventSourceProps
- type SelfManagedKafkaEventSource
- type SelfManagedKafkaEventSourceProps
- type SnsDlq
- type SnsEventSource
- type SnsEventSourceProps
- type SqsDlq
- type SqsEventSource
- type SqsEventSourceProps
- type StreamEventSource
- type StreamEventSourceProps
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func NewApiEventSource_Override ¶
func NewApiEventSource_Override(a ApiEventSource, method *string, path *string, options *awsapigateway.MethodOptions)
func NewDynamoEventSource_Override ¶
func NewDynamoEventSource_Override(d DynamoEventSource, table awsdynamodb.ITable, props *DynamoEventSourceProps)
func NewKinesisEventSource_Override ¶
func NewKinesisEventSource_Override(k KinesisEventSource, stream awskinesis.IStream, props *KinesisEventSourceProps)
func NewManagedKafkaEventSource_Override ¶
func NewManagedKafkaEventSource_Override(m ManagedKafkaEventSource, props *ManagedKafkaEventSourceProps)
func NewS3EventSource_Override ¶
func NewS3EventSource_Override(s S3EventSource, bucket awss3.Bucket, props *S3EventSourceProps)
func NewSelfManagedKafkaEventSource_Override ¶
func NewSelfManagedKafkaEventSource_Override(s SelfManagedKafkaEventSource, props *SelfManagedKafkaEventSourceProps)
func NewSnsDlq_Override ¶
func NewSnsDlq_Override(s SnsDlq, topic awssns.ITopic)
func NewSnsEventSource_Override ¶
func NewSnsEventSource_Override(s SnsEventSource, topic awssns.ITopic, props *SnsEventSourceProps)
func NewSqsDlq_Override ¶
func NewSqsDlq_Override(s SqsDlq, queue awssqs.IQueue)
func NewSqsEventSource_Override ¶
func NewSqsEventSource_Override(s SqsEventSource, queue awssqs.IQueue, props *SqsEventSourceProps)
func NewStreamEventSource_Override ¶
func NewStreamEventSource_Override(s StreamEventSource, props *StreamEventSourceProps)
Types ¶
type ApiEventSource ¶
type ApiEventSource interface {
    awslambda.IEventSource
    Bind(target awslambda.IFunction)
}
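Example (a minimal sketch, not part of the upstream docs): it assumes an already-constructed awslambda.Function named fn and the v2 module import paths shown here, which the later snippets on this page reuse.

import (
    "github.com/aws/aws-cdk-go/awscdk/v2/awslambdaeventsources"
    "github.com/aws/jsii-runtime-go"
)

// fn is an assumed, pre-existing awslambda.Function.
fn.AddEventSource(awslambdaeventsources.NewApiEventSource(
    jsii.String("GET"),
    jsii.String("/books"),
    nil, // *awsapigateway.MethodOptions; nil accepts the defaults
))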
func NewApiEventSource ¶
func NewApiEventSource(method *string, path *string, options *awsapigateway.MethodOptions) ApiEventSource
type AuthenticationMethod ¶
type AuthenticationMethod string
The authentication method to use with SelfManagedKafkaEventSource.
const (
    AuthenticationMethod_SASL_SCRAM_512_AUTH AuthenticationMethod = "SASL_SCRAM_512_AUTH"
    AuthenticationMethod_SASL_SCRAM_256_AUTH AuthenticationMethod = "SASL_SCRAM_256_AUTH"
    AuthenticationMethod_BASIC_AUTH          AuthenticationMethod = "BASIC_AUTH"
)
type DynamoEventSource ¶
type DynamoEventSource interface {
    StreamEventSource
    EventSourceMappingId() *string
    Props() *StreamEventSourceProps
    Bind(target awslambda.IFunction)
    EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use an Amazon DynamoDB stream as an event source for AWS Lambda.
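Example (a sketch under the same imports, plus awslambda and awsdynamodb): fn is an existing function and table a stream-enabled awsdynamodb.ITable.

fn.AddEventSource(awslambdaeventsources.NewDynamoEventSource(table, &awslambdaeventsources.DynamoEventSourceProps{
    StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
    BatchSize:        jsii.Number(100),
}))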
func NewDynamoEventSource ¶
func NewDynamoEventSource(table awsdynamodb.ITable, props *DynamoEventSourceProps) DynamoEventSource
type DynamoEventSourceProps ¶
type DynamoEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of:
    //   * 1000 for {@link DynamoEventSource}
    //   * 10000 for {@link KinesisEventSource}
    BatchSize *float64 `json:"batchSize"`
    // If the function returns an error, split the batch in two and retry.
    BisectBatchOnError *bool `json:"bisectBatchOnError"`
    // If the stream event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Maximum of Duration.minutes(5).
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // The maximum age of a record that Lambda sends to a function for processing.
    //
    // Valid Range:
    // * Minimum value of 60 seconds
    // * Maximum value of 7 days
    MaxRecordAge awscdk.Duration `json:"maxRecordAge"`
    // An Amazon SQS queue or Amazon SNS topic destination for discarded records.
    OnFailure awslambda.IEventSourceDlq `json:"onFailure"`
    // The number of batches to process from each shard concurrently.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of 10
    ParallelizationFactor *float64 `json:"parallelizationFactor"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
    // Maximum number of retry attempts.
    //
    // Valid Range:
    // * Minimum value of 0
    // * Maximum value of 10000
    RetryAttempts *float64 `json:"retryAttempts"`
    // Where to begin consuming the stream.
    StartingPosition awslambda.StartingPosition `json:"startingPosition"`
    // The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
    //
    // Valid Range: 0 - 15 minutes.
    TumblingWindow awscdk.Duration `json:"tumblingWindow"`
}
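For the retry-related fields specifically, a hedged sketch of a props value that splits failing batches and bounds retries and record age (the values are arbitrary):

props := &awslambdaeventsources.DynamoEventSourceProps{
    StartingPosition:   awslambda.StartingPosition_LATEST,
    BisectBatchOnError: jsii.Bool(true),
    RetryAttempts:      jsii.Number(10),
    MaxRecordAge:       awscdk.Duration_Hours(jsii.Number(1)),
}
// pass props to NewDynamoEventSource as in the example above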
type KafkaEventSourceProps ¶
type KafkaEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of:
    //   * 1000 for {@link DynamoEventSource}
    //   * 10000 for {@link KinesisEventSource}
    BatchSize *float64 `json:"batchSize"`
    // If the function returns an error, split the batch in two and retry.
    BisectBatchOnError *bool `json:"bisectBatchOnError"`
    // If the stream event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Maximum of Duration.minutes(5).
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // The maximum age of a record that Lambda sends to a function for processing.
    //
    // Valid Range:
    // * Minimum value of 60 seconds
    // * Maximum value of 7 days
    MaxRecordAge awscdk.Duration `json:"maxRecordAge"`
    // An Amazon SQS queue or Amazon SNS topic destination for discarded records.
    OnFailure awslambda.IEventSourceDlq `json:"onFailure"`
    // The number of batches to process from each shard concurrently.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of 10
    ParallelizationFactor *float64 `json:"parallelizationFactor"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
    // Maximum number of retry attempts.
    //
    // Valid Range:
    // * Minimum value of 0
    // * Maximum value of 10000
    RetryAttempts *float64 `json:"retryAttempts"`
    // Where to begin consuming the stream.
    StartingPosition awslambda.StartingPosition `json:"startingPosition"`
    // The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
    //
    // Valid Range: 0 - 15 minutes.
    TumblingWindow awscdk.Duration `json:"tumblingWindow"`
    // The secret with the Kafka credentials; see
    // https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details.
    //
    // This field is required if your Kafka brokers are accessed over the Internet.
    Secret awssecretsmanager.ISecret `json:"secret"`
    // The Kafka topic to subscribe to.
    Topic *string `json:"topic"`
}
Properties for a Kafka event source.
type KinesisEventSource ¶
type KinesisEventSource interface {
    StreamEventSource
    EventSourceMappingId() *string
    Props() *StreamEventSourceProps
    Stream() awskinesis.IStream
    Bind(target awslambda.IFunction)
    EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use an Amazon Kinesis stream as an event source for AWS Lambda.
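A minimal sketch (same assumed imports, plus awskinesis): stream is an existing awskinesis.IStream.

fn.AddEventSource(awslambdaeventsources.NewKinesisEventSource(stream, &awslambdaeventsources.KinesisEventSourceProps{
    StartingPosition:  awslambda.StartingPosition_LATEST,
    BatchSize:         jsii.Number(500),
    MaxBatchingWindow: awscdk.Duration_Seconds(jsii.Number(30)),
}))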
func NewKinesisEventSource ¶
func NewKinesisEventSource(stream awskinesis.IStream, props *KinesisEventSourceProps) KinesisEventSource
type KinesisEventSourceProps ¶
type KinesisEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of:
    //   * 1000 for {@link DynamoEventSource}
    //   * 10000 for {@link KinesisEventSource}
    BatchSize *float64 `json:"batchSize"`
    // If the function returns an error, split the batch in two and retry.
    BisectBatchOnError *bool `json:"bisectBatchOnError"`
    // If the stream event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Maximum of Duration.minutes(5).
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // The maximum age of a record that Lambda sends to a function for processing.
    //
    // Valid Range:
    // * Minimum value of 60 seconds
    // * Maximum value of 7 days
    MaxRecordAge awscdk.Duration `json:"maxRecordAge"`
    // An Amazon SQS queue or Amazon SNS topic destination for discarded records.
    OnFailure awslambda.IEventSourceDlq `json:"onFailure"`
    // The number of batches to process from each shard concurrently.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of 10
    ParallelizationFactor *float64 `json:"parallelizationFactor"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
    // Maximum number of retry attempts.
    //
    // Valid Range:
    // * Minimum value of 0
    // * Maximum value of 10000
    RetryAttempts *float64 `json:"retryAttempts"`
    // Where to begin consuming the stream.
    StartingPosition awslambda.StartingPosition `json:"startingPosition"`
    // The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
    //
    // Valid Range: 0 - 15 minutes.
    TumblingWindow awscdk.Duration `json:"tumblingWindow"`
}
type ManagedKafkaEventSource ¶
type ManagedKafkaEventSource interface {
    StreamEventSource
    EventSourceMappingId() *string
    Props() *StreamEventSourceProps
    Bind(target awslambda.IFunction)
    EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use an Amazon MSK cluster as a streaming source for AWS Lambda.
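A minimal sketch (not from the upstream docs); the cluster ARN and topic name are placeholders, and secret is an assumed awssecretsmanager.ISecret holding the SASL/SCRAM credentials.

fn.AddEventSource(awslambdaeventsources.NewManagedKafkaEventSource(&awslambdaeventsources.ManagedKafkaEventSourceProps{
    ClusterArn:       jsii.String("arn:aws:kafka:us-east-1:111111111111:cluster/MyCluster/abcd1234"), // placeholder ARN
    Topic:            jsii.String("some-topic"),
    Secret:           secret,
    StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
}))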
func NewManagedKafkaEventSource ¶
func NewManagedKafkaEventSource(props *ManagedKafkaEventSourceProps) ManagedKafkaEventSource
type ManagedKafkaEventSourceProps ¶
type ManagedKafkaEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of:
    //   * 1000 for {@link DynamoEventSource}
    //   * 10000 for {@link KinesisEventSource}
    BatchSize *float64 `json:"batchSize"`
    // If the function returns an error, split the batch in two and retry.
    BisectBatchOnError *bool `json:"bisectBatchOnError"`
    // If the stream event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Maximum of Duration.minutes(5).
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // The maximum age of a record that Lambda sends to a function for processing.
    //
    // Valid Range:
    // * Minimum value of 60 seconds
    // * Maximum value of 7 days
    MaxRecordAge awscdk.Duration `json:"maxRecordAge"`
    // An Amazon SQS queue or Amazon SNS topic destination for discarded records.
    OnFailure awslambda.IEventSourceDlq `json:"onFailure"`
    // The number of batches to process from each shard concurrently.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of 10
    ParallelizationFactor *float64 `json:"parallelizationFactor"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
    // Maximum number of retry attempts.
    //
    // Valid Range:
    // * Minimum value of 0
    // * Maximum value of 10000
    RetryAttempts *float64 `json:"retryAttempts"`
    // Where to begin consuming the stream.
    StartingPosition awslambda.StartingPosition `json:"startingPosition"`
    // The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
    //
    // Valid Range: 0 - 15 minutes.
    TumblingWindow awscdk.Duration `json:"tumblingWindow"`
    // The secret with the Kafka credentials; see
    // https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details.
    //
    // This field is required if your Kafka brokers are accessed over the Internet.
    Secret awssecretsmanager.ISecret `json:"secret"`
    // The Kafka topic to subscribe to.
    Topic *string `json:"topic"`
    // The ARN of the MSK cluster.
    ClusterArn *string `json:"clusterArn"`
}
Properties for an MSK event source.
type S3EventSource ¶
type S3EventSource interface {
    awslambda.IEventSource
    Bucket() awss3.Bucket
    Bind(target awslambda.IFunction)
}
Use S3 bucket notifications as an event source for AWS Lambda.
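A minimal sketch (same assumed imports, plus awss3): bucket is an existing awss3.Bucket, and the prefix/suffix values are illustrative.

fn.AddEventSource(awslambdaeventsources.NewS3EventSource(bucket, &awslambdaeventsources.S3EventSourceProps{
    Events: &[]awss3.EventType{awss3.EventType_OBJECT_CREATED},
    Filters: &[]*awss3.NotificationKeyFilter{
        {Prefix: jsii.String("uploads/"), Suffix: jsii.String(".png")},
    },
}))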
func NewS3EventSource ¶
func NewS3EventSource(bucket awss3.Bucket, props *S3EventSourceProps) S3EventSource
type S3EventSourceProps ¶
type S3EventSourceProps struct {
    // The s3 event types that will trigger the notification.
    Events *[]awss3.EventType `json:"events"`
    // S3 object key filter rules to determine which objects trigger this event.
    //
    // Each filter must include a `prefix` and/or `suffix` that will be matched
    // against the s3 object key. Refer to the S3 Developer Guide for details
    // about allowed filter rules.
    Filters *[]*awss3.NotificationKeyFilter `json:"filters"`
}
type SelfManagedKafkaEventSource ¶
type SelfManagedKafkaEventSource interface {
    StreamEventSource
    Props() *StreamEventSourceProps
    Bind(target awslambda.IFunction)
    EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use a self-hosted Kafka installation as a streaming source for AWS Lambda.
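A minimal sketch for Internet-reachable brokers (the broker address and topic are placeholders; secret is an assumed awssecretsmanager.ISecret with the client credentials):

fn.AddEventSource(awslambdaeventsources.NewSelfManagedKafkaEventSource(&awslambdaeventsources.SelfManagedKafkaEventSourceProps{
    BootstrapServers:     &[]*string{jsii.String("kafka-broker-1.example.com:9092")}, // placeholder address
    Topic:                jsii.String("some-topic"),
    Secret:               secret,
    AuthenticationMethod: awslambdaeventsources.AuthenticationMethod_SASL_SCRAM_512_AUTH,
    StartingPosition:     awslambda.StartingPosition_TRIM_HORIZON,
}))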
func NewSelfManagedKafkaEventSource ¶
func NewSelfManagedKafkaEventSource(props *SelfManagedKafkaEventSourceProps) SelfManagedKafkaEventSource
type SelfManagedKafkaEventSourceProps ¶
type SelfManagedKafkaEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of:
    //   * 1000 for {@link DynamoEventSource}
    //   * 10000 for {@link KinesisEventSource}
    BatchSize *float64 `json:"batchSize"`
    // If the function returns an error, split the batch in two and retry.
    BisectBatchOnError *bool `json:"bisectBatchOnError"`
    // If the stream event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Maximum of Duration.minutes(5).
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // The maximum age of a record that Lambda sends to a function for processing.
    //
    // Valid Range:
    // * Minimum value of 60 seconds
    // * Maximum value of 7 days
    MaxRecordAge awscdk.Duration `json:"maxRecordAge"`
    // An Amazon SQS queue or Amazon SNS topic destination for discarded records.
    OnFailure awslambda.IEventSourceDlq `json:"onFailure"`
    // The number of batches to process from each shard concurrently.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of 10
    ParallelizationFactor *float64 `json:"parallelizationFactor"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
    // Maximum number of retry attempts.
    //
    // Valid Range:
    // * Minimum value of 0
    // * Maximum value of 10000
    RetryAttempts *float64 `json:"retryAttempts"`
    // Where to begin consuming the stream.
    StartingPosition awslambda.StartingPosition `json:"startingPosition"`
    // The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
    //
    // Valid Range: 0 - 15 minutes.
    TumblingWindow awscdk.Duration `json:"tumblingWindow"`
    // The secret with the Kafka credentials; see
    // https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details.
    //
    // This field is required if your Kafka brokers are accessed over the Internet.
    Secret awssecretsmanager.ISecret `json:"secret"`
    // The Kafka topic to subscribe to.
    Topic *string `json:"topic"`
    // The list of host and port pairs that are the addresses of the Kafka brokers in a "bootstrap" Kafka cluster that a Kafka client connects to initially to bootstrap itself.
    //
    // They are in the format `abc.xyz.com:xxxx`.
    BootstrapServers *[]*string `json:"bootstrapServers"`
    // The authentication method for your Kafka cluster.
    AuthenticationMethod AuthenticationMethod `json:"authenticationMethod"`
    // If your Kafka brokers are only reachable via VPC, provide the security group here.
    SecurityGroup awsec2.ISecurityGroup `json:"securityGroup"`
    // If your Kafka brokers are only reachable via VPC, provide the VPC here.
    Vpc awsec2.IVpc `json:"vpc"`
    // If your Kafka brokers are only reachable via VPC, provide the subnets selection here.
    VpcSubnets *awsec2.SubnetSelection `json:"vpcSubnets"`
}
Properties for a self-managed Kafka cluster event source.
If your Kafka brokers are only reachable from within a VPC, make sure to configure the Vpc, VpcSubnets, and SecurityGroup properties; see the sketch below.
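A hedged sketch of the VPC-only case: vpc, securityGroup, and secret are all assumed pre-existing constructs, and the broker address is a placeholder.

props := &awslambdaeventsources.SelfManagedKafkaEventSourceProps{
    BootstrapServers: &[]*string{jsii.String("10.0.0.5:9092")}, // placeholder in-VPC broker
    Topic:            jsii.String("orders"),
    Secret:           secret,
    StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
    // Brokers reachable only from inside the VPC:
    Vpc:           vpc,                                                       // awsec2.IVpc
    VpcSubnets:    &awsec2.SubnetSelection{Subnets: vpc.PrivateSubnets()},    // place the mapping in the private subnets
    SecurityGroup: securityGroup,                                             // must allow traffic to the brokers
}
fn.AddEventSource(awslambdaeventsources.NewSelfManagedKafkaEventSource(props))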
type SnsDlq ¶
type SnsDlq interface {
    awslambda.IEventSourceDlq
    Bind(_target awslambda.IEventSourceMapping, targetHandler awslambda.IFunction) *awslambda.DlqDestinationConfig
}
An SNS dead letter queue destination configuration for a Lambda event source.
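A sketch of using an SNS topic as the OnFailure destination of a stream source. The plain NewSnsDlq constructor is assumed by analogy with the other types on this page (only the _Override form is listed above); topic and table are assumed pre-existing constructs.

dlq := awslambdaeventsources.NewSnsDlq(topic) // topic: an awssns.ITopic
fn.AddEventSource(awslambdaeventsources.NewDynamoEventSource(table, &awslambdaeventsources.DynamoEventSourceProps{
    StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
    OnFailure:        dlq,
}))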
type SnsEventSource ¶
type SnsEventSource interface {
    awslambda.IEventSource
    Topic() awssns.ITopic
    Bind(target awslambda.IFunction)
}
Use an Amazon SNS topic as an event source for AWS Lambda.
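A hedged sketch (topic is an assumed awssns.ITopic; the filter attribute and values are illustrative, and the awssns helper names follow the v2 bindings):

fn.AddEventSource(awslambdaeventsources.NewSnsEventSource(topic, &awslambdaeventsources.SnsEventSourceProps{
    FilterPolicy: &map[string]awssns.SubscriptionFilter{
        "color": awssns.SubscriptionFilter_StringFilter(&awssns.StringConditions{
            Allowlist: &[]*string{jsii.String("red"), jsii.String("orange")},
        }),
    },
}))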
func NewSnsEventSource ¶
func NewSnsEventSource(topic awssns.ITopic, props *SnsEventSourceProps) SnsEventSource
type SnsEventSourceProps ¶
type SnsEventSourceProps struct {
    // Queue to be used as dead letter queue.
    //
    // If not passed no dead letter queue is enabled.
    DeadLetterQueue awssqs.IQueue `json:"deadLetterQueue"`
    // The filter policy.
    FilterPolicy *map[string]awssns.SubscriptionFilter `json:"filterPolicy"`
}
Properties forwarded to the Lambda Subscription.
type SqsDlq ¶
type SqsDlq interface {
    awslambda.IEventSourceDlq
    Bind(_target awslambda.IEventSourceMapping, targetHandler awslambda.IFunction) *awslambda.DlqDestinationConfig
}
An SQS dead letter queue destination configuration for a Lambda event source.
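A sketch of routing failed Kinesis records to an SQS queue. As with SnsDlq, the plain NewSqsDlq constructor is assumed by analogy; queue and stream are assumed pre-existing constructs.

fn.AddEventSource(awslambdaeventsources.NewKinesisEventSource(stream, &awslambdaeventsources.KinesisEventSourceProps{
    StartingPosition: awslambda.StartingPosition_TRIM_HORIZON,
    OnFailure:        awslambdaeventsources.NewSqsDlq(queue), // queue: an awssqs.IQueue
    RetryAttempts:    jsii.Number(2),
}))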
type SqsEventSource ¶
type SqsEventSource interface {
    awslambda.IEventSource
    EventSourceMappingId() *string
    Queue() awssqs.IQueue
    Bind(target awslambda.IFunction)
}
Use an Amazon SQS queue as an event source for AWS Lambda.
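A minimal sketch (queue is an assumed awssqs.IQueue; values are arbitrary):

fn.AddEventSource(awslambdaeventsources.NewSqsEventSource(queue, &awslambdaeventsources.SqsEventSourceProps{
    BatchSize:               jsii.Number(10),
    MaxBatchingWindow:       awscdk.Duration_Minutes(jsii.Number(1)),
    ReportBatchItemFailures: jsii.Bool(true),
}))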
func NewSqsEventSource ¶
func NewSqsEventSource(queue awssqs.IQueue, props *SqsEventSourceProps) SqsEventSource
type SqsEventSourceProps ¶
type SqsEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range: Minimum value of 1. Maximum value of 10.
    // If `maxBatchingWindow` is configured, this value can go up to 10,000.
    BatchSize *float64 `json:"batchSize"`
    // If the SQS event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Valid Range: Minimum value of 0 minutes. Maximum value of 5 minutes.
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
}
type StreamEventSource ¶
type StreamEventSource interface {
    awslambda.IEventSource
    Props() *StreamEventSourceProps
    Bind(_target awslambda.IFunction)
    EnrichMappingOptions(options *awslambda.EventSourceMappingOptions) *awslambda.EventSourceMappingOptions
}
Use a stream as an event source for AWS Lambda.
type StreamEventSourceProps ¶
type StreamEventSourceProps struct {
    // The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
    //
    // Your function receives an event with all the retrieved records.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of:
    //   * 1000 for {@link DynamoEventSource}
    //   * 10000 for {@link KinesisEventSource}
    BatchSize *float64 `json:"batchSize"`
    // If the function returns an error, split the batch in two and retry.
    BisectBatchOnError *bool `json:"bisectBatchOnError"`
    // If the stream event source mapping should be enabled.
    Enabled *bool `json:"enabled"`
    // The maximum amount of time to gather records before invoking the function.
    //
    // Maximum of Duration.minutes(5).
    MaxBatchingWindow awscdk.Duration `json:"maxBatchingWindow"`
    // The maximum age of a record that Lambda sends to a function for processing.
    //
    // Valid Range:
    // * Minimum value of 60 seconds
    // * Maximum value of 7 days
    MaxRecordAge awscdk.Duration `json:"maxRecordAge"`
    // An Amazon SQS queue or Amazon SNS topic destination for discarded records.
    OnFailure awslambda.IEventSourceDlq `json:"onFailure"`
    // The number of batches to process from each shard concurrently.
    //
    // Valid Range:
    // * Minimum value of 1
    // * Maximum value of 10
    ParallelizationFactor *float64 `json:"parallelizationFactor"`
    // Allow functions to return partially successful responses for a batch of records.
    // See: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting
    ReportBatchItemFailures *bool `json:"reportBatchItemFailures"`
    // Maximum number of retry attempts.
    //
    // Valid Range:
    // * Minimum value of 0
    // * Maximum value of 10000
    RetryAttempts *float64 `json:"retryAttempts"`
    // Where to begin consuming the stream.
    StartingPosition awslambda.StartingPosition `json:"startingPosition"`
    // The size of the tumbling windows to group records sent to DynamoDB or Kinesis.
    //
    // Valid Range: 0 - 15 minutes.
    TumblingWindow awscdk.Duration `json:"tumblingWindow"`
}
The set of properties for event sources that follow the streaming model, such as DynamoDB, Kinesis, and Kafka.