Documentation ¶
Index ¶
- type AccessDeniedException
- type Action
- type AdditionalOptionKeys
- type AggFunction
- type Aggregate
- type AggregateOperation
- type AllowedValue
- type AlreadyExistsException
- type AmazonRedshiftAdvancedOption
- type AmazonRedshiftNodeData
- type AmazonRedshiftSource
- type AmazonRedshiftTarget
- type AnnotationError
- type ApplyMapping
- type AthenaConnectorSource
- type AuditContext
- type AuthConfiguration
- type AuthenticationConfiguration
- type AuthenticationConfigurationInput
- type AuthenticationType
- type AuthorizationCodeProperties
- type BackfillError
- type BackfillErrorCode
- type BasicAuthenticationCredentials
- type BasicCatalogTarget
- type BatchGetTableOptimizerEntry
- type BatchGetTableOptimizerError
- type BatchStopJobRunError
- type BatchStopJobRunSuccessfulSubmission
- type BatchTableOptimizer
- type BatchUpdatePartitionFailureEntry
- type BatchUpdatePartitionRequestEntry
- type BinaryColumnStatisticsData
- type Blueprint
- type BlueprintDetails
- type BlueprintRun
- type BlueprintRunState
- type BlueprintStatus
- type BooleanColumnStatisticsData
- type Capabilities
- type Catalog
- type CatalogDeltaSource
- type CatalogEncryptionMode
- type CatalogEntry
- type CatalogHudiSource
- type CatalogImportStatus
- type CatalogInput
- type CatalogKafkaSource
- type CatalogKinesisSource
- type CatalogProperties
- type CatalogPropertiesOutput
- type CatalogSchemaChangePolicy
- type CatalogSource
- type CatalogTarget
- type Classifier
- type CloudWatchEncryption
- type CloudWatchEncryptionMode
- type CodeGenConfigurationNode
- type CodeGenEdge
- type CodeGenNode
- type CodeGenNodeArg
- type Column
- type ColumnError
- type ColumnImportance
- type ColumnRowFilter
- type ColumnStatistics
- type ColumnStatisticsData
- type ColumnStatisticsError
- type ColumnStatisticsState
- type ColumnStatisticsTaskNotRunningException
- type ColumnStatisticsTaskRun
- type ColumnStatisticsTaskRunningException
- type ColumnStatisticsTaskSettings
- type ColumnStatisticsTaskStoppingException
- type ColumnStatisticsType
- type CompactionMetrics
- type Comparator
- type Compatibility
- type CompressionType
- type ComputationType
- type ComputeEnvironment
- type ComputeEnvironmentConfiguration
- type ConcurrentModificationException
- type ConcurrentRunsExceededException
- type Condition
- type ConditionCheckFailureException
- type ConditionExpression
- type ConfigurationObject
- type ConflictException
- type ConfusionMatrix
- type Connection
- type ConnectionInput
- type ConnectionPasswordEncryption
- type ConnectionPropertyKey
- type ConnectionStatus
- type ConnectionType
- type ConnectionTypeBrief
- type ConnectionsList
- type ConnectorDataSource
- type ConnectorDataTarget
- type Crawl
- type CrawlState
- type Crawler
- type CrawlerHistory
- type CrawlerHistoryState
- type CrawlerLineageSettings
- type CrawlerMetrics
- type CrawlerNodeDetails
- type CrawlerNotRunningException
- type CrawlerRunningException
- type CrawlerState
- type CrawlerStoppingException
- type CrawlerTargets
- type CrawlsFilter
- type CreateCsvClassifierRequest
- type CreateGrokClassifierRequest
- type CreateJsonClassifierRequest
- type CreateXMLClassifierRequest
- type CsvClassifier
- type CsvHeaderOption
- type CsvSerdeOption
- type CustomCode
- type CustomEntityType
- type DQCompositeRuleEvaluationMethod
- type DQResultsPublishingOptions
- type DQStopJobOnFailureOptions
- type DQStopJobOnFailureTiming
- type DQTransformOutput
- type DataCatalogEncryptionSettings
- type DataFormat
- type DataLakeAccessProperties
- type DataLakeAccessPropertiesOutput
- type DataLakePrincipal
- type DataOperation
- type DataQualityAnalyzerResult
- type DataQualityEncryption
- type DataQualityEncryptionMode
- type DataQualityEvaluationRunAdditionalRunOptions
- type DataQualityMetricValues
- type DataQualityModelStatus
- type DataQualityObservation
- type DataQualityResult
- type DataQualityResultDescription
- type DataQualityResultFilterCriteria
- type DataQualityRuleRecommendationRunDescription
- type DataQualityRuleRecommendationRunFilter
- type DataQualityRuleResult
- type DataQualityRuleResultStatus
- type DataQualityRulesetEvaluationRunDescription
- type DataQualityRulesetEvaluationRunFilter
- type DataQualityRulesetFilterCriteria
- type DataQualityRulesetListDetails
- type DataQualityTargetTable
- type DataSource
- type Database
- type DatabaseAttributes
- type DatabaseIdentifier
- type DatabaseInput
- type DatapointInclusionAnnotation
- type Datatype
- type DateColumnStatisticsData
- type DecimalColumnStatisticsData
- type DecimalNumber
- type DeleteBehavior
- type DeltaTarget
- type DeltaTargetCompressionType
- type DevEndpoint
- type DevEndpointCustomLibraries
- type DirectJDBCSource
- type DirectKafkaSource
- type DirectKinesisSource
- type DirectSchemaChangePolicy
- type DoubleColumnStatisticsData
- type DropDuplicates
- type DropFields
- type DropNullFields
- type DynamicTransform
- type DynamoDBCatalogSource
- type DynamoDBTarget
- type Edge
- type EnableHybridValues
- type EncryptionAtRest
- type EncryptionConfiguration
- type Entity
- type EntityNotFoundException
- type ErrorDetail
- type ErrorDetails
- type EvaluateDataQuality
- type EvaluateDataQualityMultiFrame
- type EvaluationMetrics
- type EventBatchingCondition
- type ExecutionAttempt
- type ExecutionClass
- type ExecutionProperty
- type ExecutionStatus
- type ExistCondition
- type ExportLabelsTaskRunProperties
- type FederatedCatalog
- type FederatedDatabase
- type FederatedResourceAlreadyExistsException
- type FederatedTable
- type FederationSourceErrorCode
- type FederationSourceException
- type FederationSourceRetryableException
- type Field
- type FieldDataType
- type FieldFilterOperator
- type FieldName
- type FillMissingValues
- type Filter
- type FilterExpression
- type FilterLogicalOperator
- type FilterOperation
- type FilterOperator
- type FilterValue
- type FilterValueType
- type FindMatchesMetrics
- type FindMatchesParameters
- type FindMatchesTaskRunProperties
- type GetConnectionsFilter
- type GlueEncryptionException
- type GluePolicy
- type GlueRecordType
- type GlueSchema
- type GlueStudioSchemaColumn
- type GlueTable
- type GovernedCatalogSource
- type GovernedCatalogTarget
- type GrokClassifier
- type HudiTarget
- type HudiTargetCompressionType
- type IcebergCompactionMetrics
- type IcebergInput
- type IcebergOrphanFileDeletionConfiguration
- type IcebergOrphanFileDeletionMetrics
- type IcebergRetentionConfiguration
- type IcebergRetentionMetrics
- type IcebergTarget
- type IdempotentParameterMismatchException
- type IllegalBlueprintStateException
- type IllegalSessionStateException
- type IllegalWorkflowStateException
- type ImportLabelsTaskRunProperties
- type InboundIntegration
- type InclusionAnnotationValue
- type Integration
- type IntegrationConflictOperationFault
- type IntegrationError
- type IntegrationFilter
- type IntegrationNotFoundFault
- type IntegrationPartition
- type IntegrationQuotaExceededFault
- type IntegrationStatus
- type InternalServerException
- type InternalServiceException
- type InvalidInputException
- type InvalidIntegrationStateFault
- type InvalidStateException
- type JDBCConnectionType
- type JDBCConnectorOptions
- type JDBCConnectorSource
- type JDBCConnectorTarget
- type JDBCDataType
- type JdbcMetadataEntry
- type JdbcTarget
- type Job
- type JobBookmarkEntry
- type JobBookmarksEncryption
- type JobBookmarksEncryptionMode
- type JobCommand
- type JobMode
- type JobNodeDetails
- type JobRun
- type JobRunState
- type JobUpdate
- type Join
- type JoinColumn
- type JoinType
- type JsonClassifier
- type KMSKeyNotAccessibleFault
- type KafkaStreamingSourceOptions
- type KeySchemaElement
- type KinesisStreamingSourceOptions
- type LabelingSetGenerationTaskRunProperties
- type LakeFormationConfiguration
- type Language
- type LastActiveDefinition
- type LastCrawlInfo
- type LastCrawlStatus
- type LineageConfiguration
- type Location
- type Logical
- type LogicalOperator
- type LongColumnStatisticsData
- type MLTransform
- type MLTransformNotReadyException
- type MLUserDataEncryption
- type MLUserDataEncryptionModeString
- type Mapping
- type MappingEntry
- type Merge
- type MetadataInfo
- type MetadataKeyValuePair
- type MetadataOperation
- type MetricBasedObservation
- type MicrosoftSQLServerCatalogSource
- type MicrosoftSQLServerCatalogTarget
- type MongoDBTarget
- type MySQLCatalogSource
- type MySQLCatalogTarget
- type NoScheduleException
- type Node
- type NodeType
- type NotificationProperty
- type NullCheckBoxList
- type NullValueField
- type OAuth2ClientApplication
- type OAuth2Credentials
- type OAuth2GrantType
- type OAuth2Properties
- type OAuth2PropertiesInput
- type OpenTableFormatInput
- type OperationNotSupportedException
- type OperationTimeoutException
- type Option
- type OracleSQLCatalogSource
- type OracleSQLCatalogTarget
- type Order
- type OrphanFileDeletionConfiguration
- type OrphanFileDeletionMetrics
- type OtherMetadataValueListItem
- type PIIDetection
- type ParamType
- type ParquetCompressionType
- type Partition
- type PartitionError
- type PartitionIndex
- type PartitionIndexDescriptor
- type PartitionIndexStatus
- type PartitionInput
- type PartitionValueList
- type Permission
- type PermissionType
- type PermissionTypeMismatchException
- type PhysicalConnectionRequirements
- type PiiType
- type PostgreSQLCatalogSource
- type PostgreSQLCatalogTarget
- type Predecessor
- type Predicate
- type PrincipalPermissions
- type PrincipalType
- type ProfileConfiguration
- type Property
- type PropertyPredicate
- type PropertyType
- type QuerySessionContext
- type QuoteChar
- type Recipe
- type RecipeAction
- type RecipeReference
- type RecipeStep
- type RecrawlBehavior
- type RecrawlPolicy
- type RedshiftSource
- type RedshiftTarget
- type RegistryId
- type RegistryListItem
- type RegistryStatus
- type RelationalCatalogSource
- type RenameField
- type ResourceAction
- type ResourceNotFoundException
- type ResourceNotReadyException
- type ResourceNumberLimitExceededException
- type ResourceShareType
- type ResourceState
- type ResourceType
- type ResourceUri
- type RetentionConfiguration
- type RetentionMetrics
- type RunIdentifier
- type RunMetrics
- type S3CatalogDeltaSource
- type S3CatalogHudiSource
- type S3CatalogSource
- type S3CatalogTarget
- type S3CsvSource
- type S3DeltaCatalogTarget
- type S3DeltaDirectTarget
- type S3DeltaSource
- type S3DirectSourceAdditionalOptions
- type S3DirectTarget
- type S3Encryption
- type S3EncryptionMode
- type S3GlueParquetTarget
- type S3HudiCatalogTarget
- type S3HudiDirectTarget
- type S3HudiSource
- type S3JsonSource
- type S3ParquetSource
- type S3SourceAdditionalOptions
- type S3Target
- type Schedule
- type ScheduleState
- type ScheduleType
- type SchedulerNotRunningException
- type SchedulerRunningException
- type SchedulerTransitioningException
- type SchemaChangePolicy
- type SchemaColumn
- type SchemaDiffType
- type SchemaId
- type SchemaListItem
- type SchemaReference
- type SchemaStatus
- type SchemaVersionErrorItem
- type SchemaVersionListItem
- type SchemaVersionNumber
- type SchemaVersionStatus
- type SecurityConfiguration
- type Segment
- type SelectFields
- type SelectFromCollection
- type Separator
- type SerDeInfo
- type Session
- type SessionCommand
- type SessionStatus
- type SettingSource
- type SkewedInfo
- type SnowflakeNodeData
- type SnowflakeSource
- type SnowflakeTarget
- type Sort
- type SortCriterion
- type SortDirectionType
- type SourceControlAuthStrategy
- type SourceControlDetails
- type SourceControlProvider
- type SourceProcessingProperties
- type SourceTableConfig
- type SparkConnectorSource
- type SparkConnectorTarget
- type SparkSQL
- type Spigot
- type SplitFields
- type SqlAlias
- type StartingEventBatchCondition
- type StartingPosition
- type Statement
- type StatementOutput
- type StatementOutputData
- type StatementState
- type StatisticAnnotation
- type StatisticEvaluationLevel
- type StatisticModelResult
- type StatisticSummary
- type StatusDetails
- type StorageDescriptor
- type StreamingDataPreviewOptions
- type StringColumnStatisticsData
- type SupportedDialect
- type Table
- type TableAttributes
- type TableError
- type TableIdentifier
- type TableInput
- type TableOptimizer
- type TableOptimizerConfiguration
- type TableOptimizerEventType
- type TableOptimizerRun
- type TableOptimizerType
- type TableOptimizerVpcConfiguration
- type TableOptimizerVpcConfigurationMemberGlueConnectionName
- type TableStatus
- type TableVersion
- type TableVersionError
- type Tag
- type TargetFormat
- type TargetProcessingProperties
- type TargetRedshiftCatalog
- type TargetResourceNotFound
- type TargetTableConfig
- type TaskRun
- type TaskRunFilterCriteria
- type TaskRunProperties
- type TaskRunSortColumnType
- type TaskRunSortCriteria
- type TaskStatusType
- type TaskType
- type TestConnectionInput
- type ThrottlingException
- type TimestampFilter
- type TimestampedInclusionAnnotation
- type TransformConfigParameter
- type TransformEncryption
- type TransformFilterCriteria
- type TransformParameters
- type TransformSortColumnType
- type TransformSortCriteria
- type TransformStatusType
- type TransformType
- type Trigger
- type TriggerNodeDetails
- type TriggerState
- type TriggerType
- type TriggerUpdate
- type UnfilteredPartition
- type Union
- type UnionType
- type UnknownUnionMember
- type UnnestSpec
- type UpdateBehavior
- type UpdateCatalogBehavior
- type UpdateCsvClassifierRequest
- type UpdateGrokClassifierRequest
- type UpdateJsonClassifierRequest
- type UpdateXMLClassifierRequest
- type UpsertRedshiftTargetOptions
- type UsageProfileDefinition
- type UserDefinedFunction
- type UserDefinedFunctionInput
- type ValidationException
- type VersionMismatchException
- type ViewDefinition
- type ViewDefinitionInput
- type ViewDialect
- type ViewRepresentation
- type ViewRepresentationInput
- type ViewUpdateAction
- type ViewValidation
- type WorkerType
- type Workflow
- type WorkflowGraph
- type WorkflowRun
- type WorkflowRunStatistics
- type WorkflowRunStatus
- type XMLClassifier
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AccessDeniedException ¶
type AccessDeniedException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
Access to a resource was denied.
func (*AccessDeniedException) Error ¶
func (e *AccessDeniedException) Error() string
func (*AccessDeniedException) ErrorCode ¶
func (e *AccessDeniedException) ErrorCode() string
func (*AccessDeniedException) ErrorFault ¶
func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault
func (*AccessDeniedException) ErrorMessage ¶
func (e *AccessDeniedException) ErrorMessage() string
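All modeled exception types in this package satisfy the standard error interface, so they can be detected with errors.As after any Glue API call. A minimal sketch (the wrapper function name is illustrative):

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// reportAccessDenied logs details if err wraps an AccessDeniedException.
func reportAccessDenied(err error) {
	var ade *types.AccessDeniedException
	if errors.As(err, &ade) {
		log.Printf("glue access denied: %s (code %s)", ade.ErrorMessage(), ade.ErrorCode())
	}
}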
type Action ¶
type Action struct { // The job arguments used when this trigger fires. For this job run, they replace // the default arguments set in the job definition itself. // // You can specify arguments here that your own job-execution script consumes, as // well as arguments that Glue itself consumes. // // For information about how to specify and consume your own Job arguments, see // the [Calling Glue APIs in Python]topic in the developer guide. // // For information about the key-value pairs that Glue consumes to set up your // job, see the [Special Parameters Used by Glue]topic in the developer guide. // // [Calling Glue APIs in Python]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html // [Special Parameters Used by Glue]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html Arguments map[string]string // The name of the crawler to be used with this action. CrawlerName *string // The name of a job to be run. JobName *string // Specifies configuration properties of a job run notification. NotificationProperty *NotificationProperty // The name of the SecurityConfiguration structure to be used with this action. SecurityConfiguration *string // The JobRun timeout in minutes. This is the maximum time that a job run can // consume resources before it is terminated and enters TIMEOUT status. This // overrides the timeout value set in the parent job. // // Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the // jobs will throw an exception. // // When the value is left blank, the timeout is defaulted to 2880 minutes. // // Any existing Glue jobs that had a timeout value greater than 7 days will be // defaulted to 7 days. For instance if you have specified a timeout of 20 days for // a batch job, it will be stopped on the 7th day. // // For streaming jobs, if you have set up a maintenance window, it will be // restarted during the maintenance window after 7 days. Timeout *int32 // contains filtered or unexported fields }
Defines an action to be initiated by a trigger.
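A rough sketch of populating an Action for use in a trigger definition; the job name, argument values, and timeout below are placeholders rather than values taken from this documentation (aws.String and aws.Int32 are helpers from github.com/aws/aws-sdk-go-v2/aws):

// A trigger action that starts a hypothetical job with one override
// argument and a 60-minute timeout.
var action = types.Action{
	JobName: aws.String("nightly-etl"),
	Arguments: map[string]string{
		"--enable-metrics": "true",
	},
	Timeout:               aws.Int32(60),
	SecurityConfiguration: aws.String("my-security-configuration"),
}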
type AdditionalOptionKeys ¶ added in v1.49.0
type AdditionalOptionKeys string
const ( AdditionalOptionKeysCacheOption AdditionalOptionKeys = "performanceTuning.caching" AdditionalOptionKeysObservationsOption AdditionalOptionKeys = "observations.scope" )
Enum values for AdditionalOptionKeys
func (AdditionalOptionKeys) Values ¶ added in v1.49.0
func (AdditionalOptionKeys) Values() []AdditionalOptionKeys
Values returns all known values for AdditionalOptionKeys. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
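Because the enum may gain values in newer client versions, code that needs the current list can call Values on a zero value of the type, for example:

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// printAdditionalOptionKeys lists every AdditionalOptionKeys value known
// to this client version.
func printAdditionalOptionKeys() {
	for _, k := range types.AdditionalOptionKeys("").Values() {
		fmt.Println(k)
	}
}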
type AggFunction ¶ added in v1.25.0
type AggFunction string
const ( AggFunctionAvg AggFunction = "avg" AggFunctionCountDistinct AggFunction = "countDistinct" AggFunctionCount AggFunction = "count" AggFunctionFirst AggFunction = "first" AggFunctionLast AggFunction = "last" AggFunctionKurtosis AggFunction = "kurtosis" AggFunctionMax AggFunction = "max" AggFunctionMin AggFunction = "min" AggFunctionSkewness AggFunction = "skewness" AggFunctionStddevSamp AggFunction = "stddev_samp" AggFunctionStddevPop AggFunction = "stddev_pop" AggFunctionSum AggFunction = "sum" AggFunctionSumDistinct AggFunction = "sumDistinct" AggFunctionVarSamp AggFunction = "var_samp" AggFunctionVarPop AggFunction = "var_pop" )
Enum values for AggFunction
func (AggFunction) Values ¶ added in v1.25.0
func (AggFunction) Values() []AggFunction
Values returns all known values for AggFunction. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Aggregate ¶ added in v1.25.0
type Aggregate struct { // Specifies the aggregate functions to be performed on specified fields. // // This member is required. Aggs []AggregateOperation // Specifies the fields to group by. // // This member is required. Groups [][]string // Specifies the fields and rows to use as inputs for the aggregate transform. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // contains filtered or unexported fields }
Specifies a transform that groups rows by chosen fields and computes the aggregated value by specified function.
type AggregateOperation ¶ added in v1.25.0
type AggregateOperation struct { // Specifies the aggregation function to apply. // // Possible aggregation functions include: avg countDistinct, count, first, last, // kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, // var_samp, var_pop // // This member is required. AggFunc AggFunction // Specifies the column on the data set on which the aggregation function will be // applied. // // This member is required. Column []string // contains filtered or unexported fields }
Specifies the set of parameters needed to perform aggregation in the aggregate transform.
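A sketch of an Aggregate node that groups by one column and computes two aggregations; the node, input, and column names are placeholders:

// Groups rows by "region" and computes the sum and average of "amount".
var aggregateNode = types.Aggregate{
	Name:   aws.String("TotalsByRegion"),
	Inputs: []string{"upstream-node"},
	Groups: [][]string{{"region"}},
	Aggs: []types.AggregateOperation{
		{Column: []string{"amount"}, AggFunc: types.AggFunctionSum},
		{Column: []string{"amount"}, AggFunc: types.AggFunctionAvg},
	},
}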
type AllowedValue ¶ added in v1.103.0
type AllowedValue struct { // The value allowed for the property. // // This member is required. Value *string // A description of the allowed value. Description *string // contains filtered or unexported fields }
An object representing a value allowed for a property.
type AlreadyExistsException ¶
type AlreadyExistsException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
A resource to be created or added already exists.
func (*AlreadyExistsException) Error ¶
func (e *AlreadyExistsException) Error() string
func (*AlreadyExistsException) ErrorCode ¶
func (e *AlreadyExistsException) ErrorCode() string
func (*AlreadyExistsException) ErrorFault ¶
func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault
func (*AlreadyExistsException) ErrorMessage ¶
func (e *AlreadyExistsException) ErrorMessage() string
type AmazonRedshiftAdvancedOption ¶ added in v1.47.0
type AmazonRedshiftAdvancedOption struct { // The key for the additional connection option. Key *string // The value for the additional connection option. Value *string // contains filtered or unexported fields }
Specifies an optional value when connecting to the Redshift cluster.
type AmazonRedshiftNodeData ¶ added in v1.47.0
type AmazonRedshiftNodeData struct { // The access type for the Redshift connection. Can be a direct connection or // catalog connections. AccessType *string // Specifies how writing to a Redshift cluster will occur. Action *string // Optional values when connecting to the Redshift cluster. AdvancedOptions []AmazonRedshiftAdvancedOption // The name of the Glue Data Catalog database when working with a data catalog. CatalogDatabase *Option // The Redshift schema name when working with a data catalog. CatalogRedshiftSchema *string // The database table to read from. CatalogRedshiftTable *string // The Glue Data Catalog table name when working with a data catalog. CatalogTable *Option // The Glue connection to the Redshift cluster. Connection *Option // Specifies the name of the connection that is associated with the catalog table // used. CrawlerConnection *string // Optional. The role name used when connecting to S3. The IAM role will default // to the role on the job when left blank. IamRole *Option // The action used to determine how a MERGE in a Redshift sink will be handled. MergeAction *string // The SQL used in a custom merge to deal with matching records. MergeClause *string // The action used to determine how a MERGE in a Redshift sink will be handled // when an existing record matches a new record. MergeWhenMatched *string // The action used to determine how a MERGE in a Redshift sink will be handled // when an existing record doesn't match a new record. MergeWhenNotMatched *string // The SQL run after a MERGE or APPEND with upsert completes. PostAction *string // The SQL used before a MERGE or APPEND with upsert is run. PreAction *string // The SQL used to fetch the data from a Redshift source when the SourceType is // 'query'. SampleQuery *string // The Redshift schema name when working with a direct connection. Schema *Option // The list of column names used to determine a matching record when doing a MERGE // or APPEND with upsert. SelectedColumns []Option // The source type to specify whether a specific table is the source or a custom // query. SourceType *string // The name of the temporary staging table that is used when doing a MERGE or // APPEND with upsert. StagingTable *string // The Redshift table name when working with a direct connection. Table *Option // Specifies the prefix to a table. TablePrefix *string // The array of schema output for a given node. TableSchema []Option // The Amazon S3 path where temporary data can be staged when copying out of the // database. TempDir *string // The action used on Redshift sinks when doing an APPEND. Upsert bool // contains filtered or unexported fields }
Specifies an Amazon Redshift node.
type AmazonRedshiftSource ¶ added in v1.47.0
type AmazonRedshiftSource struct { // Specifies the data of the Amazon Redshift source node. Data *AmazonRedshiftNodeData // The name of the Amazon Redshift source. Name *string // contains filtered or unexported fields }
Specifies an Amazon Redshift source.
type AmazonRedshiftTarget ¶ added in v1.47.0
type AmazonRedshiftTarget struct { // Specifies the data of the Amazon Redshift target node. Data *AmazonRedshiftNodeData // The nodes that are inputs to the data target. Inputs []string // The name of the Amazon Redshift target. Name *string // contains filtered or unexported fields }
Specifies an Amazon Redshift target.
type AnnotationError ¶ added in v1.92.0
type AnnotationError struct { // The reason why the annotation failed. FailureReason *string // The Profile ID for the failed annotation. ProfileId *string // The Statistic ID for the failed annotation. StatisticId *string // contains filtered or unexported fields }
A failed annotation.
type ApplyMapping ¶ added in v1.25.0
type ApplyMapping struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // Specifies the mapping of data property keys in the data source to data property // keys in the data target. // // This member is required. Mapping []Mapping // The name of the transform node. // // This member is required. Name *string // contains filtered or unexported fields }
Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.
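A sketch of an ApplyMapping node that renames a single key, assuming the Mapping type (documented elsewhere in this package) with FromPath, FromType, ToKey, and ToType fields; the node and column names are placeholders:

// Renames the source key "cust_id" to "customer_id" without changing its type.
var applyMappingNode = types.ApplyMapping{
	Name:   aws.String("RenameCustomerId"),
	Inputs: []string{"source-node"},
	Mapping: []types.Mapping{
		{
			FromPath: []string{"cust_id"},
			FromType: aws.String("string"),
			ToKey:    aws.String("customer_id"),
			ToType:   aws.String("string"),
		},
	},
}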
type AthenaConnectorSource ¶ added in v1.25.0
type AthenaConnectorSource struct { // The name of the connection that is associated with the connector. // // This member is required. ConnectionName *string // The type of connection, such as marketplace.athena or custom.athena, // designating a connection to an Amazon Athena data store. // // This member is required. ConnectionType *string // The name of a connector that assists with accessing the data store in Glue // Studio. // // This member is required. ConnectorName *string // The name of the data source. // // This member is required. Name *string // The name of the Cloudwatch log group to read from. For example, // /aws-glue/jobs/output . // // This member is required. SchemaName *string // The name of the table in the data source. ConnectionTable *string // Specifies the data schema for the custom Athena source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a connector to an Amazon Athena data source.
type AuditContext ¶ added in v1.18.0
type AuditContext struct { // A string containing the additional audit context information. AdditionalAuditContext *string // All columns request for audit. AllColumnsRequested *bool // The requested columns for audit. RequestedColumns []string // contains filtered or unexported fields }
A structure containing the Lake Formation audit context.
type AuthConfiguration ¶ added in v1.103.0
type AuthConfiguration struct { // The type of authentication for a connection. // // This member is required. AuthenticationType *Property // A map of key-value pairs for the basic authentication properties. Each value is // a Property object. BasicAuthenticationProperties map[string]Property // A map of key-value pairs for the custom authentication properties. Each value // is a Property object. CustomAuthenticationProperties map[string]Property // A map of key-value pairs for the OAuth2 properties. Each value is a Property // object. OAuth2Properties map[string]Property // The Amazon Resource Name (ARN) for the Secrets Manager. SecretArn *Property // contains filtered or unexported fields }
The authentication configuration for a connection returned by the DescribeConnectionType API.
type AuthenticationConfiguration ¶ added in v1.83.0
type AuthenticationConfiguration struct { // A structure containing the authentication configuration. AuthenticationType AuthenticationType // The properties for OAuth2 authentication. OAuth2Properties *OAuth2Properties // The secret manager ARN to store credentials. SecretArn *string // contains filtered or unexported fields }
A structure containing the authentication configuration.
type AuthenticationConfigurationInput ¶ added in v1.83.0
type AuthenticationConfigurationInput struct { // A structure containing the authentication configuration in the CreateConnection // request. AuthenticationType AuthenticationType // The credentials used when the authentication type is basic authentication. BasicAuthenticationCredentials *BasicAuthenticationCredentials // The credentials used when the authentication type is custom authentication. CustomAuthenticationCredentials map[string]string // The ARN of the KMS key used to encrypt the connection. Only taken as an input // in the request and stored in Secrets Manager. KmsKeyArn *string // The properties for OAuth2 authentication in the CreateConnection request. OAuth2Properties *OAuth2PropertiesInput // The secret manager ARN to store credentials in the CreateConnection request. SecretArn *string // contains filtered or unexported fields }
A structure containing the authentication configuration in the CreateConnection request.
type AuthenticationType ¶ added in v1.83.0
type AuthenticationType string
const ( AuthenticationTypeBasic AuthenticationType = "BASIC" AuthenticationTypeOauth2 AuthenticationType = "OAUTH2" AuthenticationTypeCustom AuthenticationType = "CUSTOM" AuthenticationTypeIam AuthenticationType = "IAM" )
Enum values for AuthenticationType
func (AuthenticationType) Values ¶ added in v1.83.0
func (AuthenticationType) Values() []AuthenticationType
Values returns all known values for AuthenticationType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type AuthorizationCodeProperties ¶ added in v1.83.0
type AuthorizationCodeProperties struct { // An authorization code to be used in the third leg of the AUTHORIZATION_CODE // grant workflow. This is a single-use code which becomes invalid once exchanged // for an access token, thus it is acceptable to have this value as a request // parameter. AuthorizationCode *string // The redirect URI where the user gets redirected to by authorization server when // issuing an authorization code. The URI is subsequently used when the // authorization code is exchanged for an access token. RedirectUri *string // contains filtered or unexported fields }
The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type workflow.
type BackfillError ¶ added in v0.31.0
type BackfillError struct { // The error code for an error that occurred when registering partition indexes // for an existing table. Code BackfillErrorCode // A list of a limited number of partitions in the response. Partitions []PartitionValueList // contains filtered or unexported fields }
A list of errors that can occur when registering partition indexes for an existing table.
These errors give the details about why an index registration failed and provide a limited number of partitions in the response, so that you can fix the partitions at fault and try registering the index again. The most common errors are categorized as follows:
EncryptedPartitionError: The partitions are encrypted.
InvalidPartitionTypeDataError: The partition value doesn't match the data type for that partition column.
MissingPartitionValueError: The partition value is missing.
UnsupportedPartitionCharacterError: Characters inside the partition value are not supported. For example: U+0000, U+0001, U+0002.
InternalError: Any error which does not belong to other error codes.
type BackfillErrorCode ¶ added in v0.31.0
type BackfillErrorCode string
const ( BackfillErrorCodeEncryptedPartitionError BackfillErrorCode = "ENCRYPTED_PARTITION_ERROR" BackfillErrorCodeInternalError BackfillErrorCode = "INTERNAL_ERROR" BackfillErrorCodeInvalidPartitionTypeDataError BackfillErrorCode = "INVALID_PARTITION_TYPE_DATA_ERROR" BackfillErrorCodeMissingPartitionValueError BackfillErrorCode = "MISSING_PARTITION_VALUE_ERROR" BackfillErrorCodeUnsupportedPartitionCharacterError BackfillErrorCode = "UNSUPPORTED_PARTITION_CHARACTER_ERROR" )
Enum values for BackfillErrorCode
func (BackfillErrorCode) Values ¶ added in v0.31.0
func (BackfillErrorCode) Values() []BackfillErrorCode
Values returns all known values for BackfillErrorCode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
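A sketch of translating a BackfillError into a human-readable message using the constants above; unknown or future codes fall through to the default case:

// describeBackfillError maps a backfill error code to a short description.
func describeBackfillError(be types.BackfillError) string {
	switch be.Code {
	case types.BackfillErrorCodeEncryptedPartitionError:
		return "partitions are encrypted"
	case types.BackfillErrorCodeInvalidPartitionTypeDataError:
		return "partition value does not match the partition column type"
	case types.BackfillErrorCodeMissingPartitionValueError:
		return "partition value is missing"
	case types.BackfillErrorCodeUnsupportedPartitionCharacterError:
		return "partition value contains unsupported characters"
	default:
		return "internal or unrecognized backfill error"
	}
}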
type BasicAuthenticationCredentials ¶ added in v1.103.0
type BasicAuthenticationCredentials struct { // The password to connect to the data source. Password *string // The username to connect to the data source. Username *string // contains filtered or unexported fields }
For supplying basic auth credentials when not providing a SecretArn value.
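A sketch of an AuthenticationConfigurationInput that supplies basic credentials inline instead of a SecretArn; the username and password are placeholders, and in practice a Secrets Manager ARN is usually preferable:

var authInput = types.AuthenticationConfigurationInput{
	AuthenticationType: types.AuthenticationTypeBasic,
	BasicAuthenticationCredentials: &types.BasicAuthenticationCredentials{
		Username: aws.String("etl_user"),         // placeholder
		Password: aws.String("example-password"), // placeholder
	},
}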
type BasicCatalogTarget ¶ added in v1.25.0
type BasicCatalogTarget struct { // The database that contains the table you want to use as the target. This // database must already exist in the Data Catalog. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of your data target. // // This member is required. Name *string // The table that defines the schema of your output data. This table must already // exist in the Data Catalog. // // This member is required. Table *string // The partition keys used to distribute data across multiple partitions or shards // based on a specific key or set of keys. PartitionKeys [][]string // contains filtered or unexported fields }
Specifies a target that uses a Glue Data Catalog table.
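A sketch of a BasicCatalogTarget node; the database and table named below are placeholders and must already exist in the Data Catalog:

var catalogTargetNode = types.BasicCatalogTarget{
	Name:          aws.String("WriteToCatalog"),
	Inputs:        []string{"transform-node"},
	Database:      aws.String("sales_db"),
	Table:         aws.String("orders"),
	PartitionKeys: [][]string{{"region"}},
}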
type BatchGetTableOptimizerEntry ¶ added in v1.68.0
type BatchGetTableOptimizerEntry struct { // The Catalog ID of the table. CatalogId *string // The name of the database in the catalog in which the table resides. DatabaseName *string // The name of the table. TableName *string // The type of table optimizer. Type TableOptimizerType // contains filtered or unexported fields }
Represents a table optimizer to retrieve in the BatchGetTableOptimizer operation.
type BatchGetTableOptimizerError ¶ added in v1.68.0
type BatchGetTableOptimizerError struct { // The Catalog ID of the table. CatalogId *string // The name of the database in the catalog in which the table resides. DatabaseName *string // An ErrorDetail object containing code and message details about the error. Error *ErrorDetail // The name of the table. TableName *string // The type of table optimizer. Type TableOptimizerType // contains filtered or unexported fields }
Contains details on one of the errors in the error list returned by the BatchGetTableOptimizer operation.
type BatchStopJobRunError ¶
type BatchStopJobRunError struct { // Specifies details about the error that was encountered. ErrorDetail *ErrorDetail // The name of the job definition that is used in the job run in question. JobName *string // The JobRunId of the job run in question. JobRunId *string // contains filtered or unexported fields }
Records an error that occurred when attempting to stop a specified job run.
type BatchStopJobRunSuccessfulSubmission ¶
type BatchStopJobRunSuccessfulSubmission struct { // The name of the job definition used in the job run that was stopped. JobName *string // The JobRunId of the job run that was stopped. JobRunId *string // contains filtered or unexported fields }
Records a successful request to stop a specified JobRun .
type BatchTableOptimizer ¶ added in v1.68.0
type BatchTableOptimizer struct { // The Catalog ID of the table. CatalogId *string // The name of the database in the catalog in which the table resides. DatabaseName *string // The name of the table. TableName *string // A TableOptimizer object that contains details on the configuration and last run // of a table optimizer. TableOptimizer *TableOptimizer // contains filtered or unexported fields }
Contains details for one of the table optimizers returned by the BatchGetTableOptimizer operation.
type BatchUpdatePartitionFailureEntry ¶ added in v0.29.0
type BatchUpdatePartitionFailureEntry struct { // The details about the batch update partition error. ErrorDetail *ErrorDetail // A list of values defining the partitions. PartitionValueList []string // contains filtered or unexported fields }
Contains information about a batch update partition error.
type BatchUpdatePartitionRequestEntry ¶ added in v0.29.0
type BatchUpdatePartitionRequestEntry struct { // The structure used to update a partition. // // This member is required. PartitionInput *PartitionInput // A list of values defining the partitions. // // This member is required. PartitionValueList []string // contains filtered or unexported fields }
A structure that contains the values and structure used to update a partition.
type BinaryColumnStatisticsData ¶
type BinaryColumnStatisticsData struct { // The average bit sequence length in the column. // // This member is required. AverageLength float64 // The size of the longest bit sequence in the column. // // This member is required. MaximumLength int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // contains filtered or unexported fields }
Defines column statistics supported for bit sequence data values.
type Blueprint ¶ added in v1.11.0
type Blueprint struct { // Specifies the path in Amazon S3 where the blueprint is published. BlueprintLocation *string // Specifies a path in Amazon S3 where the blueprint is copied when you call // CreateBlueprint/UpdateBlueprint to register the blueprint in Glue. BlueprintServiceLocation *string // The date and time the blueprint was registered. CreatedOn *time.Time // The description of the blueprint. Description *string // An error message. ErrorMessage *string // When there are multiple versions of a blueprint and the latest version has some // errors, this attribute indicates the last successful blueprint definition that // is available with the service. LastActiveDefinition *LastActiveDefinition // The date and time the blueprint was last modified. LastModifiedOn *time.Time // The name of the blueprint. Name *string // A JSON string that indicates the list of parameter specifications for the // blueprint. ParameterSpec *string // The status of the blueprint registration. // // - Creating — The blueprint registration is in progress. // // - Active — The blueprint has been successfully registered. // // - Updating — An update to the blueprint registration is in progress. // // - Failed — The blueprint registration failed. Status BlueprintStatus // contains filtered or unexported fields }
The details of a blueprint.
type BlueprintDetails ¶ added in v1.11.0
type BlueprintDetails struct { // The name of the blueprint. BlueprintName *string // The run ID for this blueprint. RunId *string // contains filtered or unexported fields }
The details of a blueprint.
type BlueprintRun ¶ added in v1.11.0
type BlueprintRun struct { // The name of the blueprint. BlueprintName *string // The date and time that the blueprint run completed. CompletedOn *time.Time // Indicates any errors that are seen while running the blueprint. ErrorMessage *string // The blueprint parameters as a string. You will have to provide a value for each // key that is required from the parameter spec that is defined in the // Blueprint$ParameterSpec . Parameters *string // The role ARN. This role will be assumed by the Glue service and will be used to // create the workflow and other entities of a workflow. RoleArn *string // If there are any errors while creating the entities of a workflow, we try to // roll back the created entities until that point and delete them. This attribute // indicates the errors seen while trying to delete the entities that are created. RollbackErrorMessage *string // The run ID for this blueprint run. RunId *string // The date and time that the blueprint run started. StartedOn *time.Time // The state of the blueprint run. Possible values are: // // - Running — The blueprint run is in progress. // // - Succeeded — The blueprint run completed successfully. // // - Failed — The blueprint run failed and rollback is complete. // // - Rolling Back — The blueprint run failed and rollback is in progress. State BlueprintRunState // The name of a workflow that is created as a result of a successful blueprint // run. If a blueprint run has an error, there will not be a workflow created. WorkflowName *string // contains filtered or unexported fields }
The details of a blueprint run.
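A sketch of checking whether a blueprint run has reached a terminal state, using the BlueprintRunState values defined in the next section:

// blueprintRunFinished reports whether the run has stopped making progress.
func blueprintRunFinished(run types.BlueprintRun) bool {
	switch run.State {
	case types.BlueprintRunStateSucceeded, types.BlueprintRunStateFailed:
		return true
	default:
		return false
	}
}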
type BlueprintRunState ¶ added in v1.11.0
type BlueprintRunState string
const ( BlueprintRunStateRunning BlueprintRunState = "RUNNING" BlueprintRunStateSucceeded BlueprintRunState = "SUCCEEDED" BlueprintRunStateFailed BlueprintRunState = "FAILED" BlueprintRunStateRollingBack BlueprintRunState = "ROLLING_BACK" )
Enum values for BlueprintRunState
func (BlueprintRunState) Values ¶ added in v1.11.0
func (BlueprintRunState) Values() []BlueprintRunState
Values returns all known values for BlueprintRunState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type BlueprintStatus ¶ added in v1.11.0
type BlueprintStatus string
const ( BlueprintStatusCreating BlueprintStatus = "CREATING" BlueprintStatusActive BlueprintStatus = "ACTIVE" BlueprintStatusUpdating BlueprintStatus = "UPDATING" BlueprintStatusFailed BlueprintStatus = "FAILED" )
Enum values for BlueprintStatus
func (BlueprintStatus) Values ¶ added in v1.11.0
func (BlueprintStatus) Values() []BlueprintStatus
Values returns all known values for BlueprintStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type BooleanColumnStatisticsData ¶
type BooleanColumnStatisticsData struct { // The number of false values in the column. // // This member is required. NumberOfFalses int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // The number of true values in the column. // // This member is required. NumberOfTrues int64 // contains filtered or unexported fields }
Defines column statistics supported for Boolean data columns.
type Capabilities ¶ added in v1.103.0
type Capabilities struct { // A list of supported authentication types. // // This member is required. SupportedAuthenticationTypes []AuthenticationType // A list of supported compute environments. // // This member is required. SupportedComputeEnvironments []ComputeEnvironment // A list of supported data operations. // // This member is required. SupportedDataOperations []DataOperation // contains filtered or unexported fields }
Specifies the supported authentication types returned by the DescribeConnectionType API.
type Catalog ¶ added in v1.103.0
type Catalog struct { // The name of the catalog. Cannot be the same as the account ID. // // This member is required. Name *string // The ID of the catalog. To grant access to the default catalog, this field // should not be provided. CatalogId *string // A CatalogProperties object that specifies data lake access properties and other // custom properties. CatalogProperties *CatalogPropertiesOutput // An array of PrincipalPermissions objects. Creates a set of default permissions // on the database(s) for principals. Used by Amazon Web Services Lake Formation. // Not used in the normal course of Glue operations. CreateDatabaseDefaultPermissions []PrincipalPermissions // An array of PrincipalPermissions objects. Creates a set of default permissions // on the table(s) for principals. Used by Amazon Web Services Lake Formation. Not // used in the normal course of Glue operations. CreateTableDefaultPermissions []PrincipalPermissions // The time at which the catalog was created. CreateTime *time.Time // Description string, not more than 2048 bytes long, matching the URI address // multi-line string pattern. A description of the catalog. Description *string // A FederatedCatalog object that points to an entity outside the Glue Data // Catalog. FederatedCatalog *FederatedCatalog // A map array of key-value pairs that define parameters and properties of the // catalog. Parameters map[string]string // The Amazon Resource Name (ARN) assigned to the catalog resource. ResourceArn *string // A TargetRedshiftCatalog object that describes a target catalog for database // resource linking. TargetRedshiftCatalog *TargetRedshiftCatalog // The time at which the catalog was last updated. UpdateTime *time.Time // contains filtered or unexported fields }
The catalog object represents a logical grouping of databases in the Glue Data Catalog or a federated source. You can now create a Redshift-federated catalog or a catalog containing resource links to Redshift databases in another account or region.
type CatalogDeltaSource ¶ added in v1.43.0
type CatalogDeltaSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the Delta Lake data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // Specifies additional connection options. AdditionalDeltaOptions map[string]string // Specifies the data schema for the Delta Lake source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Delta Lake data source that is registered in the Glue Data Catalog.
type CatalogEncryptionMode ¶
type CatalogEncryptionMode string
const ( CatalogEncryptionModeDisabled CatalogEncryptionMode = "DISABLED" CatalogEncryptionModeSsekms CatalogEncryptionMode = "SSE-KMS" CatalogEncryptionModeSsekmswithservicerole CatalogEncryptionMode = "SSE-KMS-WITH-SERVICE-ROLE" )
Enum values for CatalogEncryptionMode
func (CatalogEncryptionMode) Values ¶ added in v0.29.0
func (CatalogEncryptionMode) Values() []CatalogEncryptionMode
Values returns all known values for CatalogEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CatalogEntry ¶
type CatalogEntry struct { // The database in which the table metadata resides. // // This member is required. DatabaseName *string // The name of the table in question. // // This member is required. TableName *string // contains filtered or unexported fields }
Specifies a table definition in the Glue Data Catalog.
type CatalogHudiSource ¶ added in v1.40.0
type CatalogHudiSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the Hudi data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // Specifies additional connection options. AdditionalHudiOptions map[string]string // Specifies the data schema for the Hudi source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Hudi data source that is registered in the Glue Data Catalog.
type CatalogImportStatus ¶
type CatalogImportStatus struct { // True if the migration has completed, or False otherwise. ImportCompleted bool // The time that the migration was started. ImportTime *time.Time // The name of the person who initiated the migration. ImportedBy *string // contains filtered or unexported fields }
A structure containing migration status information.
type CatalogInput ¶ added in v1.103.0
type CatalogInput struct { // A CatalogProperties object that specifies data lake access properties and other // custom properties. CatalogProperties *CatalogProperties // An array of PrincipalPermissions objects. Creates a set of default permissions // on the database(s) for principals. Used by Amazon Web Services Lake Formation. // Typically should be explicitly set as an empty list. CreateDatabaseDefaultPermissions []PrincipalPermissions // An array of PrincipalPermissions objects. Creates a set of default permissions // on the table(s) for principals. Used by Amazon Web Services Lake Formation. // Typically should be explicitly set as an empty list. CreateTableDefaultPermissions []PrincipalPermissions // Description string, not more than 2048 bytes long, matching the URI address // multi-line string pattern. A description of the catalog. Description *string // A FederatedCatalog object. A FederatedCatalog structure that references an // entity outside the Glue Data Catalog, for example a Redshift database. FederatedCatalog *FederatedCatalog // A map array of key-value pairs that define the parameters and properties of the // catalog. Parameters map[string]string // A TargetRedshiftCatalog object that describes a target catalog for resource // linking. TargetRedshiftCatalog *TargetRedshiftCatalog // contains filtered or unexported fields }
A structure that describes catalog properties.
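A sketch of a CatalogInput with the default permissions explicitly set to empty lists, as the field documentation suggests; the description and parameters are placeholders:

var catalogInput = types.CatalogInput{
	Description:                      aws.String("Catalog of federated Redshift databases"),
	CreateDatabaseDefaultPermissions: []types.PrincipalPermissions{},
	CreateTableDefaultPermissions:    []types.PrincipalPermissions{},
	Parameters: map[string]string{
		"owner": "data-platform", // placeholder parameter
	},
}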
type CatalogKafkaSource ¶ added in v1.25.0
type CatalogKafkaSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data store. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // Specifies options related to data preview for viewing a sample of your data. DataPreviewOptions *StreamingDataPreviewOptions // Whether to automatically determine the schema from the incoming data. DetectSchema *bool // Specifies the streaming options. StreamingOptions *KafkaStreamingSourceOptions // The amount of time to spend processing each micro batch. WindowSize *int32 // contains filtered or unexported fields }
Specifies an Apache Kafka data store in the Data Catalog.
type CatalogKinesisSource ¶ added in v1.25.0
type CatalogKinesisSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // Additional options for data preview. DataPreviewOptions *StreamingDataPreviewOptions // Whether to automatically determine the schema from the incoming data. DetectSchema *bool // Additional options for the Kinesis streaming data source. StreamingOptions *KinesisStreamingSourceOptions // The amount of time to spend processing each micro batch. WindowSize *int32 // contains filtered or unexported fields }
Specifies a Kinesis data source in the Glue Data Catalog.
type CatalogProperties ¶ added in v1.103.0
type CatalogProperties struct { // Additional key-value properties for the catalog, such as column statistics // optimizations. CustomProperties map[string]string // A DataLakeAccessProperties object that specifies properties to configure data // lake access for your catalog resource in the Glue Data Catalog. DataLakeAccessProperties *DataLakeAccessProperties // contains filtered or unexported fields }
A structure that specifies data lake access properties and other custom properties.
type CatalogPropertiesOutput ¶ added in v1.103.0
type CatalogPropertiesOutput struct { // Additional key-value properties for the catalog, such as column statistics // optimizations. CustomProperties map[string]string // A DataLakeAccessProperties object with input properties to configure data lake // access for your catalog resource in the Glue Data Catalog. DataLakeAccessProperties *DataLakeAccessPropertiesOutput // contains filtered or unexported fields }
Property attributes that include configuration properties for the catalog resource.
type CatalogSchemaChangePolicy ¶ added in v1.25.0
type CatalogSchemaChangePolicy struct { // Whether to use the specified update behavior when the crawler finds a changed // schema. EnableUpdateCatalog *bool // The update behavior when the crawler finds a changed schema. UpdateBehavior UpdateCatalogBehavior // contains filtered or unexported fields }
A policy that specifies update behavior for the crawler.
type CatalogSource ¶ added in v1.25.0
type CatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data store. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a data store in the Glue Data Catalog.
type CatalogTarget ¶
type CatalogTarget struct { // The name of the database to be synchronized. // // This member is required. DatabaseName *string // A list of the tables to be synchronized. // // This member is required. Tables []string // The name of the connection for an Amazon S3-backed Data Catalog table to be a // target of the crawl when using a Catalog connection type paired with a NETWORK // Connection type. ConnectionName *string // A valid Amazon dead-letter SQS ARN. For example, // arn:aws:sqs:region:account:deadLetterQueue . DlqEventQueueArn *string // A valid Amazon SQS ARN. For example, arn:aws:sqs:region:account:sqs . EventQueueArn *string // contains filtered or unexported fields }
Specifies a Glue Data Catalog target.
type Classifier ¶
type Classifier struct { // A classifier for comma-separated values (CSV). CsvClassifier *CsvClassifier // A classifier that uses grok . GrokClassifier *GrokClassifier // A classifier for JSON content. JsonClassifier *JsonClassifier // A classifier for XML content. XMLClassifier *XMLClassifier // contains filtered or unexported fields }
Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format.
You can use the standard classifiers that Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.
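Only one of the classifier fields is populated on any given Classifier value, so callers typically switch on which pointer is non-nil. A sketch, assuming each concrete classifier type exposes a Name field (aws.ToString is the nil-safe dereference helper from the aws package):

// classifierName returns the name of whichever classifier variant is set.
func classifierName(c types.Classifier) string {
	switch {
	case c.CsvClassifier != nil:
		return aws.ToString(c.CsvClassifier.Name)
	case c.GrokClassifier != nil:
		return aws.ToString(c.GrokClassifier.Name)
	case c.JsonClassifier != nil:
		return aws.ToString(c.JsonClassifier.Name)
	case c.XMLClassifier != nil:
		return aws.ToString(c.XMLClassifier.Name)
	default:
		return ""
	}
}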
type CloudWatchEncryption ¶
type CloudWatchEncryption struct { // The encryption mode to use for CloudWatch data. CloudWatchEncryptionMode CloudWatchEncryptionMode // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string // contains filtered or unexported fields }
Specifies how Amazon CloudWatch data should be encrypted.
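A sketch of enabling SSE-KMS for CloudWatch logs as part of a security configuration, assuming the EncryptionConfiguration type listed in the index carries a CloudWatchEncryption field; the KMS key ARN is a placeholder:

var encryption = types.EncryptionConfiguration{
	CloudWatchEncryption: &types.CloudWatchEncryption{
		CloudWatchEncryptionMode: types.CloudWatchEncryptionModeSsekms,
		KmsKeyArn:                aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
	},
}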
type CloudWatchEncryptionMode ¶
type CloudWatchEncryptionMode string
const ( CloudWatchEncryptionModeDisabled CloudWatchEncryptionMode = "DISABLED" CloudWatchEncryptionModeSsekms CloudWatchEncryptionMode = "SSE-KMS" )
Enum values for CloudWatchEncryptionMode
func (CloudWatchEncryptionMode) Values ¶ added in v0.29.0
func (CloudWatchEncryptionMode) Values() []CloudWatchEncryptionMode
Values returns all known values for CloudWatchEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CodeGenConfigurationNode ¶ added in v1.25.0
type CodeGenConfigurationNode struct { // Specifies a transform that groups rows by chosen fields and computes the // aggregated value by specified function. Aggregate *Aggregate // Specifies a target that writes to a data source in Amazon Redshift. AmazonRedshiftSource *AmazonRedshiftSource // Specifies a target that writes to a data target in Amazon Redshift. AmazonRedshiftTarget *AmazonRedshiftTarget // Specifies a transform that maps data property keys in the data source to data // property keys in the data target. You can rename keys, modify the data types for // keys, and choose which keys to drop from the dataset. ApplyMapping *ApplyMapping // Specifies a connector to an Amazon Athena data source. AthenaConnectorSource *AthenaConnectorSource // Specifies a Delta Lake data source that is registered in the Glue Data Catalog. CatalogDeltaSource *CatalogDeltaSource // Specifies a Hudi data source that is registered in the Glue Data Catalog. CatalogHudiSource *CatalogHudiSource // Specifies an Apache Kafka data store in the Data Catalog. CatalogKafkaSource *CatalogKafkaSource // Specifies a Kinesis data source in the Glue Data Catalog. CatalogKinesisSource *CatalogKinesisSource // Specifies a data store in the Glue Data Catalog. CatalogSource *CatalogSource // Specifies a target that uses a Glue Data Catalog table. CatalogTarget *BasicCatalogTarget // Specifies a source generated with standard connection options. ConnectorDataSource *ConnectorDataSource // Specifies a target generated with standard connection options. ConnectorDataTarget *ConnectorDataTarget // Specifies a transform that uses custom code you provide to perform the data // transformation. The output is a collection of DynamicFrames. CustomCode *CustomCode // Specifies the direct JDBC source connection. DirectJDBCSource *DirectJDBCSource // Specifies an Apache Kafka data store. DirectKafkaSource *DirectKafkaSource // Specifies a direct Amazon Kinesis data source. DirectKinesisSource *DirectKinesisSource // Specifies a transform that removes rows of repeating data from a data set. DropDuplicates *DropDuplicates // Specifies a transform that chooses the data property keys that you want to drop. DropFields *DropFields // Specifies a transform that removes columns from the dataset if all values in // the column are 'null'. By default, Glue Studio will recognize null objects, but // some values such as empty strings, strings that are "null", -1 integers or other // placeholders such as zeros, are not automatically recognized as nulls. DropNullFields *DropNullFields // Specifies a custom visual transform created by a user. DynamicTransform *DynamicTransform // Specifies a DynamoDBC Catalog data store in the Glue Data Catalog. DynamoDBCatalogSource *DynamoDBCatalogSource // Specifies your data quality evaluation criteria. EvaluateDataQuality *EvaluateDataQuality // Specifies your data quality evaluation criteria. Allows multiple input data and // returns a collection of Dynamic Frames. EvaluateDataQualityMultiFrame *EvaluateDataQualityMultiFrame // Specifies a transform that locates records in the dataset that have missing // values and adds a new field with a value determined by imputation. The input // data set is used to train the machine learning model that determines what the // missing value should be. FillMissingValues *FillMissingValues // Specifies a transform that splits a dataset into two, based on a filter // condition. Filter *Filter // Specifies a data source in a goverened Data Catalog. 
GovernedCatalogSource *GovernedCatalogSource // Specifies a data target that writes to a governed catalog. GovernedCatalogTarget *GovernedCatalogTarget // Specifies a connector to a JDBC data source. JDBCConnectorSource *JDBCConnectorSource // Specifies a connector to a JDBC data target. JDBCConnectorTarget *JDBCConnectorTarget // Specifies a transform that joins two datasets into one dataset using a // comparison phrase on the specified data property keys. You can use inner, outer, // left, right, left semi, and left anti joins. Join *Join // Specifies a transform that merges a DynamicFrame with a staging DynamicFrame // based on the specified primary keys to identify records. Duplicate records // (records with the same primary keys) are not de-duplicated. Merge *Merge // Specifies a Microsoft SQL Server data source in the Glue Data Catalog. MicrosoftSQLServerCatalogSource *MicrosoftSQLServerCatalogSource // Specifies a target that uses Microsoft SQL. MicrosoftSQLServerCatalogTarget *MicrosoftSQLServerCatalogTarget // Specifies a MySQL data source in the Glue Data Catalog. MySQLCatalogSource *MySQLCatalogSource // Specifies a target that uses MySQL. MySQLCatalogTarget *MySQLCatalogTarget // Specifies an Oracle data source in the Glue Data Catalog. OracleSQLCatalogSource *OracleSQLCatalogSource // Specifies a target that uses Oracle SQL. OracleSQLCatalogTarget *OracleSQLCatalogTarget // Specifies a transform that identifies, removes, or masks PII data. PIIDetection *PIIDetection // Specifies a PostgreSQL data source in the Glue Data Catalog. PostgreSQLCatalogSource *PostgreSQLCatalogSource // Specifies a target that uses PostgreSQL. PostgreSQLCatalogTarget *PostgreSQLCatalogTarget // Specifies a Glue DataBrew recipe node. Recipe *Recipe // Specifies an Amazon Redshift data store. RedshiftSource *RedshiftSource // Specifies a target that uses Amazon Redshift. RedshiftTarget *RedshiftTarget // Specifies a relational catalog data store in the Glue Data Catalog. RelationalCatalogSource *RelationalCatalogSource // Specifies a transform that renames a single data property key. RenameField *RenameField // Specifies a Delta Lake data source that is registered in the Glue Data Catalog. // The data source must be stored in Amazon S3. S3CatalogDeltaSource *S3CatalogDeltaSource // Specifies a Hudi data source that is registered in the Glue Data Catalog. The // data source must be stored in Amazon S3. S3CatalogHudiSource *S3CatalogHudiSource // Specifies an Amazon S3 data store in the Glue Data Catalog. S3CatalogSource *S3CatalogSource // Specifies a data target that writes to Amazon S3 using the Glue Data Catalog. S3CatalogTarget *S3CatalogTarget // Specifies a comma-separated value (CSV) data store stored in Amazon S3. S3CsvSource *S3CsvSource // Specifies a target that writes to a Delta Lake data source in the Glue Data // Catalog. S3DeltaCatalogTarget *S3DeltaCatalogTarget // Specifies a target that writes to a Delta Lake data source in Amazon S3. S3DeltaDirectTarget *S3DeltaDirectTarget // Specifies a Delta Lake data source stored in Amazon S3. S3DeltaSource *S3DeltaSource // Specifies a data target that writes to Amazon S3. S3DirectTarget *S3DirectTarget // Specifies a data target that writes to Amazon S3 in Apache Parquet columnar // storage. S3GlueParquetTarget *S3GlueParquetTarget // Specifies a target that writes to a Hudi data source in the Glue Data Catalog. 
S3HudiCatalogTarget *S3HudiCatalogTarget // Specifies a target that writes to a Hudi data source in Amazon S3. S3HudiDirectTarget *S3HudiDirectTarget // Specifies a Hudi data source stored in Amazon S3. S3HudiSource *S3HudiSource // Specifies a JSON data store stored in Amazon S3. S3JsonSource *S3JsonSource // Specifies an Apache Parquet data store stored in Amazon S3. S3ParquetSource *S3ParquetSource // Specifies a transform that chooses the data property keys that you want to keep. SelectFields *SelectFields // Specifies a transform that chooses one DynamicFrame from a collection of // DynamicFrames . The output is the selected DynamicFrame . SelectFromCollection *SelectFromCollection // Specifies a Snowflake data source. SnowflakeSource *SnowflakeSource // Specifies a target that writes to a Snowflake data source. SnowflakeTarget *SnowflakeTarget // Specifies a connector to an Apache Spark data source. SparkConnectorSource *SparkConnectorSource // Specifies a target that uses an Apache Spark connector. SparkConnectorTarget *SparkConnectorTarget // Specifies a transform where you enter a SQL query using Spark SQL syntax to // transform the data. The output is a single DynamicFrame . SparkSQL *SparkSQL // Specifies a transform that writes samples of the data to an Amazon S3 bucket. Spigot *Spigot // Specifies a transform that splits data property keys into two DynamicFrames . // The output is a collection of DynamicFrames : one with selected data property // keys, and one with the remaining data property keys. SplitFields *SplitFields // Specifies a transform that combines the rows from two or more datasets into a // single result. Union *Union // contains filtered or unexported fields }
CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.
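As a minimal sketch of that constraint (assuming types is the imported Glue types package; the node key and the empty DropDuplicates literal are purely illustrative), a node populates exactly one member and leaves the rest nil:

	node := types.CodeGenConfigurationNode{
		// Exactly one member is set; this node is a DropDuplicates transform.
		DropDuplicates: &types.DropDuplicates{
			// The transform's name, inputs, and columns would be filled in here.
		},
	}
	nodes := map[string]types.CodeGenConfigurationNode{"node-1": node}
	_ = nodes // handed to the job-authoring APIs that accept configuration nodes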
type CodeGenEdge ¶
type CodeGenEdge struct { // The ID of the node at which the edge starts. // // This member is required. Source *string // The ID of the node at which the edge ends. // // This member is required. Target *string // The target of the edge. TargetParameter *string // contains filtered or unexported fields }
Represents a directional edge in a directed acyclic graph (DAG).
type CodeGenNode ¶
type CodeGenNode struct { // Properties of the node, in the form of name-value pairs. // // This member is required. Args []CodeGenNodeArg // A node identifier that is unique within the node's graph. // // This member is required. Id *string // The type of node that this is. // // This member is required. NodeType *string // The line number of the node. LineNumber int32 // contains filtered or unexported fields }
Represents a node in a directed acyclic graph (DAG).
type CodeGenNodeArg ¶
type CodeGenNodeArg struct { // The name of the argument or property. // // This member is required. Name *string // The value of the argument or property. // // This member is required. Value *string // True if the value is used as a parameter. Param bool // contains filtered or unexported fields }
An argument or property of a node.
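Together, CodeGenNode , CodeGenEdge , and CodeGenNodeArg can describe a small DAG for the code-generation APIs. A hedged sketch (assuming the aws and types packages from the SDK are imported; the node IDs, node types, and argument names are illustrative rather than taken from a real generated job):

	source := types.CodeGenNode{
		Id:       aws.String("datasource0"),
		NodeType: aws.String("DataSource"),
		Args: []types.CodeGenNodeArg{
			{Name: aws.String("database"), Value: aws.String("sales")},
			{Name: aws.String("table_name"), Value: aws.String("orders")},
		},
	}
	sink := types.CodeGenNode{
		Id:       aws.String("datasink1"),
		NodeType: aws.String("DataSink"),
		Args: []types.CodeGenNodeArg{
			{Name: aws.String("path"), Value: aws.String("s3://example-bucket/out/")},
		},
	}
	// The edge starts at the source node and ends at the sink node.
	edge := types.CodeGenEdge{
		Source: aws.String("datasource0"),
		Target: aws.String("datasink1"),
	}
	_, _ = []types.CodeGenNode{source, sink}, []types.CodeGenEdge{edge}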
type Column ¶
type Column struct { // The name of the Column . // // This member is required. Name *string // A free-form text comment. Comment *string // These key-value pairs define properties associated with the column. Parameters map[string]string // The data type of the Column . Type *string // contains filtered or unexported fields }
A column in a Table .
type ColumnError ¶
type ColumnError struct { // The name of the column that failed. ColumnName *string // An error message with the reason for the failure of an operation. Error *ErrorDetail // contains filtered or unexported fields }
Encapsulates a column name that failed and the reason for failure.
type ColumnImportance ¶ added in v0.31.0
type ColumnImportance struct { // The name of a column. ColumnName *string // The column importance score for the column, as a decimal. Importance *float64 // contains filtered or unexported fields }
A structure containing the column name and column importance score for a column.
Column importance helps you understand how columns contribute to your model, by identifying which columns in your records are more important than others.
type ColumnRowFilter ¶ added in v1.18.0
type ColumnRowFilter struct { // A string containing the name of the column. ColumnName *string // A string containing the row-level filter expression. RowFilterExpression *string // contains filtered or unexported fields }
A filter that uses both column-level and row-level filtering.
type ColumnStatistics ¶
type ColumnStatistics struct { // The timestamp of when column statistics were generated. // // This member is required. AnalyzedTime *time.Time // The name of the column to which the statistics belong. // // This member is required. ColumnName *string // The data type of the column. // // This member is required. ColumnType *string // A ColumnStatisticsData object that contains the statistics data values. // // This member is required. StatisticsData *ColumnStatisticsData // contains filtered or unexported fields }
Represents the generated column-level statistics for a table or partition.
type ColumnStatisticsData ¶
type ColumnStatisticsData struct { // The type of column statistics data. // // This member is required. Type ColumnStatisticsType // Binary column statistics data. BinaryColumnStatisticsData *BinaryColumnStatisticsData // Boolean column statistics data. BooleanColumnStatisticsData *BooleanColumnStatisticsData // Date column statistics data. DateColumnStatisticsData *DateColumnStatisticsData // Decimal column statistics data. UnscaledValues within are Base64-encoded // binary objects storing big-endian, two's complement representations of the // decimal's unscaled value. DecimalColumnStatisticsData *DecimalColumnStatisticsData // Double column statistics data. DoubleColumnStatisticsData *DoubleColumnStatisticsData // Long column statistics data. LongColumnStatisticsData *LongColumnStatisticsData // String column statistics data. StringColumnStatisticsData *StringColumnStatisticsData // contains filtered or unexported fields }
Contains the individual types of column statistics data. Only one data object should be set and indicated by the Type attribute.
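A hedged sketch of how the Type attribute and the single populated data member go together (assuming the aws and types packages plus time are imported; the column name and type are placeholders, and the LongColumnStatisticsData fields are left to that type's own documentation):

	stats := types.ColumnStatistics{
		ColumnName:   aws.String("order_total"),
		ColumnType:   aws.String("bigint"),
		AnalyzedTime: aws.Time(time.Now()),
		StatisticsData: &types.ColumnStatisticsData{
			// Type must match the one data member that is set.
			Type:                     types.ColumnStatisticsTypeLong,
			LongColumnStatisticsData: &types.LongColumnStatisticsData{ /* min, max, null and distinct counts */ },
		},
	}
	_ = stats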
type ColumnStatisticsError ¶
type ColumnStatisticsError struct { // The ColumnStatistics of the column. ColumnStatistics *ColumnStatistics // An error message with the reason for the failure of an operation. Error *ErrorDetail // contains filtered or unexported fields }
Encapsulates a ColumnStatistics object that failed and the reason for failure.
type ColumnStatisticsState ¶ added in v1.69.0
type ColumnStatisticsState string
const ( ColumnStatisticsStateStarting ColumnStatisticsState = "STARTING" ColumnStatisticsStateRunning ColumnStatisticsState = "RUNNING" ColumnStatisticsStateSucceeded ColumnStatisticsState = "SUCCEEDED" ColumnStatisticsStateFailed ColumnStatisticsState = "FAILED" ColumnStatisticsStateStopped ColumnStatisticsState = "STOPPED" )
Enum values for ColumnStatisticsState
func (ColumnStatisticsState) Values ¶ added in v1.69.0
func (ColumnStatisticsState) Values() []ColumnStatisticsState
Values returns all known values for ColumnStatisticsState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
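Because the set of values can grow, a defensive membership check against the values known to this client build can be useful; a small sketch:

	// knownColumnStatisticsState reports whether s is a state this client version knows about.
	func knownColumnStatisticsState(s string) bool {
		for _, v := range types.ColumnStatisticsState("").Values() {
			if string(v) == s {
				return true
			}
		}
		return false
	}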
type ColumnStatisticsTaskNotRunningException ¶ added in v1.69.0
type ColumnStatisticsTaskNotRunningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An exception thrown when you try to stop a task run when there is no task running.
func (*ColumnStatisticsTaskNotRunningException) Error ¶ added in v1.69.0
func (e *ColumnStatisticsTaskNotRunningException) Error() string
func (*ColumnStatisticsTaskNotRunningException) ErrorCode ¶ added in v1.69.0
func (e *ColumnStatisticsTaskNotRunningException) ErrorCode() string
func (*ColumnStatisticsTaskNotRunningException) ErrorFault ¶ added in v1.69.0
func (e *ColumnStatisticsTaskNotRunningException) ErrorFault() smithy.ErrorFault
func (*ColumnStatisticsTaskNotRunningException) ErrorMessage ¶ added in v1.69.0
func (e *ColumnStatisticsTaskNotRunningException) ErrorMessage() string
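Callers typically detect this error with errors.As on whatever error the stop call returned; a hedged sketch (err stands in for the error from the relevant Glue operation, and the errors and log packages are assumed to be imported):

	var notRunning *types.ColumnStatisticsTaskNotRunningException
	if errors.As(err, &notRunning) {
		// No task is running for the table, so there is nothing to stop.
		log.Printf("stop skipped: %s", notRunning.ErrorMessage())
	}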
type ColumnStatisticsTaskRun ¶ added in v1.69.0
type ColumnStatisticsTaskRun struct { // The ID of the Data Catalog where the table resides. If none is supplied, the // Amazon Web Services account ID is used by default. CatalogID *string // A list of the column names. If none is supplied, all column names for the table // will be used by default. ColumnNameList []string // The identifier for the particular column statistics task run. ColumnStatisticsTaskRunId *string // The type of column statistics computation. ComputationType ComputationType // The time that this task was created. CreationTime *time.Time // The Amazon Web Services account ID. CustomerId *string // The calculated DPU usage in seconds for all autoscaled workers. DPUSeconds float64 // The database where the table resides. DatabaseName *string // The end time of the task. EndTime *time.Time // The error message for the job. ErrorMessage *string // The last point in time when this task was modified. LastUpdated *time.Time // The number of workers used to generate column statistics. The job is // preconfigured to autoscale up to 25 instances. NumberOfWorkers int32 // The IAM role that the service assumes to generate statistics. Role *string // The percentage of rows used to generate statistics. If none is supplied, the // entire table will be used to generate stats. SampleSize float64 // Name of the security configuration that is used to encrypt CloudWatch logs for // the column stats task run. SecurityConfiguration *string // The start time of the task. StartTime *time.Time // The status of the task run. Status ColumnStatisticsState // The name of the table for which column statistics is generated. TableName *string // The type of workers being used for generating stats. The default is g.1x . WorkerType *string // contains filtered or unexported fields }
The object that shows the details of the column stats run.
type ColumnStatisticsTaskRunningException ¶ added in v1.69.0
type ColumnStatisticsTaskRunningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An exception thrown when you try to start another job while running a column stats generation job.
func (*ColumnStatisticsTaskRunningException) Error ¶ added in v1.69.0
func (e *ColumnStatisticsTaskRunningException) Error() string
func (*ColumnStatisticsTaskRunningException) ErrorCode ¶ added in v1.69.0
func (e *ColumnStatisticsTaskRunningException) ErrorCode() string
func (*ColumnStatisticsTaskRunningException) ErrorFault ¶ added in v1.69.0
func (e *ColumnStatisticsTaskRunningException) ErrorFault() smithy.ErrorFault
func (*ColumnStatisticsTaskRunningException) ErrorMessage ¶ added in v1.69.0
func (e *ColumnStatisticsTaskRunningException) ErrorMessage() string
type ColumnStatisticsTaskSettings ¶ added in v1.101.0
type ColumnStatisticsTaskSettings struct { // The ID of the Data Catalog in which the database resides. CatalogID *string // A list of column names for which to run statistics. ColumnNameList []string // The name of the database where the table resides. DatabaseName *string // The last ExecutionAttempt for the column statistics task run. LastExecutionAttempt *ExecutionAttempt // The role used for running the column statistics. Role *string // The percentage of data to sample. SampleSize float64 // A schedule for running the column statistics, specified in CRON syntax. Schedule *Schedule // The type of schedule for a column statistics task. Possible values may be CRON // or AUTO . ScheduleType ScheduleType // Name of the security configuration that is used to encrypt CloudWatch logs. SecurityConfiguration *string // The source of setting the column statistics task. Possible values may be CATALOG // or TABLE . SettingSource SettingSource // The name of the table for which to generate column statistics. TableName *string // contains filtered or unexported fields }
The settings for a column statistics task.
type ColumnStatisticsTaskStoppingException ¶ added in v1.69.0
type ColumnStatisticsTaskStoppingException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An exception thrown when you try to stop a task run.
func (*ColumnStatisticsTaskStoppingException) Error ¶ added in v1.69.0
func (e *ColumnStatisticsTaskStoppingException) Error() string
func (*ColumnStatisticsTaskStoppingException) ErrorCode ¶ added in v1.69.0
func (e *ColumnStatisticsTaskStoppingException) ErrorCode() string
func (*ColumnStatisticsTaskStoppingException) ErrorFault ¶ added in v1.69.0
func (e *ColumnStatisticsTaskStoppingException) ErrorFault() smithy.ErrorFault
func (*ColumnStatisticsTaskStoppingException) ErrorMessage ¶ added in v1.69.0
func (e *ColumnStatisticsTaskStoppingException) ErrorMessage() string
type ColumnStatisticsType ¶
type ColumnStatisticsType string
const ( ColumnStatisticsTypeBoolean ColumnStatisticsType = "BOOLEAN" ColumnStatisticsTypeDate ColumnStatisticsType = "DATE" ColumnStatisticsTypeDecimal ColumnStatisticsType = "DECIMAL" ColumnStatisticsTypeDouble ColumnStatisticsType = "DOUBLE" ColumnStatisticsTypeLong ColumnStatisticsType = "LONG" ColumnStatisticsTypeString ColumnStatisticsType = "STRING" ColumnStatisticsTypeBinary ColumnStatisticsType = "BINARY" )
Enum values for ColumnStatisticsType
func (ColumnStatisticsType) Values ¶ added in v0.29.0
func (ColumnStatisticsType) Values() []ColumnStatisticsType
Values returns all known values for ColumnStatisticsType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CompactionMetrics ¶ added in v1.96.0
type CompactionMetrics struct { // A structure containing the Iceberg compaction metrics for the optimizer run. IcebergMetrics *IcebergCompactionMetrics // contains filtered or unexported fields }
A structure that contains compaction metrics for the optimizer run.
type Comparator ¶
type Comparator string
const ( ComparatorEquals Comparator = "EQUALS" ComparatorGreaterThan Comparator = "GREATER_THAN" ComparatorLessThan Comparator = "LESS_THAN" ComparatorGreaterThanEquals Comparator = "GREATER_THAN_EQUALS" ComparatorLessThanEquals Comparator = "LESS_THAN_EQUALS" )
Enum values for Comparator
func (Comparator) Values ¶ added in v0.29.0
func (Comparator) Values() []Comparator
Values returns all known values for Comparator. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Compatibility ¶ added in v0.30.0
type Compatibility string
const ( CompatibilityNone Compatibility = "NONE" CompatibilityDisabled Compatibility = "DISABLED" CompatibilityBackward Compatibility = "BACKWARD" CompatibilityBackwardAll Compatibility = "BACKWARD_ALL" CompatibilityForward Compatibility = "FORWARD" CompatibilityForwardAll Compatibility = "FORWARD_ALL" CompatibilityFull Compatibility = "FULL" CompatibilityFullAll Compatibility = "FULL_ALL" )
Enum values for Compatibility
func (Compatibility) Values ¶ added in v0.30.0
func (Compatibility) Values() []Compatibility
Values returns all known values for Compatibility. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CompressionType ¶ added in v1.25.0
type CompressionType string
const ( CompressionTypeGzip CompressionType = "gzip" CompressionTypeBzip2 CompressionType = "bzip2" )
Enum values for CompressionType
func (CompressionType) Values ¶ added in v1.25.0
func (CompressionType) Values() []CompressionType
Values returns all known values for CompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ComputationType ¶ added in v1.101.0
type ComputationType string
const ( ComputationTypeFull ComputationType = "FULL" ComputationTypeIncremental ComputationType = "INCREMENTAL" )
Enum values for ComputationType
func (ComputationType) Values ¶ added in v1.101.0
func (ComputationType) Values() []ComputationType
Values returns all known values for ComputationType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ComputeEnvironment ¶ added in v1.103.0
type ComputeEnvironment string
const ( ComputeEnvironmentSpark ComputeEnvironment = "SPARK" ComputeEnvironmentAthena ComputeEnvironment = "ATHENA" ComputeEnvironmentPython ComputeEnvironment = "PYTHON" )
Enum values for ComputeEnvironment
func (ComputeEnvironment) Values ¶ added in v1.103.0
func (ComputeEnvironment) Values() []ComputeEnvironment
Values returns all known values for ComputeEnvironment. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ComputeEnvironmentConfiguration ¶ added in v1.103.0
type ComputeEnvironmentConfiguration struct { // The type of compute environment. // // This member is required. ComputeEnvironment ComputeEnvironment // The connection option name overrides for the compute environment. // // This member is required. ConnectionOptionNameOverrides map[string]string // The parameters used as connection options for the compute environment. // // This member is required. ConnectionOptions map[string]Property // The connection properties that are required as overrides for the compute // environment. // // This member is required. ConnectionPropertiesRequiredOverrides []string // The connection property name overrides for the compute environment. // // This member is required. ConnectionPropertyNameOverrides map[string]string // A description of the compute environment. // // This member is required. Description *string // A name for the compute environment configuration. // // This member is required. Name *string // The supported authentication types for the compute environment. // // This member is required. SupportedAuthenticationTypes []AuthenticationType // Indicates whether PhysicalConnectionProperties are required for the compute // environment. PhysicalConnectionPropertiesRequired *bool // contains filtered or unexported fields }
An object containing configuration for a compute environment (such as Spark, Python, or Athena) returned by the DescribeConnectionType API.
type ConcurrentModificationException ¶
type ConcurrentModificationException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
Two processes are trying to modify a resource simultaneously.
func (*ConcurrentModificationException) Error ¶
func (e *ConcurrentModificationException) Error() string
func (*ConcurrentModificationException) ErrorCode ¶
func (e *ConcurrentModificationException) ErrorCode() string
func (*ConcurrentModificationException) ErrorFault ¶
func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault
func (*ConcurrentModificationException) ErrorMessage ¶
func (e *ConcurrentModificationException) ErrorMessage() string
type ConcurrentRunsExceededException ¶
type ConcurrentRunsExceededException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
Too many jobs are being run concurrently.
func (*ConcurrentRunsExceededException) Error ¶
func (e *ConcurrentRunsExceededException) Error() string
func (*ConcurrentRunsExceededException) ErrorCode ¶
func (e *ConcurrentRunsExceededException) ErrorCode() string
func (*ConcurrentRunsExceededException) ErrorFault ¶
func (e *ConcurrentRunsExceededException) ErrorFault() smithy.ErrorFault
func (*ConcurrentRunsExceededException) ErrorMessage ¶
func (e *ConcurrentRunsExceededException) ErrorMessage() string
type Condition ¶
type Condition struct { // The state of the crawler to which this condition applies. CrawlState CrawlState // The name of the crawler to which this condition applies. CrawlerName *string // The name of the job whose JobRuns this condition applies to, and on which this // trigger waits. JobName *string // A logical operator. LogicalOperator LogicalOperator // The condition state. Currently, the only job states that a trigger can listen // for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states // that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED . State JobRunState // contains filtered or unexported fields }
Defines a condition under which a trigger fires.
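A hedged sketch of a condition that fires when a particular job run succeeds (assuming the enum constants follow the package's usual naming; the job name is hypothetical):

	cond := types.Condition{
		JobName:         aws.String("nightly-etl"),   // hypothetical job the trigger waits on
		LogicalOperator: types.LogicalOperatorEquals, // compare the watched state for equality
		State:           types.JobRunStateSucceeded,  // fire when that run reaches SUCCEEDED
	}
	_ = cond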
type ConditionCheckFailureException ¶
type ConditionCheckFailureException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
A specified condition was not satisfied.
func (*ConditionCheckFailureException) Error ¶
func (e *ConditionCheckFailureException) Error() string
func (*ConditionCheckFailureException) ErrorCode ¶
func (e *ConditionCheckFailureException) ErrorCode() string
func (*ConditionCheckFailureException) ErrorFault ¶
func (e *ConditionCheckFailureException) ErrorFault() smithy.ErrorFault
func (*ConditionCheckFailureException) ErrorMessage ¶
func (e *ConditionCheckFailureException) ErrorMessage() string
type ConditionExpression ¶ added in v1.90.0
type ConditionExpression struct { // The condition of the condition expression. // // This member is required. Condition *string // The target column of the condition expressions. // // This member is required. TargetColumn *string // The value of the condition expression. Value *string // contains filtered or unexported fields }
Condition expression defined in the Glue Studio data preparation recipe node.
type ConfigurationObject ¶ added in v1.86.0
type ConfigurationObject struct { // A list of allowed values for the parameter. AllowedValues []string // A default value for the parameter. DefaultValue *string // A maximum allowed value for the parameter. MaxValue *string // A minimum allowed value for the parameter. MinValue *string // contains filtered or unexported fields }
Specifies the values that an admin sets for each job or session parameter configured in a Glue usage profile.
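For example, an administrator might bound a numeric session parameter this way (the surrounding map keyed by parameter name is an assumption about how a usage profile organizes these objects):

	workers := types.ConfigurationObject{
		DefaultValue: aws.String("10"),
		MinValue:     aws.String("2"),
		MaxValue:     aws.String("50"),
	}
	profileParams := map[string]types.ConfigurationObject{"NumberOfWorkers": workers}
	_ = profileParams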
type ConflictException ¶ added in v0.29.0
type ConflictException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The CreatePartitions API was called on a table that has indexes enabled.
func (*ConflictException) Error ¶ added in v0.29.0
func (e *ConflictException) Error() string
func (*ConflictException) ErrorCode ¶ added in v0.29.0
func (e *ConflictException) ErrorCode() string
func (*ConflictException) ErrorFault ¶ added in v0.29.0
func (e *ConflictException) ErrorFault() smithy.ErrorFault
func (*ConflictException) ErrorMessage ¶ added in v0.29.0
func (e *ConflictException) ErrorMessage() string
type ConfusionMatrix ¶
type ConfusionMatrix struct { // The number of matches in the data that the transform didn't find, in the // confusion matrix for your transform. NumFalseNegatives *int64 // The number of nonmatches in the data that the transform incorrectly classified // as a match, in the confusion matrix for your transform. NumFalsePositives *int64 // The number of nonmatches in the data that the transform correctly rejected, in // the confusion matrix for your transform. NumTrueNegatives *int64 // The number of matches in the data that the transform correctly found, in the // confusion matrix for your transform. NumTruePositives *int64 // contains filtered or unexported fields }
The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
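The four counts are enough to derive the usual quality measures; a short sketch (aws.ToInt64 treats a nil pointer as zero):

	func precisionRecall(m types.ConfusionMatrix) (precision, recall float64) {
		tp := float64(aws.ToInt64(m.NumTruePositives))
		fp := float64(aws.ToInt64(m.NumFalsePositives))
		fn := float64(aws.ToInt64(m.NumFalseNegatives))
		if tp+fp > 0 {
			precision = tp / (tp + fp) // share of predicted matches that were real matches
		}
		if tp+fn > 0 {
			recall = tp / (tp + fn) // share of real matches the transform found
		}
		return precision, recall
	}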
type Connection ¶
type Connection struct { // Connection properties specific to the Athena compute environment. AthenaProperties map[string]string // The authentication properties of the connection. AuthenticationConfiguration *AuthenticationConfiguration // A list of compute environments compatible with the connection. CompatibleComputeEnvironments []ComputeEnvironment // These key-value pairs define parameters for the connection when using the // version 1 Connection schema: // // - HOST - The host URI: either the fully qualified domain name (FQDN) or the // IPv4 address of the database host. // // - PORT - The port number, between 1024 and 65535, of the port on which the // database host is listening for database connections. // // - USER_NAME - The name under which to log in to the database. The value string // for USER_NAME is " USERNAME ". // // - PASSWORD - A password, if one is used, for the user name. // // - ENCRYPTED_PASSWORD - When you enable connection password protection by // setting ConnectionPasswordEncryption in the Data Catalog encryption settings, // this field stores the encrypted password. // // - JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of // the JAR file that contains the JDBC driver to use. // // - JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. // // - JDBC_ENGINE - The name of the JDBC engine to use. // // - JDBC_ENGINE_VERSION - The version of the JDBC engine to use. // // - CONFIG_FILES - (Reserved for future use.) // // - INSTANCE_ID - The instance ID to use. // // - JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source. // // - JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure // Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection // on the client. The default is false. // // - CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root // certificate. Glue uses this root certificate to validate the customer’s // certificate when connecting to the customer database. Glue only handles X.509 // certificates. The certificate provided must be DER-encoded and supplied in // Base64 encoding PEM format. // // - SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . Glue // validates the Signature algorithm and Subject Public Key Algorithm for the // customer certificate. The only permitted algorithms for the Signature algorithm // are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key // Algorithm, the key length must be at least 2048. You can set the value of this // property to true to skip Glue’s validation of the customer certificate. // // - CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for // domain match or distinguished name match to prevent a man-in-the-middle attack. // In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL // Server, this is used as the hostNameInCertificate . // // - CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source. // // - SECRET_ID - The secret ID used for the secret manager of credentials. // // - CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection. // // - CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection. // // - CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM // connection. 
// // - KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that // are the addresses of the Apache Kafka brokers in a Kafka cluster to which a // Kafka client will connect and bootstrap itself. // // - KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka // connection. Default value is "true". // // - KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem // format). The default is an empty string. // // - KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA // cert file or not. Glue validates for three algorithms: SHA256withRSA, // SHA384withRSA and SHA512withRSA. Default value is "false". // // - KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file // for Kafka client side authentication (Optional). // // - KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided // keystore (Optional). // // - KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this // is the password to access the client key to be used with the Kafka server side // key (Optional). // // - ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the // Kafka client keystore password (if the user has the Glue encrypt passwords // setting selected). // // - ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka // client key password (if the user has the Glue encrypt passwords setting // selected). // // - KAFKA_SASL_MECHANISM - "SCRAM-SHA-512" , "GSSAPI" , "AWS_MSK_IAM" , or // "PLAIN" . These are the supported [SASL Mechanisms]. // // - KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with // the "PLAIN" mechanism. // // - KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with // the "PLAIN" mechanism. // // - ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka // SASL PLAIN password (if the user has the Glue encrypt passwords setting // selected). // // - KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with // the "SCRAM-SHA-512" mechanism. // // - KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with // the "SCRAM-SHA-512" mechanism. // // - ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka // SASL SCRAM password (if the user has the Glue encrypt passwords setting // selected). // // - KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in // Amazon Web Services Secrets Manager. // // - KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A // keytab stores long-term keys for one or more principals. For more information, // see [MIT Kerberos Documentation: Keytab]. // // - KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. // A krb5.conf stores Kerberos configuration information, such as the location of // the KDC server. For more information, see [MIT Kerberos Documentation: krb5.conf]. // // - KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with // sasl.kerberos.service.name in your [Kafka Configuration]. // // - KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by // Glue. For more information, see [Kafka Documentation: Configuring Kafka Brokers]. // // - ROLE_ARN - The role to be used for running queries. // // - REGION - The Amazon Web Services Region where queries will be run. // // - WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or // Amazon Athena workgroup in which queries will run. 
// // - CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in // which queries will run. // // - DATABASE - The Amazon Redshift database that you are connecting to. // // [MIT Kerberos Documentation: Keytab]: https://web.mit.edu/kerberos/krb5-latest/doc/basic/keytab_def.html // [SASL Mechanisms]: https://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml // [Kafka Documentation: Configuring Kafka Brokers]: https://kafka.apache.org/documentation/#security_sasl_kerberos_clientconfig // [Kafka Configuration]: https://kafka.apache.org/documentation/#brokerconfigs_sasl.kerberos.service.name // [MIT Kerberos Documentation: krb5.conf]: https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html ConnectionProperties map[string]string // The version of the connection schema for this connection. Version 2 supports // properties for specific compute environments. ConnectionSchemaVersion *int32 // The type of the connection. Currently, SFTP is not supported. ConnectionType ConnectionType // The timestamp of the time that this connection definition was created. CreationTime *time.Time // The description of the connection. Description *string // A timestamp of the time this connection was last validated. LastConnectionValidationTime *time.Time // The user, group, or role that last updated this connection definition. LastUpdatedBy *string // The timestamp of the last time the connection definition was updated. LastUpdatedTime *time.Time // A list of criteria that can be used in selecting this connection. MatchCriteria []string // The name of the connection definition. Name *string // The physical connection requirements, such as virtual private cloud (VPC) and // SecurityGroup , that are needed to make this connection successfully. PhysicalConnectionRequirements *PhysicalConnectionRequirements // Connection properties specific to the Python compute environment. PythonProperties map[string]string // Connection properties specific to the Spark compute environment. SparkProperties map[string]string // The status of the connection. Can be one of: READY , IN_PROGRESS , or FAILED . Status ConnectionStatus // The reason for the connection status. StatusReason *string // contains filtered or unexported fields }
Defines a connection to a data source.
type ConnectionInput ¶
type ConnectionInput struct { // These key-value pairs define parameters for the connection. // // This member is required. ConnectionProperties map[string]string // The type of the connection. Currently, these types are supported: // // - JDBC - Designates a connection to a database through Java Database // Connectivity (JDBC). // // JDBC Connections use the following ConnectionParameters. // // - Required: All of ( HOST , PORT , JDBC_ENGINE ) or JDBC_CONNECTION_URL . // // - Required: All of ( USERNAME , PASSWORD ) or SECRET_ID . // // - Optional: JDBC_ENFORCE_SSL , CUSTOM_JDBC_CERT , CUSTOM_JDBC_CERT_STRING , // SKIP_CUSTOM_JDBC_CERT_VALIDATION . These parameters are used to configure SSL // with JDBC. // // - KAFKA - Designates a connection to an Apache Kafka streaming platform. // // KAFKA Connections use the following ConnectionParameters. // // - Required: KAFKA_BOOTSTRAP_SERVERS . // // - Optional: KAFKA_SSL_ENABLED , KAFKA_CUSTOM_CERT , // KAFKA_SKIP_CUSTOM_CERT_VALIDATION . These parameters are used to configure SSL // with KAFKA . // // - Optional: KAFKA_CLIENT_KEYSTORE , KAFKA_CLIENT_KEYSTORE_PASSWORD , // KAFKA_CLIENT_KEY_PASSWORD , ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD , // ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD . These parameters are used to configure // TLS client configuration with SSL in KAFKA . // // - Optional: KAFKA_SASL_MECHANISM . Can be specified as SCRAM-SHA-512 , GSSAPI // , or AWS_MSK_IAM . // // - Optional: KAFKA_SASL_SCRAM_USERNAME , KAFKA_SASL_SCRAM_PASSWORD , // ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD . These parameters are used to configure // SASL/SCRAM-SHA-512 authentication with KAFKA . // // - Optional: KAFKA_SASL_GSSAPI_KEYTAB , KAFKA_SASL_GSSAPI_KRB5_CONF , // KAFKA_SASL_GSSAPI_SERVICE , KAFKA_SASL_GSSAPI_PRINCIPAL . These parameters are // used to configure SASL/GSSAPI authentication with KAFKA . // // - MONGODB - Designates a connection to a MongoDB document database. // // MONGODB Connections use the following ConnectionParameters. // // - Required: CONNECTION_URL . // // - Required: All of ( USERNAME , PASSWORD ) or SECRET_ID . // // - VIEW_VALIDATION_REDSHIFT - Designates a connection used for view validation // by Amazon Redshift. // // - VIEW_VALIDATION_ATHENA - Designates a connection used for view validation by // Amazon Athena. // // - NETWORK - Designates a network connection to a data source within an Amazon // Virtual Private Cloud environment (Amazon VPC). // // NETWORK Connections do not require ConnectionParameters. Instead, provide a // PhysicalConnectionRequirements. // // - MARKETPLACE - Uses configuration settings contained in a connector purchased // from Amazon Web Services Marketplace to read from and write to data stores that // are not natively supported by Glue. // // MARKETPLACE Connections use the following ConnectionParameters. // // - Required: CONNECTOR_TYPE , CONNECTOR_URL , CONNECTOR_CLASS_NAME , // CONNECTION_URL . // // - Required for JDBC CONNECTOR_TYPE connections: All of ( USERNAME , PASSWORD ) // or SECRET_ID . // // - CUSTOM - Uses configuration settings contained in a custom connector to read // from and write to data stores that are not natively supported by Glue. // // Additionally, a ConnectionType for the following SaaS connectors is supported: // // - FACEBOOKADS - Designates a connection to Facebook Ads. // // - GOOGLEADS - Designates a connection to Google Ads. // // - GOOGLESHEETS - Designates a connection to Google Sheets. // // - GOOGLEANALYTICS4 - Designates a connection to Google Analytics 4. 
// // - HUBSPOT - Designates a connection to HubSpot. // // - INSTAGRAMADS - Designates a connection to Instagram Ads. // // - INTERCOM - Designates a connection to Intercom. // // - JIRACLOUD - Designates a connection to Jira Cloud. // // - MARKETO - Designates a connection to Adobe Marketo Engage. // // - NETSUITEERP - Designates a connection to Oracle NetSuite. // // - SALESFORCE - Designates a connection to Salesforce using OAuth // authentication. // // - SALESFORCEMARKETINGCLOUD - Designates a connection to Salesforce Marketing // Cloud. // // - SALESFORCEPARDOT - Designates a connection to Salesforce Marketing Cloud // Account Engagement (MCAE). // // - SAPODATA - Designates a connection to SAP OData. // // - SERVICENOW - Designates a connection to ServiceNow. // // - SLACK - Designates a connection to Slack. // // - SNAPCHATADS - Designates a connection to Snapchat Ads. // // - STRIPE - Designates a connection to Stripe. // // - ZENDESK - Designates a connection to Zendesk. // // - ZOHOCRM - Designates a connection to Zoho CRM. // // For more information on the connection parameters needed for a particular // connector, see the documentation for the connector in [Adding a Glue connection]in the Glue User Guide. // // SFTP is not supported. // // For more information about how optional ConnectionProperties are used to // configure features in Glue, consult [Glue connection properties]. // // For more information about how optional ConnectionProperties are used to // configure features in Glue Studio, consult [Using connectors and connections]. // // [Glue connection properties]: https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html // [Using connectors and connections]: https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html // [Adding a Glue connection]: https://docs.aws.amazon.com/glue/latest/dg/console-connections.html // // This member is required. ConnectionType ConnectionType // The name of the connection. // // This member is required. Name *string // Connection properties specific to the Athena compute environment. AthenaProperties map[string]string // The authentication properties of the connection. AuthenticationConfiguration *AuthenticationConfigurationInput // The description of the connection. Description *string // A list of criteria that can be used in selecting this connection. MatchCriteria []string // The physical connection requirements, such as virtual private cloud (VPC) and // SecurityGroup , that are needed to successfully make this connection. PhysicalConnectionRequirements *PhysicalConnectionRequirements // Connection properties specific to the Python compute environment. PythonProperties map[string]string // Connection properties specific to the Spark compute environment. SparkProperties map[string]string // A flag to validate the credentials during create connection. Default is true. ValidateCredentials bool // The compute environments that the specified connection properties are validated // against. ValidateForComputeEnvironments []ComputeEnvironment // contains filtered or unexported fields }
A structure that is used to specify a connection to create or update.
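Following the required-parameter rules above, a minimal JDBC connection input could look like the sketch below (the connection name, JDBC URL, and secret ID are placeholders):

	input := types.ConnectionInput{
		Name:           aws.String("orders-postgres"),
		ConnectionType: types.ConnectionTypeJdbc,
		ConnectionProperties: map[string]string{
			string(types.ConnectionPropertyKeyJdbcConnectionUrl): "jdbc:postgresql://db.example.internal:5432/orders",
			string(types.ConnectionPropertyKeySecretId):          "glue/orders-postgres", // credentials kept in Secrets Manager
			string(types.ConnectionPropertyKeyJdbcEnforceSsl):    "true",
		},
	}
	_ = input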
type ConnectionPasswordEncryption ¶
type ConnectionPasswordEncryption struct { // When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords // remain encrypted in the responses of GetConnection and GetConnections . This // encryption takes effect independently from catalog encryption. // // This member is required. ReturnConnectionPasswordEncrypted bool // A KMS key that is used to encrypt the connection password. // // If connection password protection is enabled, the caller of CreateConnection // and UpdateConnection needs at least kms:Encrypt permission on the specified KMS // key, to encrypt passwords before storing them in the Data Catalog. // // You can set the decrypt permission to enable or restrict access on the password // key according to your security requirements. AwsKmsKeyId *string // contains filtered or unexported fields }
The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.
When a CreateConnection request arrives containing a password, the Data Catalog first encrypts the password using your KMS key. It then encrypts the whole connection object again if catalog encryption is also enabled.
This encryption requires that you set KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you might want only administrators to have decrypt permission on the password key.
type ConnectionPropertyKey ¶
type ConnectionPropertyKey string
const ( ConnectionPropertyKeyHost ConnectionPropertyKey = "HOST" ConnectionPropertyKeyPort ConnectionPropertyKey = "PORT" ConnectionPropertyKeyUserName ConnectionPropertyKey = "USERNAME" ConnectionPropertyKeyPassword ConnectionPropertyKey = "PASSWORD" ConnectionPropertyKeyEncryptedPassword ConnectionPropertyKey = "ENCRYPTED_PASSWORD" ConnectionPropertyKeyJdbcDriverJarUri ConnectionPropertyKey = "JDBC_DRIVER_JAR_URI" ConnectionPropertyKeyJdbcDriverClassName ConnectionPropertyKey = "JDBC_DRIVER_CLASS_NAME" ConnectionPropertyKeyJdbcEngine ConnectionPropertyKey = "JDBC_ENGINE" ConnectionPropertyKeyJdbcEngineVersion ConnectionPropertyKey = "JDBC_ENGINE_VERSION" ConnectionPropertyKeyConfigFiles ConnectionPropertyKey = "CONFIG_FILES" ConnectionPropertyKeyInstanceId ConnectionPropertyKey = "INSTANCE_ID" ConnectionPropertyKeyJdbcConnectionUrl ConnectionPropertyKey = "JDBC_CONNECTION_URL" ConnectionPropertyKeyJdbcEnforceSsl ConnectionPropertyKey = "JDBC_ENFORCE_SSL" ConnectionPropertyKeyCustomJdbcCert ConnectionPropertyKey = "CUSTOM_JDBC_CERT" ConnectionPropertyKeySkipCustomJdbcCertValidation ConnectionPropertyKey = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" ConnectionPropertyKeyCustomJdbcCertString ConnectionPropertyKey = "CUSTOM_JDBC_CERT_STRING" ConnectionPropertyKeyConnectionUrl ConnectionPropertyKey = "CONNECTION_URL" ConnectionPropertyKeyKafkaBootstrapServers ConnectionPropertyKey = "KAFKA_BOOTSTRAP_SERVERS" ConnectionPropertyKeyKafkaSslEnabled ConnectionPropertyKey = "KAFKA_SSL_ENABLED" ConnectionPropertyKeyKafkaCustomCert ConnectionPropertyKey = "KAFKA_CUSTOM_CERT" ConnectionPropertyKeyKafkaSkipCustomCertValidation ConnectionPropertyKey = "KAFKA_SKIP_CUSTOM_CERT_VALIDATION" ConnectionPropertyKeyKafkaClientKeystore ConnectionPropertyKey = "KAFKA_CLIENT_KEYSTORE" ConnectionPropertyKeyKafkaClientKeystorePassword ConnectionPropertyKey = "KAFKA_CLIENT_KEYSTORE_PASSWORD" ConnectionPropertyKeyKafkaClientKeyPassword ConnectionPropertyKey = "KAFKA_CLIENT_KEY_PASSWORD" ConnectionPropertyKeyEncryptedKafkaClientKeystorePassword ConnectionPropertyKey = "ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD" ConnectionPropertyKeyEncryptedKafkaClientKeyPassword ConnectionPropertyKey = "ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD" ConnectionPropertyKeyKafkaSaslMechanism ConnectionPropertyKey = "KAFKA_SASL_MECHANISM" ConnectionPropertyKeyKafkaSaslPlainUsername ConnectionPropertyKey = "KAFKA_SASL_PLAIN_USERNAME" ConnectionPropertyKeyKafkaSaslPlainPassword ConnectionPropertyKey = "KAFKA_SASL_PLAIN_PASSWORD" ConnectionPropertyKeyEncryptedKafkaSaslPlainPassword ConnectionPropertyKey = "ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD" ConnectionPropertyKeyKafkaSaslScramUsername ConnectionPropertyKey = "KAFKA_SASL_SCRAM_USERNAME" ConnectionPropertyKeyKafkaSaslScramPassword ConnectionPropertyKey = "KAFKA_SASL_SCRAM_PASSWORD" ConnectionPropertyKeyKafkaSaslScramSecretsArn ConnectionPropertyKey = "KAFKA_SASL_SCRAM_SECRETS_ARN" ConnectionPropertyKeyEncryptedKafkaSaslScramPassword ConnectionPropertyKey = "ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD" ConnectionPropertyKeyKafkaSaslGssapiKeytab ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_KEYTAB" ConnectionPropertyKeyKafkaSaslGssapiKrb5Conf ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_KRB5_CONF" ConnectionPropertyKeyKafkaSaslGssapiService ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_SERVICE" ConnectionPropertyKeyKafkaSaslGssapiPrincipal ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_PRINCIPAL" ConnectionPropertyKeySecretId ConnectionPropertyKey = "SECRET_ID" ConnectionPropertyKeyConnectorUrl ConnectionPropertyKey = "CONNECTOR_URL" 
ConnectionPropertyKeyConnectorType ConnectionPropertyKey = "CONNECTOR_TYPE" ConnectionPropertyKeyConnectorClassName ConnectionPropertyKey = "CONNECTOR_CLASS_NAME" ConnectionPropertyKeyEndpoint ConnectionPropertyKey = "ENDPOINT" ConnectionPropertyKeyEndpointType ConnectionPropertyKey = "ENDPOINT_TYPE" ConnectionPropertyKeyRoleArn ConnectionPropertyKey = "ROLE_ARN" ConnectionPropertyKeyRegion ConnectionPropertyKey = "REGION" ConnectionPropertyKeyWorkgroupName ConnectionPropertyKey = "WORKGROUP_NAME" ConnectionPropertyKeyClusterIdentifier ConnectionPropertyKey = "CLUSTER_IDENTIFIER" ConnectionPropertyKeyDatabase ConnectionPropertyKey = "DATABASE" )
Enum values for ConnectionPropertyKey
func (ConnectionPropertyKey) Values ¶ added in v0.29.0
func (ConnectionPropertyKey) Values() []ConnectionPropertyKey
Values returns all known values for ConnectionPropertyKey. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ConnectionStatus ¶ added in v1.83.0
type ConnectionStatus string
const ( ConnectionStatusReady ConnectionStatus = "READY" ConnectionStatusInProgress ConnectionStatus = "IN_PROGRESS" ConnectionStatusFailed ConnectionStatus = "FAILED" )
Enum values for ConnectionStatus
func (ConnectionStatus) Values ¶ added in v1.83.0
func (ConnectionStatus) Values() []ConnectionStatus
Values returns all known values for ConnectionStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ConnectionType ¶
type ConnectionType string
const ( ConnectionTypeJdbc ConnectionType = "JDBC" ConnectionTypeSftp ConnectionType = "SFTP" ConnectionTypeMongodb ConnectionType = "MONGODB" ConnectionTypeKafka ConnectionType = "KAFKA" ConnectionTypeNetwork ConnectionType = "NETWORK" ConnectionTypeMarketplace ConnectionType = "MARKETPLACE" ConnectionTypeCustom ConnectionType = "CUSTOM" ConnectionTypeSalesforce ConnectionType = "SALESFORCE" ConnectionTypeViewValidationRedshift ConnectionType = "VIEW_VALIDATION_REDSHIFT" ConnectionTypeViewValidationAthena ConnectionType = "VIEW_VALIDATION_ATHENA" ConnectionTypeGoogleads ConnectionType = "GOOGLEADS" ConnectionTypeGooglesheets ConnectionType = "GOOGLESHEETS" ConnectionTypeGoogleanalytics4 ConnectionType = "GOOGLEANALYTICS4" ConnectionTypeServicenow ConnectionType = "SERVICENOW" ConnectionTypeMarketo ConnectionType = "MARKETO" ConnectionTypeSapodata ConnectionType = "SAPODATA" ConnectionTypeZendesk ConnectionType = "ZENDESK" ConnectionTypeJiracloud ConnectionType = "JIRACLOUD" ConnectionTypeNetsuiteerp ConnectionType = "NETSUITEERP" ConnectionTypeHubspot ConnectionType = "HUBSPOT" ConnectionTypeFacebookads ConnectionType = "FACEBOOKADS" ConnectionTypeInstagramads ConnectionType = "INSTAGRAMADS" ConnectionTypeZohocrm ConnectionType = "ZOHOCRM" ConnectionTypeSalesforcepardot ConnectionType = "SALESFORCEPARDOT" ConnectionTypeSalesforcemarketingcloud ConnectionType = "SALESFORCEMARKETINGCLOUD" ConnectionTypeSlack ConnectionType = "SLACK" ConnectionTypeStripe ConnectionType = "STRIPE" ConnectionTypeIntercom ConnectionType = "INTERCOM" ConnectionTypeSnapchatads ConnectionType = "SNAPCHATADS" )
Enum values for ConnectionType
func (ConnectionType) Values ¶ added in v0.29.0
func (ConnectionType) Values() []ConnectionType
Values returns all known values for ConnectionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ConnectionTypeBrief ¶ added in v1.103.0
type ConnectionTypeBrief struct { // The supported authentication types, data interface types (compute // environments), and data operations of the connector. Capabilities *Capabilities // The name of the connection type. ConnectionType ConnectionType // A description of the connection type. Description *string // contains filtered or unexported fields }
Brief information about a supported connection type returned by the ListConnectionTypes API.
type ConnectionsList ¶
type ConnectionsList struct { // A list of connections used by the job. Connections []string // contains filtered or unexported fields }
Specifies the connections used by a job.
type ConnectorDataSource ¶ added in v1.67.0
type ConnectorDataSource struct { // The connectionType , as provided to the underlying Glue library. This node type // supports the following connection types: // // - opensearch // // - azuresql // // - azurecosmos // // - bigquery // // - saphana // // - teradata // // - vertica // // This member is required. ConnectionType *string // A map specifying connection options for the node. You can find standard // connection options for the corresponding connection type in the [Connection parameters]section of the // Glue documentation. // // [Connection parameters]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-connect.html // // This member is required. Data map[string]string // The name of this source node. // // This member is required. Name *string // Specifies the data schema for this source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a source generated with standard connection options.
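A hedged sketch of a source node for one of the listed connection types; the option keys in Data ( connectionName and dbtable ) are examples of standard connection options and not an authoritative list:

	src := types.ConnectorDataSource{
		Name:           aws.String("teradata_orders"), // node name shown in the job graph
		ConnectionType: aws.String("teradata"),        // one of the supported connection types above
		Data: map[string]string{
			"connectionName": "my-teradata-connection", // illustrative option keys; see Connection parameters
			"dbtable":        "sales.orders",
		},
	}
	_ = src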
type ConnectorDataTarget ¶ added in v1.67.0
type ConnectorDataTarget struct { // The connectionType , as provided to the underlying Glue library. This node type // supports the following connection types: // // - opensearch // // - azuresql // // - azurecosmos // // - bigquery // // - saphana // // - teradata // // - vertica // // This member is required. ConnectionType *string // A map specifying connection options for the node. You can find standard // connection options for the corresponding connection type in the [Connection parameters]section of the // Glue documentation. // // [Connection parameters]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-connect.html // // This member is required. Data map[string]string // The name of this target node. // // This member is required. Name *string // The nodes that are inputs to the data target. Inputs []string // contains filtered or unexported fields }
Specifies a target generated with standard connection options.
type Crawl ¶
type Crawl struct { // The date and time on which the crawl completed. CompletedOn *time.Time // The error message associated with the crawl. ErrorMessage *string // The log group associated with the crawl. LogGroup *string // The log stream associated with the crawl. LogStream *string // The date and time on which the crawl started. StartedOn *time.Time // The state of the crawler. State CrawlState // contains filtered or unexported fields }
The details of a crawl in the workflow.
type CrawlState ¶
type CrawlState string
const ( CrawlStateRunning CrawlState = "RUNNING" CrawlStateCancelling CrawlState = "CANCELLING" CrawlStateCancelled CrawlState = "CANCELLED" CrawlStateSucceeded CrawlState = "SUCCEEDED" CrawlStateFailed CrawlState = "FAILED" CrawlStateError CrawlState = "ERROR" )
Enum values for CrawlState
func (CrawlState) Values ¶ added in v0.29.0
func (CrawlState) Values() []CrawlState
Values returns all known values for CrawlState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Crawler ¶
type Crawler struct { // A list of UTF-8 strings that specify the custom classifiers that are associated // with the crawler. Classifiers []string // Crawler configuration information. This versioned JSON string allows users to // specify aspects of a crawler's behavior. For more information, see [Setting crawler configuration options]. // // [Setting crawler configuration options]: https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html Configuration *string // If the crawler is running, contains the total time elapsed since the last crawl // began. CrawlElapsedTime int64 // The name of the SecurityConfiguration structure to be used by this crawler. CrawlerSecurityConfiguration *string // The time that the crawler was created. CreationTime *time.Time // The name of the database in which the crawler's output is stored. DatabaseName *string // A description of the crawler. Description *string // Specifies whether the crawler should use Lake Formation credentials for the // crawler instead of the IAM role credentials. LakeFormationConfiguration *LakeFormationConfiguration // The status of the last crawl, and potentially error information if an error // occurred. LastCrawl *LastCrawlInfo // The time that the crawler was last updated. LastUpdated *time.Time // A configuration that specifies whether data lineage is enabled for the crawler. LineageConfiguration *LineageConfiguration // The name of the crawler. Name *string // A policy that specifies whether to crawl the entire dataset again, or to crawl // only folders that were added since the last crawler run. RecrawlPolicy *RecrawlPolicy // The Amazon Resource Name (ARN) of an IAM role that's used to access customer // resources, such as Amazon Simple Storage Service (Amazon S3) data. Role *string // For scheduled crawlers, the schedule when the crawler runs. Schedule *Schedule // The policy that specifies update and delete behaviors for the crawler. SchemaChangePolicy *SchemaChangePolicy // Indicates whether the crawler is running, or whether a run is pending. State CrawlerState // The prefix added to the names of tables that are created. TablePrefix *string // A collection of targets to crawl. Targets *CrawlerTargets // The version of the crawler. Version int64 // contains filtered or unexported fields }
Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the Glue Data Catalog.
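A Crawler value is normally obtained from the service rather than constructed. The sketch below assumes the service client from github.com/aws/aws-sdk-go-v2/service/glue and its GetCrawler operation, and reads a few of the fields listed above; which fields are populated depends on the crawler's configuration and run history.

// Assumes:
//   import (
//       "context"
//       "fmt"
//
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// describeCrawler prints a short status line for one crawler.
func describeCrawler(ctx context.Context, client *glue.Client, name string) error {
    out, err := client.GetCrawler(ctx, &glue.GetCrawlerInput{Name: aws.String(name)})
    if err != nil {
        return err
    }
    c := out.Crawler
    fmt.Printf("crawler %s state=%s\n", aws.ToString(c.Name), c.State)
    if c.State == types.CrawlerStateRunning {
        fmt.Printf("elapsed since last crawl began: %d\n", c.CrawlElapsedTime)
    }
    if c.LastCrawl != nil {
        fmt.Printf("last crawl status: %s\n", c.LastCrawl.Status)
    }
    return nil
}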
type CrawlerHistory ¶ added in v1.26.0
type CrawlerHistory struct { // A UUID identifier for each crawl. CrawlId *string // The number of data processing units (DPU) used in hours for the crawl. DPUHour float64 // The date and time on which the crawl ended. EndTime *time.Time // If an error occurred, the error message associated with the crawl. ErrorMessage *string // The log group associated with the crawl. LogGroup *string // The log stream associated with the crawl. LogStream *string // The prefix for a CloudWatch message about this crawl. MessagePrefix *string // The date and time on which the crawl started. StartTime *time.Time // The state of the crawl. State CrawlerHistoryState // A run summary for the specific crawl in JSON. Contains the catalog tables and // partitions that were added, updated, or deleted. Summary *string // contains filtered or unexported fields }
Contains the information for a run of a crawler.
type CrawlerHistoryState ¶ added in v1.26.0
type CrawlerHistoryState string
const ( CrawlerHistoryStateRunning CrawlerHistoryState = "RUNNING" CrawlerHistoryStateCompleted CrawlerHistoryState = "COMPLETED" CrawlerHistoryStateFailed CrawlerHistoryState = "FAILED" CrawlerHistoryStateStopped CrawlerHistoryState = "STOPPED" )
Enum values for CrawlerHistoryState
func (CrawlerHistoryState) Values ¶ added in v1.26.0
func (CrawlerHistoryState) Values() []CrawlerHistoryState
Values returns all known values for CrawlerHistoryState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CrawlerLineageSettings ¶ added in v0.31.0
type CrawlerLineageSettings string
const ( CrawlerLineageSettingsEnable CrawlerLineageSettings = "ENABLE" CrawlerLineageSettingsDisable CrawlerLineageSettings = "DISABLE" )
Enum values for CrawlerLineageSettings
func (CrawlerLineageSettings) Values ¶ added in v0.31.0
func (CrawlerLineageSettings) Values() []CrawlerLineageSettings
Values returns all known values for CrawlerLineageSettings. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CrawlerMetrics ¶
type CrawlerMetrics struct { // The name of the crawler. CrawlerName *string // The duration of the crawler's most recent run, in seconds. LastRuntimeSeconds float64 // The median duration of this crawler's runs, in seconds. MedianRuntimeSeconds float64 // True if the crawler is still estimating how long it will take to complete this // run. StillEstimating bool // The number of tables created by this crawler. TablesCreated int32 // The number of tables deleted by this crawler. TablesDeleted int32 // The number of tables updated by this crawler. TablesUpdated int32 // The estimated time left to complete a running crawl. TimeLeftSeconds float64 // contains filtered or unexported fields }
Metrics for a specified crawler.
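These metrics are returned by the GetCrawlerMetrics operation. A hedged sketch, assuming the glue service client, its GetCrawlerMetrics call, and the CrawlerNameList and CrawlerMetricsList members on the request and response:

// Assumes:
//   import (
//       "context"
//       "fmt"
//
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue"
//   )

// printCrawlerMetrics prints runtimes and table counts for the named crawlers.
func printCrawlerMetrics(ctx context.Context, client *glue.Client, names []string) error {
    out, err := client.GetCrawlerMetrics(ctx, &glue.GetCrawlerMetricsInput{
        CrawlerNameList: names,
    })
    if err != nil {
        return err
    }
    for _, m := range out.CrawlerMetricsList {
        fmt.Printf("%s: last=%.0fs median=%.0fs tables created/updated/deleted=%d/%d/%d\n",
            aws.ToString(m.CrawlerName),
            m.LastRuntimeSeconds, m.MedianRuntimeSeconds,
            m.TablesCreated, m.TablesUpdated, m.TablesDeleted)
    }
    return nil
}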
type CrawlerNodeDetails ¶
type CrawlerNodeDetails struct { // A list of crawls represented by the crawl node. Crawls []Crawl // contains filtered or unexported fields }
The details of a Crawler node present in the workflow.
type CrawlerNotRunningException ¶
type CrawlerNotRunningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The specified crawler is not running.
func (*CrawlerNotRunningException) Error ¶
func (e *CrawlerNotRunningException) Error() string
func (*CrawlerNotRunningException) ErrorCode ¶
func (e *CrawlerNotRunningException) ErrorCode() string
func (*CrawlerNotRunningException) ErrorFault ¶
func (e *CrawlerNotRunningException) ErrorFault() smithy.ErrorFault
func (*CrawlerNotRunningException) ErrorMessage ¶
func (e *CrawlerNotRunningException) ErrorMessage() string
type CrawlerRunningException ¶
type CrawlerRunningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The operation cannot be performed because the crawler is already running.
func (*CrawlerRunningException) Error ¶
func (e *CrawlerRunningException) Error() string
func (*CrawlerRunningException) ErrorCode ¶
func (e *CrawlerRunningException) ErrorCode() string
func (*CrawlerRunningException) ErrorFault ¶
func (e *CrawlerRunningException) ErrorFault() smithy.ErrorFault
func (*CrawlerRunningException) ErrorMessage ¶
func (e *CrawlerRunningException) ErrorMessage() string
type CrawlerState ¶
type CrawlerState string
const ( CrawlerStateReady CrawlerState = "READY" CrawlerStateRunning CrawlerState = "RUNNING" CrawlerStateStopping CrawlerState = "STOPPING" )
Enum values for CrawlerState
func (CrawlerState) Values ¶ added in v0.29.0
func (CrawlerState) Values() []CrawlerState
Values returns all known values for CrawlerState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CrawlerStoppingException ¶
type CrawlerStoppingException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The specified crawler is stopping.
func (*CrawlerStoppingException) Error ¶
func (e *CrawlerStoppingException) Error() string
func (*CrawlerStoppingException) ErrorCode ¶
func (e *CrawlerStoppingException) ErrorCode() string
func (*CrawlerStoppingException) ErrorFault ¶
func (e *CrawlerStoppingException) ErrorFault() smithy.ErrorFault
func (*CrawlerStoppingException) ErrorMessage ¶
func (e *CrawlerStoppingException) ErrorMessage() string
type CrawlerTargets ¶
type CrawlerTargets struct { // Specifies Glue Data Catalog targets. CatalogTargets []CatalogTarget // Specifies Delta data store targets. DeltaTargets []DeltaTarget // Specifies Amazon DynamoDB targets. DynamoDBTargets []DynamoDBTarget // Specifies Apache Hudi data store targets. HudiTargets []HudiTarget // Specifies Apache Iceberg data store targets. IcebergTargets []IcebergTarget // Specifies JDBC targets. JdbcTargets []JdbcTarget // Specifies Amazon DocumentDB or MongoDB targets. MongoDBTargets []MongoDBTarget // Specifies Amazon Simple Storage Service (Amazon S3) targets. S3Targets []S3Target // contains filtered or unexported fields }
Specifies data stores to crawl.
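A sketch of assembling crawl targets for one crawler. Bucket names, paths, and table names are placeholders; the DeltaTarget and DynamoDBTarget fields are the ones documented below, and the Path field on S3Target is assumed here.

// Assumes:
//   import (
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// exampleTargets combines an S3 prefix, a DynamoDB table, and a Delta table
// in one CrawlerTargets value.
func exampleTargets() *types.CrawlerTargets {
    return &types.CrawlerTargets{
        S3Targets: []types.S3Target{
            {Path: aws.String("s3://example-bucket/raw/")},
        },
        DynamoDBTargets: []types.DynamoDBTarget{
            {Path: aws.String("example-table"), ScanAll: aws.Bool(false)},
        },
        DeltaTargets: []types.DeltaTarget{
            {
                DeltaTables:   []string{"s3://example-bucket/delta/events/"},
                WriteManifest: aws.Bool(false),
            },
        },
    }
}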
type CrawlsFilter ¶ added in v1.26.0
type CrawlsFilter struct { // A key used to filter the crawler runs for a specified crawler. Valid values for // each of the field names are: // // - CRAWL_ID : A string representing the UUID identifier for a crawl. // // - STATE : A string representing the state of the crawl. // // - START_TIME and END_TIME : The epoch timestamp in milliseconds. // // - DPU_HOUR : The number of data processing unit (DPU) hours used for the crawl. FieldName FieldName // The value provided for comparison on the crawl field. FieldValue *string // A defined comparator that operates on the value. The available operators are: // // - GT : Greater than. // // - GE : Greater than or equal to. // // - LT : Less than. // // - LE : Less than or equal to. // // - EQ : Equal to. // // - NE : Not equal to. FilterOperator FilterOperator // contains filtered or unexported fields }
A list of fields, comparators, and values that you can use to filter the crawler runs for a specified crawler.
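CrawlsFilter values are passed to the ListCrawls operation. In the sketch below the field-name and operator strings come from the lists above; typed constants for FieldName and FilterOperator also exist in this package, but string conversions keep the example grounded in the documented values. The ListCrawls call and its Crawls response member are assumed.

// Assumes:
//   import (
//       "context"
//
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// failedCrawls lists crawl history entries for a crawler whose state is FAILED.
func failedCrawls(ctx context.Context, client *glue.Client, crawler string) ([]types.CrawlerHistory, error) {
    out, err := client.ListCrawls(ctx, &glue.ListCrawlsInput{
        CrawlerName: aws.String(crawler),
        Filters: []types.CrawlsFilter{
            {
                FieldName:      types.FieldName("STATE"),
                FilterOperator: types.FilterOperator("EQ"),
                FieldValue:     aws.String("FAILED"),
            },
        },
    })
    if err != nil {
        return nil, err
    }
    return out.Crawls, nil
}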
type CreateCsvClassifierRequest ¶
type CreateCsvClassifierRequest struct { // The name of the classifier. // // This member is required. Name *string // Enables the processing of files that contain only one column. AllowSingleColumn *bool // Indicates whether the CSV file contains a header. ContainsHeader CsvHeaderOption // Enables the configuration of custom datatypes. CustomDatatypeConfigured *bool // Creates a list of supported custom datatypes. CustomDatatypes []string // A custom symbol to denote what separates each column entry in the row. Delimiter *string // Specifies not to trim values before identifying the type of column values. The // default value is true. DisableValueTrimming *bool // A list of strings representing column names. Header []string // A custom symbol to denote what combines content into a single column value. // Must be different from the column delimiter. QuoteSymbol *string // Sets the SerDe for processing CSV in the classifier, which will be applied in // the Data Catalog. Valid values are OpenCSVSerDe , LazySimpleSerDe , and None . // You can specify the None value when you want the crawler to do the detection. Serde CsvSerdeOption // contains filtered or unexported fields }
Specifies a custom CSV classifier for CreateClassifier to create.
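A sketch of registering a CSV classifier; the CreateClassifier operation and its CreateClassifierInput wrapper are assumed, and the classifier name and delimiter are placeholders.

// Assumes:
//   import (
//       "context"
//
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// createPipeDelimitedClassifier registers a classifier for pipe-delimited
// CSV files that carry a header row.
func createPipeDelimitedClassifier(ctx context.Context, client *glue.Client) error {
    _, err := client.CreateClassifier(ctx, &glue.CreateClassifierInput{
        CsvClassifier: &types.CreateCsvClassifierRequest{
            Name:           aws.String("pipe-delimited"),
            Delimiter:      aws.String("|"),
            QuoteSymbol:    aws.String("\""),
            ContainsHeader: types.CsvHeaderOptionPresent,
            Serde:          types.CsvSerdeOptionOpenCSVSerDe,
        },
    })
    return err
}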
type CreateGrokClassifierRequest ¶
type CreateGrokClassifierRequest struct { // An identifier of the data format that the classifier matches, such as Twitter, // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. // // This member is required. Classification *string // The grok pattern used by this classifier. // // This member is required. GrokPattern *string // The name of the new classifier. // // This member is required. Name *string // Optional custom grok patterns used by this classifier. CustomPatterns *string // contains filtered or unexported fields }
Specifies a grok classifier for CreateClassifier to create.
type CreateJsonClassifierRequest ¶
type CreateJsonClassifierRequest struct { // A JsonPath string defining the JSON data for the classifier to classify. Glue // supports a subset of JsonPath, as described in [Writing JsonPath Custom Classifiers]. // // [Writing JsonPath Custom Classifiers]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json // // This member is required. JsonPath *string // The name of the classifier. // // This member is required. Name *string // contains filtered or unexported fields }
Specifies a JSON classifier for CreateClassifier to create.
type CreateXMLClassifierRequest ¶
type CreateXMLClassifierRequest struct { // An identifier of the data format that the classifier matches. // // This member is required. Classification *string // The name of the classifier. // // This member is required. Name *string // The XML tag designating the element that contains each record in an XML // document being parsed. This can't identify a self-closing element (closed by /> // ). An empty row element that contains only attributes can be parsed as long as // it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string // contains filtered or unexported fields }
Specifies an XML classifier for CreateClassifier to create.
type CsvClassifier ¶
type CsvClassifier struct { // The name of the classifier. // // This member is required. Name *string // Enables the processing of files that contain only one column. AllowSingleColumn *bool // Indicates whether the CSV file contains a header. ContainsHeader CsvHeaderOption // The time that this classifier was registered. CreationTime *time.Time // Enables the custom datatype to be configured. CustomDatatypeConfigured *bool // A list of custom datatypes including "BINARY", "BOOLEAN", "DATE", "DECIMAL", // "DOUBLE", "FLOAT", "INT", "LONG", "SHORT", "STRING", "TIMESTAMP". CustomDatatypes []string // A custom symbol to denote what separates each column entry in the row. Delimiter *string // Specifies not to trim values before identifying the type of column values. The // default value is true . DisableValueTrimming *bool // A list of strings representing column names. Header []string // The time that this classifier was last updated. LastUpdated *time.Time // A custom symbol to denote what combines content into a single column value. It // must be different from the column delimiter. QuoteSymbol *string // Sets the SerDe for processing CSV in the classifier, which will be applied in // the Data Catalog. Valid values are OpenCSVSerDe , LazySimpleSerDe , and None . // You can specify the None value when you want the crawler to do the detection. Serde CsvSerdeOption // The version of this classifier. Version int64 // contains filtered or unexported fields }
A classifier for custom CSV content.
type CsvHeaderOption ¶
type CsvHeaderOption string
const ( CsvHeaderOptionUnknown CsvHeaderOption = "UNKNOWN" CsvHeaderOptionPresent CsvHeaderOption = "PRESENT" CsvHeaderOptionAbsent CsvHeaderOption = "ABSENT" )
Enum values for CsvHeaderOption
func (CsvHeaderOption) Values ¶ added in v0.29.0
func (CsvHeaderOption) Values() []CsvHeaderOption
Values returns all known values for CsvHeaderOption. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CsvSerdeOption ¶ added in v1.61.0
type CsvSerdeOption string
const ( CsvSerdeOptionOpenCSVSerDe CsvSerdeOption = "OpenCSVSerDe" CsvSerdeOptionLazySimpleSerDe CsvSerdeOption = "LazySimpleSerDe" CsvSerdeOptionNone CsvSerdeOption = "None" )
Enum values for CsvSerdeOption
func (CsvSerdeOption) Values ¶ added in v1.61.0
func (CsvSerdeOption) Values() []CsvSerdeOption
Values returns all known values for CsvSerdeOption. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type CustomCode ¶ added in v1.25.0
type CustomCode struct { // The name defined for the custom code node class. // // This member is required. ClassName *string // The custom code that is used to perform the data transformation. // // This member is required. Code *string // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // Specifies the data schema for the custom code transform. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.
type CustomEntityType ¶ added in v1.24.0
type CustomEntityType struct { // A name for the custom pattern that allows it to be retrieved or deleted later. // This name must be unique per Amazon Web Services account. // // This member is required. Name *string // A regular expression string that is used for detecting sensitive data in a // custom pattern. // // This member is required. RegexString *string // A list of context words. If none of these context words are found within the // vicinity of the regular expression the data will not be detected as sensitive // data. // // If no context words are passed only a regular expression is checked. ContextWords []string // contains filtered or unexported fields }
An object representing a custom pattern for detecting sensitive data across the columns and rows of your structured data.
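A sketch of a custom sensitive-data pattern; the name, regular expression, and context words below are illustrative only.

// Assumes:
//   import (
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// employeeIDPattern matches values such as EMP-123456, but only when one of
// the context words appears near the match.
func employeeIDPattern() types.CustomEntityType {
    return types.CustomEntityType{
        Name:         aws.String("EMPLOYEE_ID"),
        RegexString:  aws.String(`EMP-\d{6}`),
        ContextWords: []string{"employee", "badge", "staff"},
    }
}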
type DQCompositeRuleEvaluationMethod ¶ added in v1.85.0
type DQCompositeRuleEvaluationMethod string
const ( DQCompositeRuleEvaluationMethodColumn DQCompositeRuleEvaluationMethod = "COLUMN" DQCompositeRuleEvaluationMethodRow DQCompositeRuleEvaluationMethod = "ROW" )
Enum values for DQCompositeRuleEvaluationMethod
func (DQCompositeRuleEvaluationMethod) Values ¶ added in v1.85.0
func (DQCompositeRuleEvaluationMethod) Values() []DQCompositeRuleEvaluationMethod
Values returns all known values for DQCompositeRuleEvaluationMethod. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DQResultsPublishingOptions ¶ added in v1.37.0
type DQResultsPublishingOptions struct { // Enable metrics for your data quality results. CloudWatchMetricsEnabled *bool // The context of the evaluation. EvaluationContext *string // Enable publishing for your data quality results. ResultsPublishingEnabled *bool // The Amazon S3 prefix prepended to the results. ResultsS3Prefix *string // contains filtered or unexported fields }
Options to configure how your data quality evaluation results are published.
type DQStopJobOnFailureOptions ¶ added in v1.37.0
type DQStopJobOnFailureOptions struct { // When to stop job if your data quality evaluation fails. Options are Immediate // or AfterDataLoad. StopJobOnFailureTiming DQStopJobOnFailureTiming // contains filtered or unexported fields }
Options to configure how your job will stop if your data quality evaluation fails.
type DQStopJobOnFailureTiming ¶ added in v1.37.0
type DQStopJobOnFailureTiming string
const ( DQStopJobOnFailureTimingImmediate DQStopJobOnFailureTiming = "Immediate" DQStopJobOnFailureTimingAfterDataLoad DQStopJobOnFailureTiming = "AfterDataLoad" )
Enum values for DQStopJobOnFailureTiming
func (DQStopJobOnFailureTiming) Values ¶ added in v1.37.0
func (DQStopJobOnFailureTiming) Values() []DQStopJobOnFailureTiming
Values returns all known values for DQStopJobOnFailureTiming. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DQTransformOutput ¶ added in v1.37.0
type DQTransformOutput string
const ( DQTransformOutputPrimaryInput DQTransformOutput = "PrimaryInput" DQTransformOutputEvaluationResults DQTransformOutput = "EvaluationResults" )
Enum values for DQTransformOutput
func (DQTransformOutput) Values ¶ added in v1.37.0
func (DQTransformOutput) Values() []DQTransformOutput
Values returns all known values for DQTransformOutput. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DataCatalogEncryptionSettings ¶
type DataCatalogEncryptionSettings struct { // When connection password protection is enabled, the Data Catalog uses a // customer-provided key to encrypt the password as part of CreateConnection or // UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection // properties. You can enable catalog encryption or only password encryption. ConnectionPasswordEncryption *ConnectionPasswordEncryption // Specifies the encryption-at-rest configuration for the Data Catalog. EncryptionAtRest *EncryptionAtRest // contains filtered or unexported fields }
Contains configuration information for maintaining Data Catalog security.
type DataFormat ¶ added in v0.30.0
type DataFormat string
const ( DataFormatAvro DataFormat = "AVRO" DataFormatJson DataFormat = "JSON" DataFormatProtobuf DataFormat = "PROTOBUF" )
Enum values for DataFormat
func (DataFormat) Values ¶ added in v0.30.0
func (DataFormat) Values() []DataFormat
Values returns all known values for DataFormat. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DataLakeAccessProperties ¶ added in v1.103.0
type DataLakeAccessProperties struct { // Specifies a federated catalog type for the native catalog resource. The // currently supported type is aws:redshift . CatalogType *string // Turns on or off data lake access for Apache Spark applications that access // Amazon Redshift databases in the Data Catalog from any non-Redshift engine, such // as Amazon Athena, Amazon EMR, or Glue ETL. DataLakeAccess bool // A role that will be assumed by Glue for transferring data into/out of the // staging bucket during a query. DataTransferRole *string // An encryption key that will be used for the staging bucket that will be created // along with the catalog. KmsKey *string // contains filtered or unexported fields }
Input properties to configure data lake access for your catalog resource in the Glue Data Catalog.
type DataLakeAccessPropertiesOutput ¶ added in v1.103.0
type DataLakeAccessPropertiesOutput struct { // Specifies a federated catalog type for the native catalog resource. The // currently supported type is aws:redshift . CatalogType *string // Turns on or off data lake access for Apache Spark applications that access // Amazon Redshift databases in the Data Catalog. DataLakeAccess bool // A role that will be assumed by Glue for transferring data into/out of the // staging bucket during a query. DataTransferRole *string // An encryption key that will be used for the staging bucket that will be created // along with the catalog. KmsKey *string // The managed Redshift Serverless compute name that is created for your catalog // resource. ManagedWorkgroupName *string // The managed Redshift Serverless compute status. ManagedWorkgroupStatus *string // The default Redshift database resource name in the managed compute. RedshiftDatabaseName *string // A message that gives more detailed information about the managed workgroup // status. StatusMessage *string // contains filtered or unexported fields }
The output properties of the data lake access configuration for your catalog resource in the Glue Data Catalog.
type DataLakePrincipal ¶
type DataLakePrincipal struct { // An identifier for the Lake Formation principal. DataLakePrincipalIdentifier *string // contains filtered or unexported fields }
The Lake Formation principal.
type DataOperation ¶ added in v1.103.0
type DataOperation string
const ( DataOperationRead DataOperation = "READ" DataOperationWrite DataOperation = "WRITE" )
Enum values for DataOperation
func (DataOperation) Values ¶ added in v1.103.0
func (DataOperation) Values() []DataOperation
Values returns all known values for DataOperation. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DataQualityAnalyzerResult ¶ added in v1.72.0
type DataQualityAnalyzerResult struct { // A description of the data quality analyzer. Description *string // A map of metrics associated with the evaluation of the analyzer. EvaluatedMetrics map[string]float64 // An evaluation message. EvaluationMessage *string // The name of the data quality analyzer. Name *string // contains filtered or unexported fields }
Describes the result of the evaluation of a data quality analyzer.
type DataQualityEncryption ¶ added in v1.104.0
type DataQualityEncryption struct { // The encryption mode to use for encrypting Data Quality assets. These assets // include data quality rulesets, results, statistics, anomaly detection models and // observations. // // Valid values are SSEKMS for encryption using a customer-managed KMS key, or // DISABLED . DataQualityEncryptionMode DataQualityEncryptionMode // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string // contains filtered or unexported fields }
Specifies how Data Quality assets in your account should be encrypted.
type DataQualityEncryptionMode ¶ added in v1.104.0
type DataQualityEncryptionMode string
const ( DataQualityEncryptionModeDisabled DataQualityEncryptionMode = "DISABLED" DataQualityEncryptionModeSsekms DataQualityEncryptionMode = "SSE-KMS" )
Enum values for DataQualityEncryptionMode
func (DataQualityEncryptionMode) Values ¶ added in v1.104.0
func (DataQualityEncryptionMode) Values() []DataQualityEncryptionMode
Values returns all known values for DataQualityEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DataQualityEvaluationRunAdditionalRunOptions ¶ added in v1.37.0
type DataQualityEvaluationRunAdditionalRunOptions struct { // Whether or not to enable CloudWatch metrics. CloudWatchMetricsEnabled *bool // Set the evaluation method for composite rules in the ruleset to ROW/COLUMN CompositeRuleEvaluationMethod DQCompositeRuleEvaluationMethod // Prefix for Amazon S3 to store results. ResultsS3Prefix *string // contains filtered or unexported fields }
Additional run options you can specify for an evaluation run.
type DataQualityMetricValues ¶ added in v1.72.0
type DataQualityMetricValues struct { // The actual value of the data quality metric. ActualValue *float64 // The expected value of the data quality metric according to the analysis of // historical data. ExpectedValue *float64 // The lower limit of the data quality metric value according to the analysis of // historical data. LowerLimit *float64 // The upper limit of the data quality metric value according to the analysis of // historical data. UpperLimit *float64 // contains filtered or unexported fields }
Describes the data quality metric value according to the analysis of historical data.
type DataQualityModelStatus ¶ added in v1.92.0
type DataQualityModelStatus string
const ( DataQualityModelStatusRunning DataQualityModelStatus = "RUNNING" DataQualityModelStatusSucceeded DataQualityModelStatus = "SUCCEEDED" DataQualityModelStatusFailed DataQualityModelStatus = "FAILED" )
Enum values for DataQualityModelStatus
func (DataQualityModelStatus) Values ¶ added in v1.92.0
func (DataQualityModelStatus) Values() []DataQualityModelStatus
Values returns all known values for DataQualityModelStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DataQualityObservation ¶ added in v1.72.0
type DataQualityObservation struct { // A description of the data quality observation. Description *string // An object of type MetricBasedObservation representing the observation that is // based on evaluated data quality metrics. MetricBasedObservation *MetricBasedObservation // contains filtered or unexported fields }
Describes the observation generated after evaluating the rules and analyzers.
type DataQualityResult ¶ added in v1.37.0
type DataQualityResult struct { // A list of DataQualityAnalyzerResult objects representing the results for each // analyzer. AnalyzerResults []DataQualityAnalyzerResult // The date and time when this data quality run completed. CompletedOn *time.Time // The table associated with the data quality result, if any. DataSource *DataSource // In the context of a job in Glue Studio, each node in the canvas is typically // assigned some sort of name and data quality nodes will have names. In the case // of multiple nodes, the evaluationContext can differentiate the nodes. EvaluationContext *string // The job name associated with the data quality result, if any. JobName *string // The job run ID associated with the data quality result, if any. JobRunId *string // A list of DataQualityObservation objects representing the observations // generated after evaluating the rules and analyzers. Observations []DataQualityObservation // The Profile ID for the data quality result. ProfileId *string // A unique result ID for the data quality result. ResultId *string // A list of DataQualityRuleResult objects representing the results for each rule. RuleResults []DataQualityRuleResult // The unique run ID for the ruleset evaluation for this data quality result. RulesetEvaluationRunId *string // The name of the ruleset associated with the data quality result. RulesetName *string // An aggregate data quality score. Represents the ratio of rules that passed to // the total number of rules. Score *float64 // The date and time when this data quality run started. StartedOn *time.Time // contains filtered or unexported fields }
Describes a data quality result.
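Results are retrieved by ID. The sketch below assumes the glue service client's GetDataQualityResult operation (with ResultId on the request and Score and RuleResults on the response) and walks the per-rule results.

// Assumes:
//   import (
//       "context"
//       "fmt"
//
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// reportRuleFailures prints every rule in a result that did not pass.
func reportRuleFailures(ctx context.Context, client *glue.Client, resultID string) error {
    out, err := client.GetDataQualityResult(ctx, &glue.GetDataQualityResultInput{
        ResultId: aws.String(resultID),
    })
    if err != nil {
        return err
    }
    fmt.Printf("score: %.2f\n", aws.ToFloat64(out.Score))
    for _, r := range out.RuleResults {
        if r.Result != types.DataQualityRuleResultStatusPass {
            fmt.Printf("%s: %s (%s)\n",
                aws.ToString(r.Name), r.Result, aws.ToString(r.EvaluationMessage))
        }
    }
    return nil
}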
type DataQualityResultDescription ¶ added in v1.37.0
type DataQualityResultDescription struct { // The table name associated with the data quality result. DataSource *DataSource // The job name associated with the data quality result. JobName *string // The job run ID associated with the data quality result. JobRunId *string // The unique result ID for this data quality result. ResultId *string // The time that the run started for this data quality result. StartedOn *time.Time // contains filtered or unexported fields }
Describes a data quality result.
type DataQualityResultFilterCriteria ¶ added in v1.37.0
type DataQualityResultFilterCriteria struct { // Filter results by the specified data source. For example, retrieving all // results for a Glue table. DataSource *DataSource // Filter results by the specified job name. JobName *string // Filter results by the specified job run ID. JobRunId *string // Filter results by runs that started after this time. StartedAfter *time.Time // Filter results by runs that started before this time. StartedBefore *time.Time // contains filtered or unexported fields }
Criteria used to return data quality results.
type DataQualityRuleRecommendationRunDescription ¶ added in v1.37.0
type DataQualityRuleRecommendationRunDescription struct { // The data source (Glue table) associated with the recommendation run. DataSource *DataSource // The unique run identifier associated with this run. RunId *string // The date and time when this run started. StartedOn *time.Time // The status for this run. Status TaskStatusType // contains filtered or unexported fields }
Describes the result of a data quality rule recommendation run.
type DataQualityRuleRecommendationRunFilter ¶ added in v1.37.0
type DataQualityRuleRecommendationRunFilter struct { // Filter based on a specified data source (Glue table). // // This member is required. DataSource *DataSource // Filter based on time for results started after provided time. StartedAfter *time.Time // Filter based on time for results started before provided time. StartedBefore *time.Time // contains filtered or unexported fields }
A filter for listing data quality recommendation runs.
type DataQualityRuleResult ¶ added in v1.37.0
type DataQualityRuleResult struct { // A description of the data quality rule. Description *string // A map of metrics associated with the evaluation of the rule. EvaluatedMetrics map[string]float64 // The evaluated rule. EvaluatedRule *string // An evaluation message. EvaluationMessage *string // The name of the data quality rule. Name *string // A pass or fail status for the rule. Result DataQualityRuleResultStatus // contains filtered or unexported fields }
Describes the result of the evaluation of a data quality rule.
type DataQualityRuleResultStatus ¶ added in v1.37.0
type DataQualityRuleResultStatus string
const ( DataQualityRuleResultStatusPass DataQualityRuleResultStatus = "PASS" DataQualityRuleResultStatusFail DataQualityRuleResultStatus = "FAIL" DataQualityRuleResultStatusError DataQualityRuleResultStatus = "ERROR" )
Enum values for DataQualityRuleResultStatus
func (DataQualityRuleResultStatus) Values ¶ added in v1.37.0
func (DataQualityRuleResultStatus) Values() []DataQualityRuleResultStatus
Values returns all known values for DataQualityRuleResultStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DataQualityRulesetEvaluationRunDescription ¶ added in v1.37.0
type DataQualityRulesetEvaluationRunDescription struct { // The data source (a Glue table) associated with the run. DataSource *DataSource // The unique run identifier associated with this run. RunId *string // The date and time when the run started. StartedOn *time.Time // The status for this run. Status TaskStatusType // contains filtered or unexported fields }
Describes the result of a data quality ruleset evaluation run.
type DataQualityRulesetEvaluationRunFilter ¶ added in v1.37.0
type DataQualityRulesetEvaluationRunFilter struct { // Filter based on a data source (a Glue table) associated with the run. // // This member is required. DataSource *DataSource // Filter results by runs that started after this time. StartedAfter *time.Time // Filter results by runs that started before this time. StartedBefore *time.Time // contains filtered or unexported fields }
The filter criteria.
type DataQualityRulesetFilterCriteria ¶ added in v1.37.0
type DataQualityRulesetFilterCriteria struct { // Filter on rulesets created after this date. CreatedAfter *time.Time // Filter on rulesets created before this date. CreatedBefore *time.Time // The description of the ruleset filter criteria. Description *string // Filter on rulesets last modified after this date. LastModifiedAfter *time.Time // Filter on rulesets last modified before this date. LastModifiedBefore *time.Time // The name of the ruleset filter criteria. Name *string // The name and database name of the target table. TargetTable *DataQualityTargetTable // contains filtered or unexported fields }
The criteria used to filter data quality rulesets.
type DataQualityRulesetListDetails ¶ added in v1.37.0
type DataQualityRulesetListDetails struct { // The date and time the data quality ruleset was created. CreatedOn *time.Time // A description of the data quality ruleset. Description *string // The date and time the data quality ruleset was last modified. LastModifiedOn *time.Time // The name of the data quality ruleset. Name *string // When a ruleset was created from a recommendation run, this run ID is generated // to link the two together. RecommendationRunId *string // The number of rules in the ruleset. RuleCount *int32 // An object representing a Glue table. TargetTable *DataQualityTargetTable // contains filtered or unexported fields }
Describes a data quality ruleset returned by GetDataQualityRuleset .
type DataQualityTargetTable ¶ added in v1.37.0
type DataQualityTargetTable struct { // The name of the database where the Glue table exists. // // This member is required. DatabaseName *string // The name of the Glue table. // // This member is required. TableName *string // The catalog id where the Glue table exists. CatalogId *string // contains filtered or unexported fields }
An object representing a Glue table.
type DataSource ¶ added in v1.37.0
type DataSource struct { // A Glue table. // // This member is required. GlueTable *GlueTable // contains filtered or unexported fields }
A data source (a Glue table) for which you want data quality results.
type Database ¶
type Database struct { // The name of the database. For Hive compatibility, this is folded to lowercase // when it is stored. // // This member is required. Name *string // The ID of the Data Catalog in which the database resides. CatalogId *string // Creates a set of default permissions on the table for principals. Used by Lake // Formation. Not used in the normal course of Glue operations. CreateTableDefaultPermissions []PrincipalPermissions // The time at which the metadata database was created in the catalog. CreateTime *time.Time // A description of the database. Description *string // A FederatedDatabase structure that references an entity outside the Glue Data // Catalog. FederatedDatabase *FederatedDatabase // The location of the database (for example, an HDFS path). LocationUri *string // These key-value pairs define parameters and properties of the database. Parameters map[string]string // A DatabaseIdentifier structure that describes a target database for resource // linking. TargetDatabase *DatabaseIdentifier // contains filtered or unexported fields }
The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.
type DatabaseAttributes ¶ added in v1.89.0
type DatabaseAttributes string
const (
DatabaseAttributesName DatabaseAttributes = "NAME"
)
Enum values for DatabaseAttributes
func (DatabaseAttributes) Values ¶ added in v1.89.0
func (DatabaseAttributes) Values() []DatabaseAttributes
Values returns all known values for DatabaseAttributes. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DatabaseIdentifier ¶
type DatabaseIdentifier struct { // The ID of the Data Catalog in which the database resides. CatalogId *string // The name of the catalog database. DatabaseName *string // Region of the target database. Region *string // contains filtered or unexported fields }
A structure that describes a target database for resource linking.
type DatabaseInput ¶
type DatabaseInput struct { // The name of the database. For Hive compatibility, this is folded to lowercase // when it is stored. // // This member is required. Name *string // Creates a set of default permissions on the table for principals. Used by Lake // Formation. Not used in the normal course of Glue operations. CreateTableDefaultPermissions []PrincipalPermissions // A description of the database. Description *string // A FederatedDatabase structure that references an entity outside the Glue Data // Catalog. FederatedDatabase *FederatedDatabase // The location of the database (for example, an HDFS path). LocationUri *string // These key-value pairs define parameters and properties of the database. Parameters map[string]string // A DatabaseIdentifier structure that describes a target database for resource // linking. TargetDatabase *DatabaseIdentifier // contains filtered or unexported fields }
The structure used to create or update a database.
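A sketch of creating a catalog database from this structure; the CreateDatabase operation and its DatabaseInput request member are assumed, and the database name, location, and parameters are placeholders.

// Assumes:
//   import (
//       "context"
//
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// createAnalyticsDatabase creates a catalog database with a default S3 location.
func createAnalyticsDatabase(ctx context.Context, client *glue.Client) error {
    _, err := client.CreateDatabase(ctx, &glue.CreateDatabaseInput{
        DatabaseInput: &types.DatabaseInput{
            Name:        aws.String("analytics"), // folded to lowercase for Hive compatibility
            Description: aws.String("Curated analytics tables"),
            LocationUri: aws.String("s3://example-bucket/analytics/"),
            Parameters:  map[string]string{"owner": "data-platform"},
        },
    })
    return err
}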
type DatapointInclusionAnnotation ¶ added in v1.92.0
type DatapointInclusionAnnotation struct { // The inclusion annotation value to apply to the statistic. InclusionAnnotation InclusionAnnotationValue // The ID of the data quality profile the statistic belongs to. ProfileId *string // The Statistic ID. StatisticId *string // contains filtered or unexported fields }
An Inclusion Annotation.
type Datatype ¶ added in v1.25.0
type Datatype struct { // The datatype of the value. // // This member is required. Id *string // A label assigned to the datatype. // // This member is required. Label *string // contains filtered or unexported fields }
A structure representing the datatype of the value.
type DateColumnStatisticsData ¶
type DateColumnStatisticsData struct { // The number of distinct values in a column. // // This member is required. NumberOfDistinctValues int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // The highest value in the column. MaximumValue *time.Time // The lowest value in the column. MinimumValue *time.Time // contains filtered or unexported fields }
Defines column statistics supported for timestamp data columns.
type DecimalColumnStatisticsData ¶
type DecimalColumnStatisticsData struct { // The number of distinct values in a column. // // This member is required. NumberOfDistinctValues int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // The highest value in the column. MaximumValue *DecimalNumber // The lowest value in the column. MinimumValue *DecimalNumber // contains filtered or unexported fields }
Defines column statistics supported for fixed-point number data columns.
type DecimalNumber ¶
type DecimalNumber struct { // The scale that determines where the decimal point falls in the unscaled value. // // This member is required. Scale int32 // The unscaled numeric value. // // This member is required. UnscaledValue []byte // contains filtered or unexported fields }
Contains a numeric value in decimal format.
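The pair (UnscaledValue, Scale) encodes value = unscaled * 10^(-scale). A worked sketch for 123.45 follows; the byte layout (big-endian bytes of the unscaled integer, as produced by math/big for non-negative values) is an assumption modeled on the Java BigInteger encoding.

// Assumes:
//   import (
//       "math/big"
//
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// decimal123_45 encodes 123.45 as an unscaled value of 12345 with scale 2,
// i.e. 12345 * 10^-2.
func decimal123_45() types.DecimalNumber {
    unscaled := big.NewInt(12345)
    return types.DecimalNumber{
        Scale:         2,
        UnscaledValue: unscaled.Bytes(), // []byte{0x30, 0x39}
    }
}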
type DeleteBehavior ¶
type DeleteBehavior string
const ( DeleteBehaviorLog DeleteBehavior = "LOG" DeleteBehaviorDeleteFromDatabase DeleteBehavior = "DELETE_FROM_DATABASE" DeleteBehaviorDeprecateInDatabase DeleteBehavior = "DEPRECATE_IN_DATABASE" )
Enum values for DeleteBehavior
func (DeleteBehavior) Values ¶ added in v0.29.0
func (DeleteBehavior) Values() []DeleteBehavior
Values returns all known values for DeleteBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DeltaTarget ¶ added in v1.18.0
type DeltaTarget struct { // The name of the connection to use to connect to the Delta table target. ConnectionName *string // Specifies whether the crawler will create native tables, to allow integration // with query engines that support querying of the Delta transaction log directly. CreateNativeDeltaTable *bool // A list of the Amazon S3 paths to the Delta tables. DeltaTables []string // Specifies whether to write the manifest files to the Delta table path. WriteManifest *bool // contains filtered or unexported fields }
Specifies a Delta data store to crawl one or more Delta tables.
type DeltaTargetCompressionType ¶ added in v1.43.0
type DeltaTargetCompressionType string
const ( DeltaTargetCompressionTypeUncompressed DeltaTargetCompressionType = "uncompressed" DeltaTargetCompressionTypeSnappy DeltaTargetCompressionType = "snappy" )
Enum values for DeltaTargetCompressionType
func (DeltaTargetCompressionType) Values ¶ added in v1.43.0
func (DeltaTargetCompressionType) Values() []DeltaTargetCompressionType
Values returns all known values for DeltaTargetCompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type DevEndpoint ¶
type DevEndpoint struct { // A map of arguments used to configure the DevEndpoint . // // Valid arguments are: // // - "--enable-glue-datacatalog": "" // // You can specify a version of Python support for development endpoints by using // the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If // no arguments are provided, the version defaults to Python 2. Arguments map[string]string // The Amazon Web Services Availability Zone where this DevEndpoint is located. AvailabilityZone *string // The point in time at which this DevEndpoint was created. CreatedTimestamp *time.Time // The name of the DevEndpoint . EndpointName *string // The path to one or more Java .jar files in an S3 bucket that should be loaded // in your DevEndpoint . // // You can only use pure Java/Scala libraries with a DevEndpoint . ExtraJarsS3Path *string // The paths to one or more Python libraries in an Amazon S3 bucket that should be // loaded in your DevEndpoint . Multiple values must be complete paths separated by // a comma. // // You can only use pure Python libraries with a DevEndpoint . Libraries that rely // on C extensions, such as the [pandas]Python data analysis library, are not currently // supported. // // [pandas]: http://pandas.pydata.org/ ExtraPythonLibsS3Path *string // The reason for a current failure in this DevEndpoint . FailureReason *string // Glue version determines the versions of Apache Spark and Python that Glue // supports. The Python version indicates the version supported for running your // ETL scripts on development endpoints. // // For more information about the available Glue versions and corresponding Spark // and Python versions, see [Glue version]in the developer guide. // // Development endpoints that are created without specifying a Glue version // default to Glue 0.9. // // You can specify a version of Python support for development endpoints by using // the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If // no arguments are provided, the version defaults to Python 2. // // [Glue version]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html GlueVersion *string // The point in time at which this DevEndpoint was last modified. LastModifiedTimestamp *time.Time // The status of the last update. LastUpdateStatus *string // The number of Glue Data Processing Units (DPUs) allocated to this DevEndpoint . NumberOfNodes int32 // The number of workers of a defined workerType that are allocated to the // development endpoint. // // The maximum number of workers you can define are 299 for G.1X , and 149 for G.2X // . NumberOfWorkers *int32 // A private IP address to access the DevEndpoint within a VPC if the DevEndpoint // is created within one. The PrivateAddress field is present only when you create // the DevEndpoint within your VPC. PrivateAddress *string // The public IP address used by this DevEndpoint . The PublicAddress field is // present only when you create a non-virtual private cloud (VPC) DevEndpoint . PublicAddress *string // The public key to be used by this DevEndpoint for authentication. This // attribute is provided for backward compatibility because the recommended // attribute to use is public keys. PublicKey *string // A list of public keys to be used by the DevEndpoints for authentication. Using // this attribute is preferred over a single public key because the public keys // allow you to have a different private key per client. 
// // If you previously created an endpoint with a public key, you must remove that // key to be able to set a list of public keys. Call the UpdateDevEndpoint API // operation with the public key content in the deletePublicKeys attribute, and // the list of new keys in the addPublicKeys attribute. PublicKeys []string // The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint . RoleArn *string // The name of the SecurityConfiguration structure to be used with this DevEndpoint // . SecurityConfiguration *string // A list of security group identifiers used in this DevEndpoint . SecurityGroupIds []string // The current status of this DevEndpoint . Status *string // The subnet ID for this DevEndpoint . SubnetId *string // The ID of the virtual private cloud (VPC) used by this DevEndpoint . VpcId *string // The type of predefined worker that is allocated to the development endpoint. // Accepts a value of Standard, G.1X, or G.2X. // // - For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory // and a 50GB disk, and 2 executors per worker. // // - For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of // memory, 64 GB disk), and provides 1 executor per worker. We recommend this // worker type for memory-intensive jobs. // // - For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of // memory, 128 GB disk), and provides 1 executor per worker. We recommend this // worker type for memory-intensive jobs. // // Known issue: when a development endpoint is created with the G.2X WorkerType // configuration, the Spark drivers for the development endpoint will run on 4 // vCPU, 16 GB of memory, and a 64 GB disk. WorkerType WorkerType // The YARN endpoint address used by this DevEndpoint . YarnEndpointAddress *string // The Apache Zeppelin port for the remote Apache Spark interpreter. ZeppelinRemoteSparkInterpreterPort int32 // contains filtered or unexported fields }
A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
type DevEndpointCustomLibraries ¶
type DevEndpointCustomLibraries struct { // The path to one or more Java .jar files in an S3 bucket that should be loaded // in your DevEndpoint . // // You can only use pure Java/Scala libraries with a DevEndpoint . ExtraJarsS3Path *string // The paths to one or more Python libraries in an Amazon Simple Storage Service // (Amazon S3) bucket that should be loaded in your DevEndpoint . Multiple values // must be complete paths separated by a comma. // // You can only use pure Python libraries with a DevEndpoint . Libraries that rely // on C extensions, such as the [pandas]Python data analysis library, are not currently // supported. // // [pandas]: http://pandas.pydata.org/ ExtraPythonLibsS3Path *string // contains filtered or unexported fields }
Custom libraries to be loaded into a development endpoint.
type DirectJDBCSource ¶ added in v1.42.0
type DirectJDBCSource struct { // The connection name of the JDBC source. // // This member is required. ConnectionName *string // The connection type of the JDBC source. // // This member is required. ConnectionType JDBCConnectionType // The database of the JDBC source connection. // // This member is required. Database *string // The name of the JDBC source connection. // // This member is required. Name *string // The table of the JDBC source connection. // // This member is required. Table *string // The temp directory of the JDBC Redshift source. RedshiftTmpDir *string // contains filtered or unexported fields }
Specifies the direct JDBC source connection.
type DirectKafkaSource ¶ added in v1.25.0
type DirectKafkaSource struct { // The name of the data store. // // This member is required. Name *string // Specifies options related to data preview for viewing a sample of your data. DataPreviewOptions *StreamingDataPreviewOptions // Whether to automatically determine the schema from the incoming data. DetectSchema *bool // Specifies the streaming options. StreamingOptions *KafkaStreamingSourceOptions // The amount of time to spend processing each micro batch. WindowSize *int32 // contains filtered or unexported fields }
Specifies an Apache Kafka data store.
type DirectKinesisSource ¶ added in v1.25.0
type DirectKinesisSource struct { // The name of the data source. // // This member is required. Name *string // Additional options for data preview. DataPreviewOptions *StreamingDataPreviewOptions // Whether to automatically determine the schema from the incoming data. DetectSchema *bool // Additional options for the Kinesis streaming data source. StreamingOptions *KinesisStreamingSourceOptions // The amount of time to spend processing each micro batch. WindowSize *int32 // contains filtered or unexported fields }
Specifies a direct Amazon Kinesis data source.
type DirectSchemaChangePolicy ¶ added in v1.25.0
type DirectSchemaChangePolicy struct { // Specifies the database that the schema change policy applies to. Database *string // Whether to use the specified update behavior when the crawler finds a changed // schema. EnableUpdateCatalog *bool // Specifies the table in the database that the schema change policy applies to. Table *string // The update behavior when the crawler finds a changed schema. UpdateBehavior UpdateCatalogBehavior // contains filtered or unexported fields }
A policy that specifies update behavior for the crawler.
type DoubleColumnStatisticsData ¶
type DoubleColumnStatisticsData struct { // The number of distinct values in a column. // // This member is required. NumberOfDistinctValues int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // The highest value in the column. MaximumValue float64 // The lowest value in the column. MinimumValue float64 // contains filtered or unexported fields }
Defines column statistics supported for floating-point number data columns.
type DropDuplicates ¶ added in v1.25.0
type DropDuplicates struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // The name of the columns to be merged or removed if repeating. Columns [][]string // contains filtered or unexported fields }
Specifies a transform that removes rows of repeating data from a data set.
type DropFields ¶ added in v1.25.0
type DropFields struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A JSON path to a variable in the data structure. // // This member is required. Paths [][]string // contains filtered or unexported fields }
Specifies a transform that chooses the data property keys that you want to drop.
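Each entry in Paths is itself a list of keys forming one path into the record, so dropping a nested field takes a multi-element slice. A sketch, with node and field names as placeholders:

// Assumes:
//   import (
//       "github.com/aws/aws-sdk-go-v2/aws"
//       "github.com/aws/aws-sdk-go-v2/service/glue/types"
//   )

// dropPIIFields removes a top-level column and a nested field from the
// records produced by the upstream node.
func dropPIIFields() types.DropFields {
    return types.DropFields{
        Name:   aws.String("DropPII"),
        Inputs: []string{"ApplyMapping_node"},
        Paths: [][]string{
            {"ssn"},               // top-level field
            {"customer", "email"}, // nested field customer.email
        },
    }
}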
type DropNullFields ¶ added in v1.25.0
type DropNullFields struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A structure that represents whether certain values are recognized as null // values for removal. NullCheckBoxList *NullCheckBoxList // A structure that specifies a list of NullValueField structures that represent a // custom null value such as zero or other value being used as a null placeholder // unique to the dataset. // // The DropNullFields transform removes custom null values only if both the value // of the null placeholder and the datatype match the data. NullTextList []NullValueField // contains filtered or unexported fields }
Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are "null", -1 integers or other placeholders such as zeros, are not automatically recognized as nulls.
type DynamicTransform ¶ added in v1.36.0
type DynamicTransform struct { // Specifies the name of the function of the dynamic transform. // // This member is required. FunctionName *string // Specifies the inputs for the dynamic transform that are required. // // This member is required. Inputs []string // Specifies the name of the dynamic transform. // // This member is required. Name *string // Specifies the path of the dynamic transform source and config files. // // This member is required. Path *string // Specifies the name of the dynamic transform as it appears in the Glue Studio // visual editor. // // This member is required. TransformName *string // Specifies the data schema for the dynamic transform. OutputSchemas []GlueSchema // Specifies the parameters of the dynamic transform. Parameters []TransformConfigParameter // This field is not used and will be deprecated in future release. Version *string // contains filtered or unexported fields }
Specifies the set of parameters needed to perform the dynamic transform.
type DynamoDBCatalogSource ¶ added in v1.25.0
type DynamoDBCatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a DynamoDB data source in the Glue Data Catalog.
type DynamoDBTarget ¶
type DynamoDBTarget struct { // The name of the DynamoDB table to crawl. Path *string // Indicates whether to scan all the records, or to sample rows from the table. // Scanning all the records can take a long time when the table is not a high // throughput table. // // A value of true means to scan all records, while a value of false means to // sample the records. If no value is specified, the value defaults to true . ScanAll *bool // The percentage of the configured read capacity units to use by the Glue // crawler. Read capacity units is a term defined by DynamoDB, and is a numeric // value that acts as a rate limiter for the number of reads that can be performed on // that table per second. // // The valid values are null or a value between 0.1 and 1.5. A null value is used // when the user does not provide a value, and defaults to 0.5 of the configured Read // Capacity Unit (for provisioned tables), or 0.25 of the max configured Read // Capacity Unit (for tables using on-demand mode). ScanRate *float64 // contains filtered or unexported fields }
Specifies an Amazon DynamoDB table to crawl.
type Edge ¶
type Edge struct { // The unique ID of the node within the workflow where the edge ends. DestinationId *string // The unique ID of the node within the workflow where the edge starts. SourceId *string // contains filtered or unexported fields }
An edge represents a directed connection between two Glue components that are part of the workflow the edge belongs to.
type EnableHybridValues ¶
type EnableHybridValues string
const ( EnableHybridValuesTrue EnableHybridValues = "TRUE" EnableHybridValuesFalse EnableHybridValues = "FALSE" )
Enum values for EnableHybridValues
func (EnableHybridValues) Values ¶ added in v0.29.0
func (EnableHybridValues) Values() []EnableHybridValues
Values returns all known values for EnableHybridValues. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type EncryptionAtRest ¶
type EncryptionAtRest struct { // The encryption-at-rest mode for encrypting Data Catalog data. // // This member is required. CatalogEncryptionMode CatalogEncryptionMode // The role that Glue assumes to encrypt and decrypt the Data Catalog objects on // the caller's behalf. CatalogEncryptionServiceRole *string // The ID of the KMS key to use for encryption at rest. SseAwsKmsKeyId *string // contains filtered or unexported fields }
Specifies the encryption-at-rest configuration for the Data Catalog.
type EncryptionConfiguration ¶
type EncryptionConfiguration struct { // The encryption configuration for Amazon CloudWatch. CloudWatchEncryption *CloudWatchEncryption // The encryption configuration for Glue Data Quality assets. DataQualityEncryption *DataQualityEncryption // The encryption configuration for job bookmarks. JobBookmarksEncryption *JobBookmarksEncryption // The encryption configuration for Amazon Simple Storage Service (Amazon S3) data. S3Encryption []S3Encryption // contains filtered or unexported fields }
Specifies an encryption configuration.
type Entity ¶ added in v1.103.0
type Entity struct { // The type of entities that are present in the response. This value depends on // the source connection. For example, this is SObjects for Salesforce and databases // or schemas or tables for sources like Amazon Redshift. Category *string // An optional map of keys which may be returned for an entity by a connector. CustomProperties map[string]string // A description of the entity. Description *string // The name of the entity. EntityName *string // A Boolean value which helps to determine whether there are sub-objects that can // be listed. IsParentEntity *bool // Label used for the entity. Label *string // contains filtered or unexported fields }
An entity supported by a given ConnectionType .
type EntityNotFoundException ¶
type EntityNotFoundException struct { Message *string ErrorCodeOverride *string FromFederationSource *bool // contains filtered or unexported fields }
A specified entity does not exist.
func (*EntityNotFoundException) Error ¶
func (e *EntityNotFoundException) Error() string
func (*EntityNotFoundException) ErrorCode ¶
func (e *EntityNotFoundException) ErrorCode() string
func (*EntityNotFoundException) ErrorFault ¶
func (e *EntityNotFoundException) ErrorFault() smithy.ErrorFault
func (*EntityNotFoundException) ErrorMessage ¶
func (e *EntityNotFoundException) ErrorMessage() string
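A common pattern with the SDK is to detect this error with errors.As after a Glue API call; a minimal sketch (the function name is made up, and err stands in for the error returned by any Glue operation):

package glueutil

import (
    "errors"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// reportIfNotFound prints the error code and message when the underlying
// cause of err is an EntityNotFoundException, and reports whether it matched.
func reportIfNotFound(err error) bool {
    var nfe *types.EntityNotFoundException
    if errors.As(err, &nfe) {
        fmt.Printf("%s: %s\n", nfe.ErrorCode(), nfe.ErrorMessage())
        return true
    }
    return false
}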
type ErrorDetail ¶
type ErrorDetail struct { // The code associated with this error. ErrorCode *string // A message describing the error. ErrorMessage *string // contains filtered or unexported fields }
Contains details about an error.
type ErrorDetails ¶ added in v0.30.0
type ErrorDetails struct { // The error code for an error. ErrorCode *string // The error message for an error. ErrorMessage *string // contains filtered or unexported fields }
An object containing error details.
type EvaluateDataQuality ¶ added in v1.37.0
type EvaluateDataQuality struct { // The inputs of your data quality evaluation. // // This member is required. Inputs []string // The name of the data quality evaluation. // // This member is required. Name *string // The ruleset for your data quality evaluation. // // This member is required. Ruleset *string // The output of your data quality evaluation. Output DQTransformOutput // Options to configure how your results are published. PublishingOptions *DQResultsPublishingOptions // Options to configure how your job will stop if your data quality evaluation // fails. StopJobOnFailureOptions *DQStopJobOnFailureOptions // contains filtered or unexported fields }
Specifies your data quality evaluation criteria.
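As a sketch, an EvaluateDataQuality node with a single upstream input and a one-rule DQDL ruleset (the node names and the ruleset text are hypothetical examples, not values defined by this package):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// newRowCountCheck builds a data quality evaluation over one upstream node.
func newRowCountCheck() types.EvaluateDataQuality {
    return types.EvaluateDataQuality{
        Name:    aws.String("CheckRowCount"),            // hypothetical node name
        Inputs:  []string{"S3Source"},                   // name of the upstream node
        Ruleset: aws.String(`Rules = [ RowCount > 0 ]`), // assumed DQDL syntax
    }
}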
type EvaluateDataQualityMultiFrame ¶ added in v1.49.0
type EvaluateDataQualityMultiFrame struct { // The inputs of your data quality evaluation. The first input in this list is the // primary data source. // // This member is required. Inputs []string // The name of the data quality evaluation. // // This member is required. Name *string // The ruleset for your data quality evaluation. // // This member is required. Ruleset *string // The aliases of all data sources except primary. AdditionalDataSources map[string]string // Options to configure runtime behavior of the transform. AdditionalOptions map[string]string // Options to configure how your results are published. PublishingOptions *DQResultsPublishingOptions // Options to configure how your job will stop if your data quality evaluation // fails. StopJobOnFailureOptions *DQStopJobOnFailureOptions // contains filtered or unexported fields }
Specifies your data quality evaluation criteria.
type EvaluationMetrics ¶
type EvaluationMetrics struct { // The type of machine learning transform. // // This member is required. TransformType TransformType // The evaluation metrics for the find matches algorithm. FindMatchesMetrics *FindMatchesMetrics // contains filtered or unexported fields }
Evaluation metrics provide an estimate of the quality of your machine learning transform.
type EventBatchingCondition ¶ added in v1.9.0
type EventBatchingCondition struct { // Number of events that must be received from Amazon EventBridge before // the EventBridge event trigger fires. // // This member is required. BatchSize *int32 // Window of time in seconds after which the EventBridge event trigger fires. Window // starts when the first event is received. BatchWindow *int32 // contains filtered or unexported fields }
Batch condition that must be met (specified number of events received or batch time window expired) before the EventBridge event trigger fires.
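For example, a condition that fires the trigger after 10 events, or 900 seconds after the first event arrives, whichever comes first (values are illustrative):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// batchCondition fires on 10 received events or a 900-second window.
var batchCondition = types.EventBatchingCondition{
    BatchSize:   aws.Int32(10),
    BatchWindow: aws.Int32(900),
}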
type ExecutionAttempt ¶ added in v1.103.0
type ExecutionAttempt struct { // A task run ID for the last column statistics task run. ColumnStatisticsTaskRunId *string // An error message associated with the last column statistics task run. ErrorMessage *string // A timestamp when the last column statistics task run occurred. ExecutionTimestamp *time.Time // The status of the last column statistics task run. Status ExecutionStatus // contains filtered or unexported fields }
A run attempt for a column statistics task run.
type ExecutionClass ¶ added in v1.29.0
type ExecutionClass string
const ( ExecutionClassFlex ExecutionClass = "FLEX" ExecutionClassStandard ExecutionClass = "STANDARD" )
Enum values for ExecutionClass
func (ExecutionClass) Values ¶ added in v1.29.0
func (ExecutionClass) Values() []ExecutionClass
Values returns all known values for ExecutionClass. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
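One way to use the generated Values helper is to validate a raw string against the enum values known to this client version; a small sketch (the helper name is made up for this example):

package glueutil

import "github.com/aws/aws-sdk-go-v2/service/glue/types"

// isKnownExecutionClass reports whether s matches an ExecutionClass value
// known to the compiled client ("FLEX" or "STANDARD" at the time of writing).
func isKnownExecutionClass(s string) bool {
    for _, v := range types.ExecutionClassFlex.Values() {
        if string(v) == s {
            return true
        }
    }
    return false
}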
type ExecutionProperty ¶
type ExecutionProperty struct { // The maximum number of concurrent runs allowed for the job. The default is 1. An // error is returned when this threshold is reached. The maximum value you can // specify is controlled by a service limit. MaxConcurrentRuns int32 // contains filtered or unexported fields }
An execution property of a job.
type ExecutionStatus ¶ added in v1.103.0
type ExecutionStatus string
const ( ExecutionStatusFailed ExecutionStatus = "FAILED" ExecutionStatusStarted ExecutionStatus = "STARTED" )
Enum values for ExecutionStatus
func (ExecutionStatus) Values ¶ added in v1.103.0
func (ExecutionStatus) Values() []ExecutionStatus
Values returns all known values for ExecutionStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ExistCondition ¶
type ExistCondition string
const ( ExistConditionMustExist ExistCondition = "MUST_EXIST" ExistConditionNotExist ExistCondition = "NOT_EXIST" ExistConditionNone ExistCondition = "NONE" )
Enum values for ExistCondition
func (ExistCondition) Values ¶ added in v0.29.0
func (ExistCondition) Values() []ExistCondition
Values returns all known values for ExistCondition. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ExportLabelsTaskRunProperties ¶
type ExportLabelsTaskRunProperties struct { // The Amazon Simple Storage Service (Amazon S3) path where you will export the // labels. OutputS3Path *string // contains filtered or unexported fields }
Specifies configuration properties for an exporting labels task run.
type FederatedCatalog ¶ added in v1.103.0
type FederatedCatalog struct { // The name of the connection to an external data source, for example a // Redshift-federated catalog. ConnectionName *string // A unique identifier for the federated catalog. Identifier *string // contains filtered or unexported fields }
A catalog that points to an entity outside the Glue Data Catalog.
type FederatedDatabase ¶ added in v1.45.0
type FederatedDatabase struct { // The name of the connection to the external metastore. ConnectionName *string // A unique identifier for the federated database. Identifier *string // contains filtered or unexported fields }
A database that points to an entity outside the Glue Data Catalog.
type FederatedResourceAlreadyExistsException ¶ added in v1.45.0
type FederatedResourceAlreadyExistsException struct { Message *string ErrorCodeOverride *string AssociatedGlueResource *string // contains filtered or unexported fields }
A federated resource already exists.
func (*FederatedResourceAlreadyExistsException) Error ¶ added in v1.45.0
func (e *FederatedResourceAlreadyExistsException) Error() string
func (*FederatedResourceAlreadyExistsException) ErrorCode ¶ added in v1.45.0
func (e *FederatedResourceAlreadyExistsException) ErrorCode() string
func (*FederatedResourceAlreadyExistsException) ErrorFault ¶ added in v1.45.0
func (e *FederatedResourceAlreadyExistsException) ErrorFault() smithy.ErrorFault
func (*FederatedResourceAlreadyExistsException) ErrorMessage ¶ added in v1.45.0
func (e *FederatedResourceAlreadyExistsException) ErrorMessage() string
type FederatedTable ¶ added in v1.45.0
type FederatedTable struct { // The name of the connection to the external metastore. ConnectionName *string // A unique identifier for the federated database. DatabaseIdentifier *string // A unique identifier for the federated table. Identifier *string // contains filtered or unexported fields }
A table that points to an entity outside the Glue Data Catalog.
type FederationSourceErrorCode ¶ added in v1.45.0
type FederationSourceErrorCode string
const ( FederationSourceErrorCodeAccessDeniedException FederationSourceErrorCode = "AccessDeniedException" FederationSourceErrorCodeEntityNotFoundException FederationSourceErrorCode = "EntityNotFoundException" FederationSourceErrorCodeInvalidCredentialsException FederationSourceErrorCode = "InvalidCredentialsException" FederationSourceErrorCodeInvalidInputException FederationSourceErrorCode = "InvalidInputException" FederationSourceErrorCodeInvalidResponseException FederationSourceErrorCode = "InvalidResponseException" FederationSourceErrorCodeOperationTimeoutException FederationSourceErrorCode = "OperationTimeoutException" FederationSourceErrorCodeOperationNotSupportedException FederationSourceErrorCode = "OperationNotSupportedException" FederationSourceErrorCodeInternalServiceException FederationSourceErrorCode = "InternalServiceException" FederationSourceErrorCodePartialFailureException FederationSourceErrorCode = "PartialFailureException" FederationSourceErrorCodeThrottlingException FederationSourceErrorCode = "ThrottlingException" )
Enum values for FederationSourceErrorCode
func (FederationSourceErrorCode) Values ¶ added in v1.45.0
func (FederationSourceErrorCode) Values() []FederationSourceErrorCode
Values returns all known values for FederationSourceErrorCode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type FederationSourceException ¶ added in v1.45.0
type FederationSourceException struct { Message *string ErrorCodeOverride *string FederationSourceErrorCode FederationSourceErrorCode // contains filtered or unexported fields }
A federation source failed.
func (*FederationSourceException) Error ¶ added in v1.45.0
func (e *FederationSourceException) Error() string
func (*FederationSourceException) ErrorCode ¶ added in v1.45.0
func (e *FederationSourceException) ErrorCode() string
func (*FederationSourceException) ErrorFault ¶ added in v1.45.0
func (e *FederationSourceException) ErrorFault() smithy.ErrorFault
func (*FederationSourceException) ErrorMessage ¶ added in v1.45.0
func (e *FederationSourceException) ErrorMessage() string
type FederationSourceRetryableException ¶ added in v1.45.0
type FederationSourceRetryableException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
A federation source failed, but the operation may be retried.
func (*FederationSourceRetryableException) Error ¶ added in v1.45.0
func (e *FederationSourceRetryableException) Error() string
func (*FederationSourceRetryableException) ErrorCode ¶ added in v1.45.0
func (e *FederationSourceRetryableException) ErrorCode() string
func (*FederationSourceRetryableException) ErrorFault ¶ added in v1.45.0
func (e *FederationSourceRetryableException) ErrorFault() smithy.ErrorFault
func (*FederationSourceRetryableException) ErrorMessage ¶ added in v1.45.0
func (e *FederationSourceRetryableException) ErrorMessage() string
type Field ¶ added in v1.103.0
type Field struct { // Optional map of keys which may be returned. CustomProperties map[string]string // A description of the field. Description *string // A unique identifier for the field. FieldName *string // The type of data in the field. FieldType FieldDataType // Indicates whether this field can be created as part of a destination write. IsCreateable *bool // Indicates whether this field is populated automatically when the object is // created, such as a created-at timestamp. IsDefaultOnCreate *bool // Indicates whether this field can be used in a filter clause (WHERE clause) of a // SQL statement when querying data. IsFilterable *bool // Indicates whether this field is nullable. IsNullable *bool // Indicates whether a given field can be used in partitioning the query made to the // SaaS source. IsPartitionable *bool // Indicates whether this field can be used as a primary key for the given entity. IsPrimaryKey *bool // Indicates whether this field can be added to the SELECT clause of a SQL query, // that is, whether it is retrievable. IsRetrievable *bool // Indicates whether this field can be updated as part of a destination write. IsUpdateable *bool // Indicates whether this field can be upserted as part of a destination write. IsUpsertable *bool // A readable label used for the field. Label *string // The data type returned by the SaaS API, such as “picklist” or “textarea” from // Salesforce. NativeDataType *string // A parent field name for a nested field. ParentField *string // Indicates the supported filter operators for this field. SupportedFilterOperators []FieldFilterOperator // A list of supported values for the field. SupportedValues []string // contains filtered or unexported fields }
The Field object has information about the different properties associated with a field in the connector.
type FieldDataType ¶ added in v1.103.0
type FieldDataType string
const ( FieldDataTypeInt FieldDataType = "INT" FieldDataTypeSmallint FieldDataType = "SMALLINT" FieldDataTypeBigint FieldDataType = "BIGINT" FieldDataTypeFloat FieldDataType = "FLOAT" FieldDataTypeLong FieldDataType = "LONG" FieldDataTypeDate FieldDataType = "DATE" FieldDataTypeBoolean FieldDataType = "BOOLEAN" FieldDataTypeMap FieldDataType = "MAP" FieldDataTypeArray FieldDataType = "ARRAY" FieldDataTypeString FieldDataType = "STRING" FieldDataTypeTimestamp FieldDataType = "TIMESTAMP" FieldDataTypeDecimal FieldDataType = "DECIMAL" FieldDataTypeByte FieldDataType = "BYTE" FieldDataTypeShort FieldDataType = "SHORT" FieldDataTypeDouble FieldDataType = "DOUBLE" FieldDataTypeStruct FieldDataType = "STRUCT" )
Enum values for FieldDataType
func (FieldDataType) Values ¶ added in v1.103.0
func (FieldDataType) Values() []FieldDataType
Values returns all known values for FieldDataType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type FieldFilterOperator ¶ added in v1.103.0
type FieldFilterOperator string
const ( FieldFilterOperatorLessThan FieldFilterOperator = "LESS_THAN" FieldFilterOperatorGreaterThan FieldFilterOperator = "GREATER_THAN" FieldFilterOperatorBetween FieldFilterOperator = "BETWEEN" FieldFilterOperatorEqualTo FieldFilterOperator = "EQUAL_TO" FieldFilterOperatorNotEqualTo FieldFilterOperator = "NOT_EQUAL_TO" FieldFilterOperatorGreaterThanOrEqualTo FieldFilterOperator = "GREATER_THAN_OR_EQUAL_TO" FieldFilterOperatorLessThanOrEqualTo FieldFilterOperator = "LESS_THAN_OR_EQUAL_TO" FieldFilterOperatorContains FieldFilterOperator = "CONTAINS" FieldFilterOperatorOrderBy FieldFilterOperator = "ORDER_BY" )
Enum values for FieldFilterOperator
func (FieldFilterOperator) Values ¶ added in v1.103.0
func (FieldFilterOperator) Values() []FieldFilterOperator
Values returns all known values for FieldFilterOperator. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type FieldName ¶ added in v1.26.0
type FieldName string
type FillMissingValues ¶ added in v1.25.0
type FillMissingValues struct { // A JSON path to a variable in the data structure for the dataset that is imputed. // // This member is required. ImputedPath *string // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A JSON path to a variable in the data structure for the dataset that is filled. FilledPath *string // contains filtered or unexported fields }
Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.
type Filter ¶ added in v1.25.0
type Filter struct { // Specifies a filter expression. // // This member is required. Filters []FilterExpression // The data inputs identified by their node names. // // This member is required. Inputs []string // The operator used to filter rows by comparing the key value to a specified // value. // // This member is required. LogicalOperator FilterLogicalOperator // The name of the transform node. // // This member is required. Name *string // contains filtered or unexported fields }
Specifies a transform that splits a dataset into two, based on a filter condition.
type FilterExpression ¶ added in v1.25.0
type FilterExpression struct { // The type of operation to perform in the expression. // // This member is required. Operation FilterOperation // A list of filter values. // // This member is required. Values []FilterValue // Whether the expression is to be negated. Negated *bool // contains filtered or unexported fields }
Specifies a filter expression.
type FilterLogicalOperator ¶ added in v1.25.0
type FilterLogicalOperator string
const ( FilterLogicalOperatorAnd FilterLogicalOperator = "AND" FilterLogicalOperatorOr FilterLogicalOperator = "OR" )
Enum values for FilterLogicalOperator
func (FilterLogicalOperator) Values ¶ added in v1.25.0
func (FilterLogicalOperator) Values() []FilterLogicalOperator
Values returns all known values for FilterLogicalOperator. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type FilterOperation ¶ added in v1.25.0
type FilterOperation string
const ( FilterOperationEq FilterOperation = "EQ" FilterOperationLt FilterOperation = "LT" FilterOperationGt FilterOperation = "GT" FilterOperationLte FilterOperation = "LTE" FilterOperationGte FilterOperation = "GTE" FilterOperationRegex FilterOperation = "REGEX" FilterOperationIsnull FilterOperation = "ISNULL" )
Enum values for FilterOperation
func (FilterOperation) Values ¶ added in v1.25.0
func (FilterOperation) Values() []FilterOperation
Values returns all known values for FilterOperation. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type FilterOperator ¶ added in v1.26.0
type FilterOperator string
const ( FilterOperatorGt FilterOperator = "GT" FilterOperatorGe FilterOperator = "GE" FilterOperatorLt FilterOperator = "LT" FilterOperatorLe FilterOperator = "LE" FilterOperatorEq FilterOperator = "EQ" FilterOperatorNe FilterOperator = "NE" )
Enum values for FilterOperator
func (FilterOperator) Values ¶ added in v1.26.0
func (FilterOperator) Values() []FilterOperator
Values returns all known values for FilterOperator. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type FilterValue ¶ added in v1.25.0
type FilterValue struct { // The type of filter value. // // This member is required. Type FilterValueType // The value to be associated. // // This member is required. Value []string // contains filtered or unexported fields }
Represents a single entry in the list of values for a FilterExpression .
type FilterValueType ¶ added in v1.25.0
type FilterValueType string
const ( FilterValueTypeColumnextracted FilterValueType = "COLUMNEXTRACTED" FilterValueTypeConstant FilterValueType = "CONSTANT" )
Enum values for FilterValueType
func (FilterValueType) Values ¶ added in v1.25.0
func (FilterValueType) Values() []FilterValueType
Values returns all known values for FilterValueType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
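Putting the filter types together, a sketch of a Filter node that keeps rows where an extracted column equals a constant; node and column names are hypothetical, and pairing a COLUMNEXTRACTED value with a CONSTANT value in one expression is an assumption about how the visual filter is encoded:

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// keepActiveRows keeps rows whose "status" column equals "ACTIVE".
func keepActiveRows() types.Filter {
    return types.Filter{
        Name:            aws.String("KeepActive"),
        Inputs:          []string{"ApplyMappingNode"}, // upstream node name
        LogicalOperator: types.FilterLogicalOperatorAnd,
        Filters: []types.FilterExpression{
            {
                Operation: types.FilterOperationEq,
                Values: []types.FilterValue{
                    {Type: types.FilterValueTypeColumnextracted, Value: []string{"status"}},
                    {Type: types.FilterValueTypeConstant, Value: []string{"ACTIVE"}},
                },
            },
        },
    }
}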
type FindMatchesMetrics ¶
type FindMatchesMetrics struct { // The area under the precision/recall curve (AUPRC) is a single number measuring // the overall quality of the transform that is independent of the choice made for // precision vs. recall. Higher values indicate that you have a more attractive // precision vs. recall tradeoff. // // For more information, see [Precision and recall] in Wikipedia. // // [Precision and recall]: https://en.wikipedia.org/wiki/Precision_and_recall AreaUnderPRCurve *float64 // A list of ColumnImportance structures containing column importance metrics, // sorted in order of descending importance. ColumnImportances []ColumnImportance // The confusion matrix shows you what your transform is predicting accurately and // what types of errors it is making. // // For more information, see [Confusion matrix] in Wikipedia. // // [Confusion matrix]: https://en.wikipedia.org/wiki/Confusion_matrix ConfusionMatrix *ConfusionMatrix // The maximum F1 metric indicates the transform's accuracy between 0 and 1, where // 1 is the best accuracy. // // For more information, see [F1 score] in Wikipedia. // // [F1 score]: https://en.wikipedia.org/wiki/F1_score F1 *float64 // The precision metric indicates how often your transform is correct when it // predicts a match. Specifically, it measures how well the transform finds true // positives from the total true positives possible. // // For more information, see [Precision and recall] in Wikipedia. // // [Precision and recall]: https://en.wikipedia.org/wiki/Precision_and_recall Precision *float64 // The recall metric indicates, for an actual match, how often your transform // predicts the match. Specifically, it measures how well the transform finds true // positives from the total records in the source data. // // For more information, see [Precision and recall] in Wikipedia. // // [Precision and recall]: https://en.wikipedia.org/wiki/Precision_and_recall Recall *float64 // contains filtered or unexported fields }
The evaluation metrics for the find matches algorithm. The quality of your machine learning transform is measured by getting your transform to predict some matches and comparing the results to known matches from the same dataset. The quality metrics are based on a subset of your data, so they are not precise.
type FindMatchesParameters ¶
type FindMatchesParameters struct { // The value that is selected when tuning your transform for a balance between // accuracy and cost. A value of 0.5 means that the system balances accuracy and // cost concerns. A value of 1.0 means a bias purely for accuracy, which typically // results in a higher cost, sometimes substantially higher. A value of 0.0 means a // bias purely for cost, which results in a less accurate FindMatches transform, // sometimes with unacceptable accuracy. // // Accuracy measures how well the transform finds true positives and true // negatives. Increasing accuracy requires more machine resources and cost. But it // also results in increased recall. // // Cost measures how many compute resources, and thus money, are consumed to run // the transform. AccuracyCostTradeoff *float64 // The value to switch on or off to force the output to match the provided labels // from users. If the value is True , the find matches transform forces the output // to match the provided labels. The results override the normal conflation // results. If the value is False , the find matches transform does not ensure all // the labels provided are respected, and the results rely on the trained model. // // Note that setting this value to true may increase the conflation execution time. EnforceProvidedLabels *bool // The value selected when tuning your transform for a balance between precision // and recall. A value of 0.5 means no preference; a value of 1.0 means a bias // purely for precision, and a value of 0.0 means a bias for recall. Because this // is a tradeoff, choosing values close to 1.0 means very low recall, and choosing // values close to 0.0 results in very low precision. // // The precision metric indicates how often your model is correct when it predicts // a match. // // The recall metric indicates that for an actual match, how often your model // predicts the match. PrecisionRecallTradeoff *float64 // The name of a column that uniquely identifies rows in the source table. Used to // help identify matching records. PrimaryKeyColumnName *string // contains filtered or unexported fields }
The parameters to configure the find matches transform.
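A sketch of tuning values for a find matches transform, leaning slightly toward precision and toward accuracy over cost (the column name and numbers are illustrative, not recommendations):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// findMatchesParams biases the transform toward precision (0.7) and toward
// accuracy over cost (0.8), keyed on a hypothetical record_id column.
var findMatchesParams = types.FindMatchesParameters{
    PrimaryKeyColumnName:    aws.String("record_id"),
    PrecisionRecallTradeoff: aws.Float64(0.7),
    AccuracyCostTradeoff:    aws.Float64(0.8),
    EnforceProvidedLabels:   aws.Bool(false),
}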
type FindMatchesTaskRunProperties ¶
type FindMatchesTaskRunProperties struct { // The job ID for the Find Matches task run. JobId *string // The name assigned to the job for the Find Matches task run. JobName *string // The job run ID for the Find Matches task run. JobRunId *string // contains filtered or unexported fields }
Specifies configuration properties for a Find Matches task run.
type GetConnectionsFilter ¶
type GetConnectionsFilter struct { // Denotes if the connection was created with schema version 1 or 2. ConnectionSchemaVersion *int32 // The type of connections to return. Currently, SFTP is not supported. ConnectionType ConnectionType // A criteria string that must match the criteria recorded in the connection // definition for that connection definition to be returned. MatchCriteria []string // contains filtered or unexported fields }
Filters the connection definitions that are returned by the GetConnections API operation.
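A sketch of a filter that returns only schema-version-1 connections of one type; "JDBC" is assumed to be a valid ConnectionType value, and the match criterion is a made-up example:

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var connectionsFilter = types.GetConnectionsFilter{
    ConnectionSchemaVersion: aws.Int32(1),
    ConnectionType:          types.ConnectionType("JDBC"), // assumed enum value
    MatchCriteria:           []string{"team-a"},           // hypothetical criterion
}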
type GlueEncryptionException ¶
type GlueEncryptionException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An encryption operation failed.
func (*GlueEncryptionException) Error ¶
func (e *GlueEncryptionException) Error() string
func (*GlueEncryptionException) ErrorCode ¶
func (e *GlueEncryptionException) ErrorCode() string
func (*GlueEncryptionException) ErrorFault ¶
func (e *GlueEncryptionException) ErrorFault() smithy.ErrorFault
func (*GlueEncryptionException) ErrorMessage ¶
func (e *GlueEncryptionException) ErrorMessage() string
type GluePolicy ¶
type GluePolicy struct { // The date and time at which the policy was created. CreateTime *time.Time // Contains the hash value associated with this policy. PolicyHash *string // Contains the requested policy document, in JSON format. PolicyInJson *string // The date and time at which the policy was last updated. UpdateTime *time.Time // contains filtered or unexported fields }
A structure for returning a resource policy.
type GlueRecordType ¶ added in v1.25.0
type GlueRecordType string
const ( GlueRecordTypeDate GlueRecordType = "DATE" GlueRecordTypeString GlueRecordType = "STRING" GlueRecordTypeTimestamp GlueRecordType = "TIMESTAMP" GlueRecordTypeInt GlueRecordType = "INT" GlueRecordTypeFloat GlueRecordType = "FLOAT" GlueRecordTypeLong GlueRecordType = "LONG" GlueRecordTypeBigdecimal GlueRecordType = "BIGDECIMAL" GlueRecordTypeByte GlueRecordType = "BYTE" GlueRecordTypeShort GlueRecordType = "SHORT" GlueRecordTypeDouble GlueRecordType = "DOUBLE" )
Enum values for GlueRecordType
func (GlueRecordType) Values ¶ added in v1.25.0
func (GlueRecordType) Values() []GlueRecordType
Values returns all known values for GlueRecordType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type GlueSchema ¶ added in v1.25.0
type GlueSchema struct { // Specifies the column definitions that make up a Glue schema. Columns []GlueStudioSchemaColumn // contains filtered or unexported fields }
Specifies a user-defined schema when a schema cannot be determined by Glue.
type GlueStudioSchemaColumn ¶ added in v1.25.0
type GlueStudioSchemaColumn struct { // The name of the column in the Glue Studio schema. // // This member is required. Name *string // The Hive type for this column in the Glue Studio schema. Type *string // contains filtered or unexported fields }
Specifies a single column in a Glue schema definition.
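For example, a user-defined schema with two columns could be built as follows (column names and Hive types are illustrative):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// outputSchema is a hand-specified schema for a node whose schema Glue
// cannot infer on its own.
var outputSchema = types.GlueSchema{
    Columns: []types.GlueStudioSchemaColumn{
        {Name: aws.String("order_id"), Type: aws.String("string")},
        {Name: aws.String("amount"), Type: aws.String("double")},
    },
}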
type GlueTable ¶
type GlueTable struct { // A database name in the Glue Data Catalog. // // This member is required. DatabaseName *string // A table name in the Glue Data Catalog. // // This member is required. TableName *string // Additional options for the table. Currently there are two keys supported: // // - pushDownPredicate : to filter on partitions without having to list and read // all the files in your dataset. // // - catalogPartitionPredicate : to use server-side partition pruning using // partition indexes in the Glue Data Catalog. AdditionalOptions map[string]string // A unique identifier for the Glue Data Catalog. CatalogId *string // The name of the connection to the Glue Data Catalog. ConnectionName *string // contains filtered or unexported fields }
The database and table in the Glue Data Catalog that are used for input or output data.
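A sketch of a catalog table reference that pushes a partition predicate down to the source, so only matching partitions are read (database, table, and predicate are hypothetical):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var sourceTable = types.GlueTable{
    DatabaseName: aws.String("sales"),
    TableName:    aws.String("orders"),
    AdditionalOptions: map[string]string{
        // Skip listing files for partitions outside June 2024.
        "pushDownPredicate": "year == '2024' and month == '06'",
    },
}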
type GovernedCatalogSource ¶ added in v1.25.0
type GovernedCatalogSource struct { // The database to read from. // // This member is required. Database *string // The name of the data store. // // This member is required. Name *string // The database table to read from. // // This member is required. Table *string // Specifies additional connection options. AdditionalOptions *S3SourceAdditionalOptions // Partitions satisfying this predicate are deleted. Files within the retention // period in these partitions are not deleted. Set to "" – empty by default. PartitionPredicate *string // contains filtered or unexported fields }
Specifies the data store in the governed Glue Data Catalog.
type GovernedCatalogTarget ¶ added in v1.25.0
type GovernedCatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the governed catalog. SchemaChangePolicy *CatalogSchemaChangePolicy // contains filtered or unexported fields }
Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.
type GrokClassifier ¶
type GrokClassifier struct { // An identifier of the data format that the classifier matches, such as Twitter, // JSON, Omniture logs, and so on. // // This member is required. Classification *string // The grok pattern applied to a data store by this classifier. For more // information, see built-in patterns in [Writing Custom Classifiers]. // // [Writing Custom Classifiers]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html // // This member is required. GrokPattern *string // The name of the classifier. // // This member is required. Name *string // The time that this classifier was registered. CreationTime *time.Time // Optional custom grok patterns defined by this classifier. For more information, // see custom patterns in [Writing Custom Classifiers]. // // [Writing Custom Classifiers]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html CustomPatterns *string // The time that this classifier was last updated. LastUpdated *time.Time // The version of this classifier. Version int64 // contains filtered or unexported fields }
A classifier that uses grok patterns.
type HudiTarget ¶ added in v1.56.0
type HudiTarget struct { // The name of the connection to use to connect to the Hudi target. If your Hudi // files are stored in buckets that require VPC authorization, you can set their // connection properties here. ConnectionName *string // A list of glob patterns used to exclude from the crawl. For more information, // see [Catalog Tables with a Crawler]. // // [Catalog Tables with a Crawler]: https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html Exclusions []string // The maximum depth of Amazon S3 paths that the crawler can traverse to discover // the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run // time. MaximumTraversalDepth *int32 // An array of Amazon S3 location strings for Hudi, each indicating the root // folder in which the metadata files for a Hudi table reside. The Hudi folder // may be located in a child folder of the root folder. // // The crawler will scan all folders underneath a path for a Hudi folder. Paths []string // contains filtered or unexported fields }
Specifies an Apache Hudi data source.
type HudiTargetCompressionType ¶ added in v1.40.0
type HudiTargetCompressionType string
const ( HudiTargetCompressionTypeGzip HudiTargetCompressionType = "gzip" HudiTargetCompressionTypeLzo HudiTargetCompressionType = "lzo" HudiTargetCompressionTypeUncompressed HudiTargetCompressionType = "uncompressed" HudiTargetCompressionTypeSnappy HudiTargetCompressionType = "snappy" )
Enum values for HudiTargetCompressionType
func (HudiTargetCompressionType) Values ¶ added in v1.40.0
func (HudiTargetCompressionType) Values() []HudiTargetCompressionType
Values returns all known values for HudiTargetCompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type IcebergCompactionMetrics ¶ added in v1.96.0
type IcebergCompactionMetrics struct { // The duration of the job in hours. JobDurationInHour float64 // The number of bytes removed by the compaction job run. NumberOfBytesCompacted int64 // The number of DPU hours consumed by the job. NumberOfDpus int32 // The number of files removed by the compaction job run. NumberOfFilesCompacted int64 // contains filtered or unexported fields }
Compaction metrics for Iceberg for the optimizer run.
type IcebergInput ¶ added in v1.54.0
type IcebergInput struct { // A required metadata operation. Can only be set to CREATE . // // This member is required. MetadataOperation MetadataOperation // The table version for the Iceberg table. Defaults to 2. Version *string // contains filtered or unexported fields }
A structure that defines an Apache Iceberg metadata table to create in the catalog.
type IcebergOrphanFileDeletionConfiguration ¶ added in v1.96.0
type IcebergOrphanFileDeletionConfiguration struct { // Specifies a directory in which to look for files (defaults to the table's // location). You may choose a sub-directory rather than the top-level table // location. Location *string // The number of days that orphan files should be retained before file deletion. // If an input is not provided, the default value 3 will be used. OrphanFileRetentionPeriodInDays *int32 // contains filtered or unexported fields }
The configuration for an Iceberg orphan file deletion optimizer.
type IcebergOrphanFileDeletionMetrics ¶ added in v1.96.0
type IcebergOrphanFileDeletionMetrics struct { // The duration of the job in hours. JobDurationInHour float64 // The number of DPU hours consumed by the job. NumberOfDpus int32 // The number of orphan files deleted by the orphan file deletion job run. NumberOfOrphanFilesDeleted int64 // contains filtered or unexported fields }
Orphan file deletion metrics for Iceberg for the optimizer run.
type IcebergRetentionConfiguration ¶ added in v1.96.0
type IcebergRetentionConfiguration struct { // If set to false, snapshots are only deleted from table metadata, and the // underlying data and metadata files are not deleted. CleanExpiredFiles *bool // The number of Iceberg snapshots to retain within the retention period. If an // input is not provided, the corresponding Iceberg table configuration field will // be used or if not present, the default value 1 will be used. NumberOfSnapshotsToRetain *int32 // The number of days to retain the Iceberg snapshots. If an input is not // provided, the corresponding Iceberg table configuration field will be used or if // not present, the default value 5 will be used. SnapshotRetentionPeriodInDays *int32 // contains filtered or unexported fields }
The configuration for an Iceberg snapshot retention optimizer.
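For example, a retention configuration that keeps a week of snapshots, retains at least three of them, and physically deletes expired data and metadata files (values are illustrative):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var retention = types.IcebergRetentionConfiguration{
    SnapshotRetentionPeriodInDays: aws.Int32(7),
    NumberOfSnapshotsToRetain:     aws.Int32(3),
    CleanExpiredFiles:             aws.Bool(true),
}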
type IcebergRetentionMetrics ¶ added in v1.96.0
type IcebergRetentionMetrics struct { // The duration of the job in hours. JobDurationInHour float64 // The number of data files deleted by the retention job run. NumberOfDataFilesDeleted int64 // The number of DPU hours consumed by the job. NumberOfDpus int32 // The number of manifest files deleted by the retention job run. NumberOfManifestFilesDeleted int64 // The number of manifest lists deleted by the retention job run. NumberOfManifestListsDeleted int64 // contains filtered or unexported fields }
Snapshot retention metrics for Iceberg for the optimizer run.
type IcebergTarget ¶ added in v1.53.0
type IcebergTarget struct { // The name of the connection to use to connect to the Iceberg target. ConnectionName *string // A list of glob patterns used to exclude from the crawl. For more information, // see [Catalog Tables with a Crawler]. // // [Catalog Tables with a Crawler]: https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html Exclusions []string // The maximum depth of Amazon S3 paths that the crawler can traverse to discover // the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler // run time. MaximumTraversalDepth *int32 // One or more Amazon S3 paths that contain Iceberg metadata folders as // s3://bucket/prefix . Paths []string // contains filtered or unexported fields }
Specifies an Apache Iceberg data source where Iceberg tables are stored in Amazon S3.
type IdempotentParameterMismatchException ¶
type IdempotentParameterMismatchException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The same unique identifier was associated with two different records.
func (*IdempotentParameterMismatchException) Error ¶
func (e *IdempotentParameterMismatchException) Error() string
func (*IdempotentParameterMismatchException) ErrorCode ¶
func (e *IdempotentParameterMismatchException) ErrorCode() string
func (*IdempotentParameterMismatchException) ErrorFault ¶
func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault
func (*IdempotentParameterMismatchException) ErrorMessage ¶
func (e *IdempotentParameterMismatchException) ErrorMessage() string
type IllegalBlueprintStateException ¶ added in v1.11.0
type IllegalBlueprintStateException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The blueprint is in an invalid state to perform a requested operation.
func (*IllegalBlueprintStateException) Error ¶ added in v1.11.0
func (e *IllegalBlueprintStateException) Error() string
func (*IllegalBlueprintStateException) ErrorCode ¶ added in v1.11.0
func (e *IllegalBlueprintStateException) ErrorCode() string
func (*IllegalBlueprintStateException) ErrorFault ¶ added in v1.11.0
func (e *IllegalBlueprintStateException) ErrorFault() smithy.ErrorFault
func (*IllegalBlueprintStateException) ErrorMessage ¶ added in v1.11.0
func (e *IllegalBlueprintStateException) ErrorMessage() string
type IllegalSessionStateException ¶ added in v1.22.0
type IllegalSessionStateException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The session is in an invalid state to perform a requested operation.
func (*IllegalSessionStateException) Error ¶ added in v1.22.0
func (e *IllegalSessionStateException) Error() string
func (*IllegalSessionStateException) ErrorCode ¶ added in v1.22.0
func (e *IllegalSessionStateException) ErrorCode() string
func (*IllegalSessionStateException) ErrorFault ¶ added in v1.22.0
func (e *IllegalSessionStateException) ErrorFault() smithy.ErrorFault
func (*IllegalSessionStateException) ErrorMessage ¶ added in v1.22.0
func (e *IllegalSessionStateException) ErrorMessage() string
type IllegalWorkflowStateException ¶
type IllegalWorkflowStateException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The workflow is in an invalid state to perform a requested operation.
func (*IllegalWorkflowStateException) Error ¶
func (e *IllegalWorkflowStateException) Error() string
func (*IllegalWorkflowStateException) ErrorCode ¶
func (e *IllegalWorkflowStateException) ErrorCode() string
func (*IllegalWorkflowStateException) ErrorFault ¶
func (e *IllegalWorkflowStateException) ErrorFault() smithy.ErrorFault
func (*IllegalWorkflowStateException) ErrorMessage ¶
func (e *IllegalWorkflowStateException) ErrorMessage() string
type ImportLabelsTaskRunProperties ¶
type ImportLabelsTaskRunProperties struct { // The Amazon Simple Storage Service (Amazon S3) path from which you will import // the labels. InputS3Path *string // Indicates whether to overwrite your existing labels. Replace bool // contains filtered or unexported fields }
Specifies configuration properties for an importing labels task run.
type InboundIntegration ¶ added in v1.103.0
type InboundIntegration struct { // The time that the integration was created, in UTC. // // This member is required. CreateTime *time.Time // The ARN of the zero-ETL integration. // // This member is required. IntegrationArn *string // The ARN of the source resource for the integration. // // This member is required. SourceArn *string // The possible statuses are: // // - CREATING: The integration is being created. // // - ACTIVE: The integration creation succeeds. // // - MODIFYING: The integration is being modified. // // - FAILED: The integration creation fails. // // - DELETING: The integration is deleted. // // - SYNCING: The integration is synchronizing. // // - NEEDS_ATTENTION: The integration needs attention, such as synchronization. // // This member is required. Status IntegrationStatus // The ARN of the target resource for the integration. // // This member is required. TargetArn *string // A list of errors associated with the integration. Errors []IntegrationError // contains filtered or unexported fields }
A structure for an integration that writes data into a resource.
type InclusionAnnotationValue ¶ added in v1.92.0
type InclusionAnnotationValue string
const ( InclusionAnnotationValueInclude InclusionAnnotationValue = "INCLUDE" InclusionAnnotationValueExclude InclusionAnnotationValue = "EXCLUDE" )
Enum values for InclusionAnnotationValue
func (InclusionAnnotationValue) Values ¶ added in v1.92.0
func (InclusionAnnotationValue) Values() []InclusionAnnotationValue
Values returns all known values for InclusionAnnotationValue. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Integration ¶ added in v1.103.0
type Integration struct { // The time that the integration was created, in UTC. // // This member is required. CreateTime *time.Time // The Amazon Resource Name (ARN) for the integration. // // This member is required. IntegrationArn *string // A unique name for the integration. // // This member is required. IntegrationName *string // The ARN for the source of the integration. // // This member is required. SourceArn *string // The possible statuses are: // // - CREATING: The integration is being created. // // - ACTIVE: The integration creation succeeds. // // - MODIFYING: The integration is being modified. // // - FAILED: The integration creation fails. // // - DELETING: The integration is deleted. // // - SYNCING: The integration is synchronizing. // // - NEEDS_ATTENTION: The integration needs attention, such as synchronization. // // This member is required. Status IntegrationStatus // The ARN for the target of the integration. // // This member is required. TargetArn *string // An optional set of non-secret key–value pairs that contains additional // contextual information for encryption. This can only be provided if KMSKeyId is // provided. AdditionalEncryptionContext map[string]string // Selects source tables for the integration using Maxwell filter syntax. DataFilter *string // A description for the integration. Description *string // A list of errors associated with the integration. Errors []IntegrationError // The ARN of a KMS key used for encrypting the channel. KmsKeyId *string // Metadata assigned to the resource consisting of a list of key-value pairs. Tags []Tag // contains filtered or unexported fields }
Describes a zero-ETL integration.
type IntegrationConflictOperationFault ¶ added in v1.103.0
type IntegrationConflictOperationFault struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The requested operation conflicts with another operation.
func (*IntegrationConflictOperationFault) Error ¶ added in v1.103.0
func (e *IntegrationConflictOperationFault) Error() string
func (*IntegrationConflictOperationFault) ErrorCode ¶ added in v1.103.0
func (e *IntegrationConflictOperationFault) ErrorCode() string
func (*IntegrationConflictOperationFault) ErrorFault ¶ added in v1.103.0
func (e *IntegrationConflictOperationFault) ErrorFault() smithy.ErrorFault
func (*IntegrationConflictOperationFault) ErrorMessage ¶ added in v1.103.0
func (e *IntegrationConflictOperationFault) ErrorMessage() string
type IntegrationError ¶ added in v1.103.0
type IntegrationError struct { // The code associated with this error. ErrorCode *string // A message describing the error. ErrorMessage *string // contains filtered or unexported fields }
An error associated with a zero-ETL integration.
type IntegrationFilter ¶ added in v1.103.0
type IntegrationFilter struct { // The name of the filter. Name *string // A list of filter values. Values []string // contains filtered or unexported fields }
A filter that can be used when invoking a DescribeIntegrations request.
type IntegrationNotFoundFault ¶ added in v1.103.0
type IntegrationNotFoundFault struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The specified integration could not be found.
func (*IntegrationNotFoundFault) Error ¶ added in v1.103.0
func (e *IntegrationNotFoundFault) Error() string
func (*IntegrationNotFoundFault) ErrorCode ¶ added in v1.103.0
func (e *IntegrationNotFoundFault) ErrorCode() string
func (*IntegrationNotFoundFault) ErrorFault ¶ added in v1.103.0
func (e *IntegrationNotFoundFault) ErrorFault() smithy.ErrorFault
func (*IntegrationNotFoundFault) ErrorMessage ¶ added in v1.103.0
func (e *IntegrationNotFoundFault) ErrorMessage() string
type IntegrationPartition ¶ added in v1.103.0
type IntegrationPartition struct { // The field name used to partition data on the target. FieldName *string // Specifies a function used to partition data on the target. FunctionSpec *string // contains filtered or unexported fields }
A structure that describes how data is partitioned on the target.
type IntegrationQuotaExceededFault ¶ added in v1.103.0
type IntegrationQuotaExceededFault struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The data processed through your integration exceeded your quota.
func (*IntegrationQuotaExceededFault) Error ¶ added in v1.103.0
func (e *IntegrationQuotaExceededFault) Error() string
func (*IntegrationQuotaExceededFault) ErrorCode ¶ added in v1.103.0
func (e *IntegrationQuotaExceededFault) ErrorCode() string
func (*IntegrationQuotaExceededFault) ErrorFault ¶ added in v1.103.0
func (e *IntegrationQuotaExceededFault) ErrorFault() smithy.ErrorFault
func (*IntegrationQuotaExceededFault) ErrorMessage ¶ added in v1.103.0
func (e *IntegrationQuotaExceededFault) ErrorMessage() string
type IntegrationStatus ¶ added in v1.103.0
type IntegrationStatus string
const ( IntegrationStatusCreating IntegrationStatus = "CREATING" IntegrationStatusActive IntegrationStatus = "ACTIVE" IntegrationStatusModifying IntegrationStatus = "MODIFYING" IntegrationStatusFailed IntegrationStatus = "FAILED" IntegrationStatusDeleting IntegrationStatus = "DELETING" IntegrationStatusSyncing IntegrationStatus = "SYNCING" IntegrationStatusNeedsAttention IntegrationStatus = "NEEDS_ATTENTION" )
Enum values for IntegrationStatus
func (IntegrationStatus) Values ¶ added in v1.103.0
func (IntegrationStatus) Values() []IntegrationStatus
Values returns all known values for IntegrationStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type InternalServerException ¶ added in v1.103.0
type InternalServerException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An internal server error occurred.
func (*InternalServerException) Error ¶ added in v1.103.0
func (e *InternalServerException) Error() string
func (*InternalServerException) ErrorCode ¶ added in v1.103.0
func (e *InternalServerException) ErrorCode() string
func (*InternalServerException) ErrorFault ¶ added in v1.103.0
func (e *InternalServerException) ErrorFault() smithy.ErrorFault
func (*InternalServerException) ErrorMessage ¶ added in v1.103.0
func (e *InternalServerException) ErrorMessage() string
type InternalServiceException ¶
type InternalServiceException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An internal service error occurred.
func (*InternalServiceException) Error ¶
func (e *InternalServiceException) Error() string
func (*InternalServiceException) ErrorCode ¶
func (e *InternalServiceException) ErrorCode() string
func (*InternalServiceException) ErrorFault ¶
func (e *InternalServiceException) ErrorFault() smithy.ErrorFault
func (*InternalServiceException) ErrorMessage ¶
func (e *InternalServiceException) ErrorMessage() string
type InvalidInputException ¶
type InvalidInputException struct { Message *string ErrorCodeOverride *string FromFederationSource *bool // contains filtered or unexported fields }
The input provided was not valid.
func (*InvalidInputException) Error ¶
func (e *InvalidInputException) Error() string
func (*InvalidInputException) ErrorCode ¶
func (e *InvalidInputException) ErrorCode() string
func (*InvalidInputException) ErrorFault ¶
func (e *InvalidInputException) ErrorFault() smithy.ErrorFault
func (*InvalidInputException) ErrorMessage ¶
func (e *InvalidInputException) ErrorMessage() string
type InvalidIntegrationStateFault ¶ added in v1.103.0
type InvalidIntegrationStateFault struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The integration is in an invalid state.
func (*InvalidIntegrationStateFault) Error ¶ added in v1.103.0
func (e *InvalidIntegrationStateFault) Error() string
func (*InvalidIntegrationStateFault) ErrorCode ¶ added in v1.103.0
func (e *InvalidIntegrationStateFault) ErrorCode() string
func (*InvalidIntegrationStateFault) ErrorFault ¶ added in v1.103.0
func (e *InvalidIntegrationStateFault) ErrorFault() smithy.ErrorFault
func (*InvalidIntegrationStateFault) ErrorMessage ¶ added in v1.103.0
func (e *InvalidIntegrationStateFault) ErrorMessage() string
type InvalidStateException ¶ added in v1.16.0
type InvalidStateException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
An error that indicates your data is in an invalid state.
func (*InvalidStateException) Error ¶ added in v1.16.0
func (e *InvalidStateException) Error() string
func (*InvalidStateException) ErrorCode ¶ added in v1.16.0
func (e *InvalidStateException) ErrorCode() string
func (*InvalidStateException) ErrorFault ¶ added in v1.16.0
func (e *InvalidStateException) ErrorFault() smithy.ErrorFault
func (*InvalidStateException) ErrorMessage ¶ added in v1.16.0
func (e *InvalidStateException) ErrorMessage() string
type JDBCConnectionType ¶ added in v1.42.0
type JDBCConnectionType string
const ( JDBCConnectionTypeSqlserver JDBCConnectionType = "sqlserver" JDBCConnectionTypeMysql JDBCConnectionType = "mysql" JDBCConnectionTypeOracle JDBCConnectionType = "oracle" JDBCConnectionTypePostgresql JDBCConnectionType = "postgresql" JDBCConnectionTypeRedshift JDBCConnectionType = "redshift" )
Enum values for JDBCConnectionType
func (JDBCConnectionType) Values ¶ added in v1.42.0
func (JDBCConnectionType) Values() []JDBCConnectionType
Values returns all known values for JDBCConnectionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type JDBCConnectorOptions ¶ added in v1.25.0
type JDBCConnectorOptions struct { // Custom data type mapping that builds a mapping from a JDBC data type to a Glue // data type. For example, the option "dataTypeMapping":{"FLOAT":"STRING"} maps // data fields of JDBC type FLOAT into the Java String type by calling the // ResultSet.getString() method of the driver, and uses it to build the Glue // record. The ResultSet object is implemented by each driver, so the behavior is // specific to the driver you use. Refer to the documentation for your JDBC driver // to understand how the driver performs the conversions. DataTypeMapping map[string]GlueRecordType // Extra condition clause to filter data from source. For example: // // BillingCity='Mountain View' // // When using a query instead of a table name, you should validate that the query // works with the specified filterPredicate . FilterPredicate *string // The names of the job bookmark keys on which to sort. JobBookmarkKeys []string // Specifies an ascending or descending sort order. JobBookmarkKeysSortOrder *string // The minimum value of partitionColumn that is used to decide partition stride. LowerBound *int64 // The number of partitions. This value, along with lowerBound (inclusive) and // upperBound (exclusive), form partition strides for generated WHERE clause // expressions that are used to split the partitionColumn . NumPartitions *int64 // The name of an integer column that is used for partitioning. This option works // only when it's included with lowerBound , upperBound , and numPartitions . This // option works the same way as in the Spark SQL JDBC reader. PartitionColumn *string // The maximum value of partitionColumn that is used to decide partition stride. UpperBound *int64 // contains filtered or unexported fields }
Additional connection options for the connector.
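A sketch of connector options that read the source in ten parallel partitions over an integer column and map JDBC FLOAT values to Glue STRING (the column name, bounds, and predicate are hypothetical):

package glueutil

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var jdbcOptions = types.JDBCConnectorOptions{
    PartitionColumn: aws.String("id"),
    LowerBound:      aws.Int64(0),
    UpperBound:      aws.Int64(1000000),
    NumPartitions:   aws.Int64(10),
    DataTypeMapping: map[string]types.GlueRecordType{
        "FLOAT": types.GlueRecordTypeString,
    },
    FilterPredicate: aws.String("BillingCity='Mountain View'"),
}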
type JDBCConnectorSource ¶ added in v1.25.0
type JDBCConnectorSource struct { // The name of the connection that is associated with the connector. // // This member is required. ConnectionName *string // The type of connection, such as marketplace.jdbc or custom.jdbc, designating a // connection to a JDBC data store. // // This member is required. ConnectionType *string // The name of a connector that assists with accessing the data store in Glue // Studio. // // This member is required. ConnectorName *string // The name of the data source. // // This member is required. Name *string // Additional connection options for the connector. AdditionalOptions *JDBCConnectorOptions // The name of the table in the data source. ConnectionTable *string // Specifies the data schema for the custom JDBC source. OutputSchemas []GlueSchema // The table or SQL query to get the data from. You can specify either // ConnectionTable or query , but not both. Query *string // contains filtered or unexported fields }
Specifies a connector to a JDBC data source.
type JDBCConnectorTarget ¶ added in v1.25.0
type JDBCConnectorTarget struct { // The name of the connection that is associated with the connector. // // This member is required. ConnectionName *string // The name of the table in the data target. // // This member is required. ConnectionTable *string // The type of connection, such as marketplace.jdbc or custom.jdbc, designating a // connection to a JDBC data target. // // This member is required. ConnectionType *string // The name of a connector that will be used. // // This member is required. ConnectorName *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // Additional connection options for the connector. AdditionalOptions map[string]string // Specifies the data schema for the JDBC target. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a data target that writes to a JDBC data store through a connector.
type JDBCDataType ¶ added in v1.25.0
type JDBCDataType string
const ( JDBCDataTypeArray JDBCDataType = "ARRAY" JDBCDataTypeBigint JDBCDataType = "BIGINT" JDBCDataTypeBinary JDBCDataType = "BINARY" JDBCDataTypeBit JDBCDataType = "BIT" JDBCDataTypeBlob JDBCDataType = "BLOB" JDBCDataTypeBoolean JDBCDataType = "BOOLEAN" JDBCDataTypeChar JDBCDataType = "CHAR" JDBCDataTypeClob JDBCDataType = "CLOB" JDBCDataTypeDatalink JDBCDataType = "DATALINK" JDBCDataTypeDate JDBCDataType = "DATE" JDBCDataTypeDecimal JDBCDataType = "DECIMAL" JDBCDataTypeDistinct JDBCDataType = "DISTINCT" JDBCDataTypeDouble JDBCDataType = "DOUBLE" JDBCDataTypeFloat JDBCDataType = "FLOAT" JDBCDataTypeInteger JDBCDataType = "INTEGER" JDBCDataTypeJavaObject JDBCDataType = "JAVA_OBJECT" JDBCDataTypeLongnvarchar JDBCDataType = "LONGNVARCHAR" JDBCDataTypeLongvarbinary JDBCDataType = "LONGVARBINARY" JDBCDataTypeLongvarchar JDBCDataType = "LONGVARCHAR" JDBCDataTypeNchar JDBCDataType = "NCHAR" JDBCDataTypeNclob JDBCDataType = "NCLOB" JDBCDataTypeNull JDBCDataType = "NULL" JDBCDataTypeNumeric JDBCDataType = "NUMERIC" JDBCDataTypeNvarchar JDBCDataType = "NVARCHAR" JDBCDataTypeOther JDBCDataType = "OTHER" JDBCDataTypeReal JDBCDataType = "REAL" JDBCDataTypeRef JDBCDataType = "REF" JDBCDataTypeRefCursor JDBCDataType = "REF_CURSOR" JDBCDataTypeRowid JDBCDataType = "ROWID" JDBCDataTypeSmallint JDBCDataType = "SMALLINT" JDBCDataTypeSqlxml JDBCDataType = "SQLXML" JDBCDataTypeStruct JDBCDataType = "STRUCT" JDBCDataTypeTime JDBCDataType = "TIME" JDBCDataTypeTimeWithTimezone JDBCDataType = "TIME_WITH_TIMEZONE" JDBCDataTypeTimestamp JDBCDataType = "TIMESTAMP" JDBCDataTypeTimestampWithTimezone JDBCDataType = "TIMESTAMP_WITH_TIMEZONE" JDBCDataTypeTinyint JDBCDataType = "TINYINT" JDBCDataTypeVarbinary JDBCDataType = "VARBINARY" JDBCDataTypeVarchar JDBCDataType = "VARCHAR" )
Enum values for JDBCDataType
func (JDBCDataType) Values ¶ added in v1.25.0
func (JDBCDataType) Values() []JDBCDataType
Values returns all known values for JDBCDataType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type JdbcMetadataEntry ¶ added in v1.35.0
type JdbcMetadataEntry string
const ( JdbcMetadataEntryComments JdbcMetadataEntry = "COMMENTS" JdbcMetadataEntryRawtypes JdbcMetadataEntry = "RAWTYPES" )
Enum values for JdbcMetadataEntry
func (JdbcMetadataEntry) Values ¶ added in v1.35.0
func (JdbcMetadataEntry) Values() []JdbcMetadataEntry
Values returns all known values for JdbcMetadataEntry. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type JdbcTarget ¶
type JdbcTarget struct { // The name of the connection to use to connect to the JDBC target. ConnectionName *string // Specify a value of RAWTYPES or COMMENTS to enable additional metadata in table // responses. RAWTYPES provides the native-level datatype. COMMENTS provides // comments associated with a column or table in the database. // // If you do not need additional metadata, keep the field empty. EnableAdditionalMetadata []JdbcMetadataEntry // A list of glob patterns used to exclude from the crawl. For more information, // see [Catalog Tables with a Crawler]. // // [Catalog Tables with a Crawler]: https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html Exclusions []string // The path of the JDBC target. Path *string // contains filtered or unexported fields }
Specifies a JDBC data store to crawl.
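A minimal sketch of a crawler JDBC target, assuming a connection named my-jdbc-connection and an illustrative include path; the exclusion pattern and the COMMENTS metadata entry follow the field descriptions above.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    // Crawl tables reachable through a JDBC connection, skipping staging
    // tables and pulling column comments into the Data Catalog.
    target := types.JdbcTarget{
        ConnectionName: aws.String("my-jdbc-connection"), // hypothetical connection
        Path:           aws.String("sales/%"),            // hypothetical include path
        Exclusions:     []string{"sales/staging_*"},      // glob patterns to skip
        EnableAdditionalMetadata: []types.JdbcMetadataEntry{
            types.JdbcMetadataEntryComments,
        },
    }
    fmt.Println(*target.Path)
}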
type Job ¶
type Job struct { // This field is deprecated. Use MaxCapacity instead. // // The number of Glue data processing units (DPUs) allocated to runs of this job. // You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative // measure of processing power that consists of 4 vCPUs of compute capacity and 16 // GB of memory. For more information, see the [Glue pricing page]. // // [Glue pricing page]: https://aws.amazon.com/glue/pricing/ // // Deprecated: This property is deprecated, use MaxCapacity instead. AllocatedCapacity int32 // The representation of a directed acyclic graph on which both the Glue Studio // visual component and Glue Studio code generation is based. CodeGenConfigurationNodes map[string]CodeGenConfigurationNode // The JobCommand that runs this job. Command *JobCommand // The connections used for this job. Connections *ConnectionsList // The time and date that this job definition was created. CreatedOn *time.Time // The default arguments for every run of this job, specified as name-value pairs. // // You can specify arguments here that your own job-execution script consumes, as // well as arguments that Glue itself consumes. // // Job arguments may be logged. Do not pass plaintext secrets as arguments. // Retrieve secrets from a Glue Connection, Secrets Manager or other secret // management mechanism if you intend to keep them within the Job. // // For information about how to specify and consume your own Job arguments, see // the [Calling Glue APIs in Python]topic in the developer guide. // // For information about the arguments you can provide to this field when // configuring Spark jobs, see the [Special Parameters Used by Glue]topic in the developer guide. // // For information about the arguments you can provide to this field when // configuring Ray jobs, see [Using job parameters in Ray jobs]in the developer guide. // // [Using job parameters in Ray jobs]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html // [Calling Glue APIs in Python]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html // [Special Parameters Used by Glue]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html DefaultArguments map[string]string // A description of the job. Description *string // Indicates whether the job is run with a standard or flexible execution class. // The standard execution class is ideal for time-sensitive workloads that require // fast job startup and dedicated resources. // // The flexible execution class is appropriate for time-insensitive jobs whose // start and completion times may vary. // // Only jobs with Glue version 3.0 and above and command type glueetl will be // allowed to set ExecutionClass to FLEX . The flexible execution class is // available for Spark jobs. ExecutionClass ExecutionClass // An ExecutionProperty specifying the maximum number of concurrent runs allowed // for this job. ExecutionProperty *ExecutionProperty // In Spark jobs, GlueVersion determines the versions of Apache Spark and Python // that Glue available in a job. The Python version indicates the version supported // for jobs of type Spark. // // Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of // Ray, Python and additional libraries available in your Ray job are determined by // the Runtime parameter of the Job command. 
// // For more information about the available Glue versions and corresponding Spark // and Python versions, see [Glue version]in the developer guide. // // Jobs that are created without specifying a Glue version default to Glue 0.9. // // [Glue version]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html GlueVersion *string // A mode that describes how a job was created. Valid values are: // // - SCRIPT - The job was created using the Glue Studio script editor. // // - VISUAL - The job was created using the Glue Studio visual editor. // // - NOTEBOOK - The job was created using an interactive sessions notebook. // // When the JobMode field is missing or null, SCRIPT is assigned as the default // value. JobMode JobMode // Specifies whether job run queuing is enabled for the job runs for this job. // // A value of true means job run queuing is enabled for the job runs. If false or // not populated, the job runs will not be considered for queueing. // // If this field does not match the value set in the job run, then the value from // the job run field will be used. JobRunQueuingEnabled *bool // The last point in time when this job definition was modified. LastModifiedOn *time.Time // This field is reserved for future use. LogUri *string // This field specifies a day of the week and hour for a maintenance window for // streaming jobs. Glue periodically performs maintenance activities. During these // maintenance windows, Glue will need to restart your streaming jobs. // // Glue will restart the job within 3 hours of the specified maintenance window. // For instance, if you set up the maintenance window for Monday at 10:00AM GMT, // your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. MaintenanceWindow *string // For Glue version 1.0 or earlier jobs, using the standard worker type, the // number of Glue data processing units (DPUs) that can be allocated when this job // runs. A DPU is a relative measure of processing power that consists of 4 vCPUs // of compute capacity and 16 GB of memory. For more information, see the [Glue pricing page]. // // For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity . // Instead, you should specify a Worker type and the Number of workers . // // Do not set MaxCapacity if using WorkerType and NumberOfWorkers . // // The value that can be allocated for MaxCapacity depends on whether you are // running a Python shell job, an Apache Spark ETL job, or an Apache Spark // streaming ETL job: // // - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you // can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // // - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or // Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can // allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a // fractional DPU allocation. // // [Glue pricing page]: https://aws.amazon.com/glue/pricing/ MaxCapacity *float64 // The maximum number of times to retry this job after a JobRun fails. MaxRetries int32 // The name you assign to this job definition. Name *string // Arguments for this job that are not overridden when providing job arguments in // a job run, specified as name-value pairs. NonOverridableArguments map[string]string // Specifies configuration properties of a job notification. NotificationProperty *NotificationProperty // The number of workers of a defined workerType that are allocated when a job // runs. 
NumberOfWorkers *int32 // The name of an Glue usage profile associated with the job. ProfileName *string // The name or Amazon Resource Name (ARN) of the IAM role associated with this job. Role *string // The name of the SecurityConfiguration structure to be used with this job. SecurityConfiguration *string // The details for a source control configuration for a job, allowing // synchronization of job artifacts to or from a remote repository. SourceControlDetails *SourceControlDetails // The job timeout in minutes. This is the maximum time that a job run can consume // resources before it is terminated and enters TIMEOUT status. // // Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the // jobs will throw an exception. // // When the value is left blank, the timeout is defaulted to 2880 minutes. // // Any existing Glue jobs that had a timeout value greater than 7 days will be // defaulted to 7 days. For instance if you have specified a timeout of 20 days for // a batch job, it will be stopped on the 7th day. // // For streaming jobs, if you have set up a maintenance window, it will be // restarted during the maintenance window after 7 days. Timeout *int32 // The type of predefined worker that is allocated when a job runs. Accepts a // value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X // for Ray jobs. // // - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of // memory) with 94GB disk, and provides 1 executor per worker. We recommend this // worker type for workloads such as data transforms, joins, and queries, to offers // a scalable and cost effective way to run most jobs. // // - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of // memory) with 138GB disk, and provides 1 executor per worker. We recommend this // worker type for workloads such as data transforms, joins, and queries, to offers // a scalable and cost effective way to run most jobs. // // - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of // memory) with 256GB disk, and provides 1 executor per worker. We recommend this // worker type for jobs whose workloads contain your most demanding transforms, // aggregations, joins, and queries. This worker type is available only for Glue // version 3.0 or later Spark ETL jobs in the following Amazon Web Services // Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific // (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), // Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). // // - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of // memory) with 512GB disk, and provides 1 executor per worker. We recommend this // worker type for jobs whose workloads contain your most demanding transforms, // aggregations, joins, and queries. This worker type is available only for Glue // version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as // supported for the G.4X worker type. // // - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of // memory) with 84GB disk, and provides 1 executor per worker. We recommend this // worker type for low volume streaming jobs. This worker type is only available // for Glue version 3.0 or later streaming jobs. // // - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of // memory) with 128 GB disk, and provides up to 8 Ray workers based on the // autoscaler. 
WorkerType WorkerType // contains filtered or unexported fields }
Specifies a job definition.
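Because most Job fields are pointers, callers typically nil-check before use. The sketch below shows one way to read a returned Job: it applies the documented 2880-minute timeout default and reports whether the job is sized by WorkerType/NumberOfWorkers or by the older MaxCapacity DPU count. The job name is a placeholder, and the WorkerType value is built by direct string conversion rather than a named constant.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// effectiveTimeout returns the job timeout in minutes, falling back to the
// documented default of 2880 minutes when the field is unset.
func effectiveTimeout(j types.Job) int32 {
    if j.Timeout != nil {
        return *j.Timeout
    }
    return 2880
}

// describeCapacity reports whether a job is sized with the newer
// WorkerType/NumberOfWorkers pair or the older MaxCapacity DPU count.
func describeCapacity(j types.Job) string {
    if j.WorkerType != "" && j.NumberOfWorkers != nil {
        return fmt.Sprintf("%d x %s workers", *j.NumberOfWorkers, j.WorkerType)
    }
    if j.MaxCapacity != nil {
        return fmt.Sprintf("%g DPUs", *j.MaxCapacity)
    }
    return "capacity not specified"
}

func main() {
    // A Job value as it might come back from the service (fields abbreviated).
    j := types.Job{
        Name:            aws.String("nightly-etl"),     // placeholder job name
        WorkerType:      types.WorkerType("G.1X"),      // direct conversion of a documented value
        NumberOfWorkers: aws.Int32(10),
    }
    fmt.Println(effectiveTimeout(j), describeCapacity(j))
}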
type JobBookmarkEntry ¶
type JobBookmarkEntry struct { // The attempt ID number. Attempt int32 // The bookmark itself. JobBookmark *string // The name of the job in question. JobName *string // The unique run identifier associated with the previous job run. PreviousRunId *string // The run ID number. Run int32 // The run ID number. RunId *string // The version of the job. Version int32 // contains filtered or unexported fields }
Defines a point from which a job can resume processing.
type JobBookmarksEncryption ¶
type JobBookmarksEncryption struct { // The encryption mode to use for job bookmarks data. JobBookmarksEncryptionMode JobBookmarksEncryptionMode // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string // contains filtered or unexported fields }
Specifies how job bookmark data should be encrypted.
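A short sketch of client-side KMS encryption for job bookmarks, using the CSE-KMS mode constant listed under JobBookmarksEncryptionMode below; the key ARN is a placeholder.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    // Encrypt job bookmark data client-side with a customer-managed KMS key.
    enc := types.JobBookmarksEncryption{
        JobBookmarksEncryptionMode: types.JobBookmarksEncryptionModeCsekms,
        KmsKeyArn:                  aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder ARN
    }
    fmt.Println(enc.JobBookmarksEncryptionMode)
}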
type JobBookmarksEncryptionMode ¶
type JobBookmarksEncryptionMode string
const ( JobBookmarksEncryptionModeDisabled JobBookmarksEncryptionMode = "DISABLED" JobBookmarksEncryptionModeCsekms JobBookmarksEncryptionMode = "CSE-KMS" )
Enum values for JobBookmarksEncryptionMode
func (JobBookmarksEncryptionMode) Values ¶ added in v0.29.0
func (JobBookmarksEncryptionMode) Values() []JobBookmarksEncryptionMode
Values returns all known values for JobBookmarksEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type JobCommand ¶
type JobCommand struct { // The name of the job command. For an Apache Spark ETL job, this must be glueetl . // For a Python shell job, it must be pythonshell . For an Apache Spark streaming // ETL job, this must be gluestreaming . For a Ray job, this must be glueray . Name *string // The Python version being used to run a Python shell job. Allowed values are 2 // or 3. PythonVersion *string // In Ray jobs, Runtime is used to specify the versions of Ray, Python and // additional libraries available in your environment. This field is not used in // other job types. For supported runtime environment values, see [Supported Ray runtime environments]in the Glue // Developer Guide. // // [Supported Ray runtime environments]: https://docs.aws.amazon.com/glue/latest/dg/ray-jobs-section.html Runtime *string // Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that // runs a job. ScriptLocation *string // contains filtered or unexported fields }
Specifies code that runs when a job is run.
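A minimal sketch of a Spark ETL command; per the field description above, the name must be glueetl, and the S3 script path here is a placeholder.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    // An Apache Spark ETL command: the name must be "glueetl" and the script
    // is read from the given Amazon S3 path.
    cmd := types.JobCommand{
        Name:           aws.String("glueetl"),
        PythonVersion:  aws.String("3"),
        ScriptLocation: aws.String("s3://my-bucket/scripts/nightly_etl.py"), // placeholder path
    }
    fmt.Println(*cmd.Name)
}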
type JobMode ¶ added in v1.82.0
type JobMode string
type JobNodeDetails ¶
type JobNodeDetails struct { // The information for the job runs represented by the job node. JobRuns []JobRun // contains filtered or unexported fields }
The details of a Job node present in the workflow.
type JobRun ¶
type JobRun struct { // This field is deprecated. Use MaxCapacity instead. // // The number of Glue data processing units (DPUs) allocated to this JobRun. From // 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure // of processing power that consists of 4 vCPUs of compute capacity and 16 GB of // memory. For more information, see the [Glue pricing page]. // // [Glue pricing page]: https://aws.amazon.com/glue/pricing/ // // Deprecated: This property is deprecated, use MaxCapacity instead. AllocatedCapacity int32 // The job arguments associated with this run. For this job run, they replace the // default arguments set in the job definition itself. // // You can specify arguments here that your own job-execution script consumes, as // well as arguments that Glue itself consumes. // // Job arguments may be logged. Do not pass plaintext secrets as arguments. // Retrieve secrets from a Glue Connection, Secrets Manager or other secret // management mechanism if you intend to keep them within the Job. // // For information about how to specify and consume your own Job arguments, see // the [Calling Glue APIs in Python]topic in the developer guide. // // For information about the arguments you can provide to this field when // configuring Spark jobs, see the [Special Parameters Used by Glue]topic in the developer guide. // // For information about the arguments you can provide to this field when // configuring Ray jobs, see [Using job parameters in Ray jobs]in the developer guide. // // [Using job parameters in Ray jobs]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html // [Calling Glue APIs in Python]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html // [Special Parameters Used by Glue]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html Arguments map[string]string // The number of the attempt to run this job. Attempt int32 // The date and time that this job run completed. CompletedOn *time.Time // This field can be set for either job runs with execution class FLEX or when // Auto Scaling is enabled, and represents the total time each executor ran during // the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X , // 2 for G.2X , or 0.25 for G.025X workers). This value may be different than the // executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as // the number of executors running at a given time may be less than the MaxCapacity // . Therefore, it is possible that the value of DPUSeconds is less than // executionEngineRuntime * MaxCapacity . DPUSeconds *float64 // An error message associated with this job run. ErrorMessage *string // Indicates whether the job is run with a standard or flexible execution class. // The standard execution-class is ideal for time-sensitive workloads that require // fast job startup and dedicated resources. // // The flexible execution class is appropriate for time-insensitive jobs whose // start and completion times may vary. // // Only jobs with Glue version 3.0 and above and command type glueetl will be // allowed to set ExecutionClass to FLEX . The flexible execution class is // available for Spark jobs. ExecutionClass ExecutionClass // The amount of time (in seconds) that the job run consumed resources. ExecutionTime int32 // In Spark jobs, GlueVersion determines the versions of Apache Spark and Python // that Glue available in a job. 
The Python version indicates the version supported // for jobs of type Spark. // // Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of // Ray, Python and additional libraries available in your Ray job are determined by // the Runtime parameter of the Job command. // // For more information about the available Glue versions and corresponding Spark // and Python versions, see [Glue version]in the developer guide. // // Jobs that are created without specifying a Glue version default to Glue 0.9. // // [Glue version]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html GlueVersion *string // The ID of this job run. Id *string // A mode that describes how a job was created. Valid values are: // // - SCRIPT - The job was created using the Glue Studio script editor. // // - VISUAL - The job was created using the Glue Studio visual editor. // // - NOTEBOOK - The job was created using an interactive sessions notebook. // // When the JobMode field is missing or null, SCRIPT is assigned as the default // value. JobMode JobMode // The name of the job definition being used in this run. JobName *string // Specifies whether job run queuing is enabled for the job run. // // A value of true means job run queuing is enabled for the job run. If false or // not populated, the job run will not be considered for queueing. JobRunQueuingEnabled *bool // The current state of the job run. For more information about the statuses of // jobs that have terminated abnormally, see [Glue Job Run Statuses]. // // [Glue Job Run Statuses]: https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html JobRunState JobRunState // The last time that this job run was modified. LastModifiedOn *time.Time // The name of the log group for secure logging that can be server-side encrypted // in Amazon CloudWatch using KMS. This name can be /aws-glue/jobs/ , in which case // the default encryption is NONE . If you add a role name and // SecurityConfiguration name (in other words, // /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security // configuration is used to encrypt the log group. LogGroupName *string // This field specifies a day of the week and hour for a maintenance window for // streaming jobs. Glue periodically performs maintenance activities. During these // maintenance windows, Glue will need to restart your streaming jobs. // // Glue will restart the job within 3 hours of the specified maintenance window. // For instance, if you set up the maintenance window for Monday at 10:00AM GMT, // your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. MaintenanceWindow *string // For Glue version 1.0 or earlier jobs, using the standard worker type, the // number of Glue data processing units (DPUs) that can be allocated when this job // runs. A DPU is a relative measure of processing power that consists of 4 vCPUs // of compute capacity and 16 GB of memory. For more information, see the [Glue pricing page]. // // For Glue version 2.0+ jobs, you cannot specify a Maximum capacity . Instead, you // should specify a Worker type and the Number of workers . // // Do not set MaxCapacity if using WorkerType and NumberOfWorkers . // // The value that can be allocated for MaxCapacity depends on whether you are // running a Python shell job, an Apache Spark ETL job, or an Apache Spark // streaming ETL job: // // - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you // can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. 
// // - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or // Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can // allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a // fractional DPU allocation. // // [Glue pricing page]: https://aws.amazon.com/glue/pricing/ MaxCapacity *float64 // Specifies configuration properties of a job run notification. NotificationProperty *NotificationProperty // The number of workers of a defined workerType that are allocated when a job // runs. NumberOfWorkers *int32 // A list of predecessors to this job run. PredecessorRuns []Predecessor // The ID of the previous run of this job. For example, the JobRunId specified in // the StartJobRun action. PreviousRunId *string // The name of an Glue usage profile associated with the job run. ProfileName *string // The name of the SecurityConfiguration structure to be used with this job run. SecurityConfiguration *string // The date and time at which this job run was started. StartedOn *time.Time // This field holds details that pertain to the state of a job run. The field is // nullable. // // For example, when a job run is in a WAITING state as a result of job run // queuing, the field has the reason why the job run is in that state. StateDetail *string // The JobRun timeout in minutes. This is the maximum time that a job run can // consume resources before it is terminated and enters TIMEOUT status. This value // overrides the timeout value set in the parent job. // // Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the // jobs will throw an exception. // // When the value is left blank, the timeout is defaulted to 2880 minutes. // // Any existing Glue jobs that had a timeout value greater than 7 days will be // defaulted to 7 days. For instance if you have specified a timeout of 20 days for // a batch job, it will be stopped on the 7th day. // // For streaming jobs, if you have set up a maintenance window, it will be // restarted during the maintenance window after 7 days. Timeout *int32 // The name of the trigger that started this job run. TriggerName *string // The type of predefined worker that is allocated when a job runs. Accepts a // value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X // for Ray jobs. // // - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of // memory) with 94GB disk, and provides 1 executor per worker. We recommend this // worker type for workloads such as data transforms, joins, and queries, to offers // a scalable and cost effective way to run most jobs. // // - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of // memory) with 138GB disk, and provides 1 executor per worker. We recommend this // worker type for workloads such as data transforms, joins, and queries, to offers // a scalable and cost effective way to run most jobs. // // - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of // memory) with 256GB disk, and provides 1 executor per worker. We recommend this // worker type for jobs whose workloads contain your most demanding transforms, // aggregations, joins, and queries. This worker type is available only for Glue // version 3.0 or later Spark ETL jobs in the following Amazon Web Services // Regions: US East (Ohio), US East (N. 
Virginia), US West (Oregon), Asia Pacific // (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), // Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). // // - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of // memory) with 512GB disk, and provides 1 executor per worker. We recommend this // worker type for jobs whose workloads contain your most demanding transforms, // aggregations, joins, and queries. This worker type is available only for Glue // version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as // supported for the G.4X worker type. // // - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of // memory) with 84GB disk, and provides 1 executor per worker. We recommend this // worker type for low volume streaming jobs. This worker type is only available // for Glue version 3.0 or later streaming jobs. // // - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of // memory) with 128 GB disk, and provides up to 8 Ray workers based on the // autoscaler. WorkerType WorkerType // contains filtered or unexported fields }
Contains information about a job run.
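A small sketch of consuming a JobRun after the fact: it switches on JobRunState (see the enum below) and surfaces ErrorMessage or StateDetail when they are populated. The grouping of states into buckets here is illustrative, not exhaustive.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// summarize reports whether a run finished, failed, or is still in flight.
func summarize(r types.JobRun) string {
    switch r.JobRunState {
    case types.JobRunStateSucceeded:
        return "succeeded"
    case types.JobRunStateFailed, types.JobRunStateError, types.JobRunStateTimeout:
        msg := "no error message"
        if r.ErrorMessage != nil {
            msg = *r.ErrorMessage
        }
        return "failed: " + msg
    case types.JobRunStateWaiting:
        // StateDetail explains why a queued run is waiting, when populated.
        if r.StateDetail != nil {
            return "waiting: " + *r.StateDetail
        }
        return "waiting"
    default:
        return "in progress"
    }
}

func main() {
    fmt.Println(summarize(types.JobRun{JobRunState: types.JobRunStateSucceeded}))
}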
type JobRunState ¶
type JobRunState string
const ( JobRunStateStarting JobRunState = "STARTING" JobRunStateRunning JobRunState = "RUNNING" JobRunStateStopping JobRunState = "STOPPING" JobRunStateStopped JobRunState = "STOPPED" JobRunStateSucceeded JobRunState = "SUCCEEDED" JobRunStateFailed JobRunState = "FAILED" JobRunStateTimeout JobRunState = "TIMEOUT" JobRunStateError JobRunState = "ERROR" JobRunStateWaiting JobRunState = "WAITING" JobRunStateExpired JobRunState = "EXPIRED" )
Enum values for JobRunState
func (JobRunState) Values ¶ added in v0.29.0
func (JobRunState) Values() []JobRunState
Values returns all known values for JobRunState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type JobUpdate ¶
type JobUpdate struct { // This field is deprecated. Use MaxCapacity instead. // // The number of Glue data processing units (DPUs) to allocate to this job. You // can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure // of processing power that consists of 4 vCPUs of compute capacity and 16 GB of // memory. For more information, see the [Glue pricing page]. // // [Glue pricing page]: https://aws.amazon.com/glue/pricing/ // // Deprecated: This property is deprecated, use MaxCapacity instead. AllocatedCapacity int32 // The representation of a directed acyclic graph on which both the Glue Studio // visual component and Glue Studio code generation is based. CodeGenConfigurationNodes map[string]CodeGenConfigurationNode // The JobCommand that runs this job (required). Command *JobCommand // The connections used for this job. Connections *ConnectionsList // The default arguments for every run of this job, specified as name-value pairs. // // You can specify arguments here that your own job-execution script consumes, as // well as arguments that Glue itself consumes. // // Job arguments may be logged. Do not pass plaintext secrets as arguments. // Retrieve secrets from a Glue Connection, Secrets Manager or other secret // management mechanism if you intend to keep them within the Job. // // For information about how to specify and consume your own Job arguments, see // the [Calling Glue APIs in Python]topic in the developer guide. // // For information about the arguments you can provide to this field when // configuring Spark jobs, see the [Special Parameters Used by Glue]topic in the developer guide. // // For information about the arguments you can provide to this field when // configuring Ray jobs, see [Using job parameters in Ray jobs]in the developer guide. // // [Using job parameters in Ray jobs]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html // [Calling Glue APIs in Python]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html // [Special Parameters Used by Glue]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html DefaultArguments map[string]string // Description of the job being defined. Description *string // Indicates whether the job is run with a standard or flexible execution class. // The standard execution-class is ideal for time-sensitive workloads that require // fast job startup and dedicated resources. // // The flexible execution class is appropriate for time-insensitive jobs whose // start and completion times may vary. // // Only jobs with Glue version 3.0 and above and command type glueetl will be // allowed to set ExecutionClass to FLEX . The flexible execution class is // available for Spark jobs. ExecutionClass ExecutionClass // An ExecutionProperty specifying the maximum number of concurrent runs allowed // for this job. ExecutionProperty *ExecutionProperty // In Spark jobs, GlueVersion determines the versions of Apache Spark and Python // that Glue available in a job. The Python version indicates the version supported // for jobs of type Spark. // // Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of // Ray, Python and additional libraries available in your Ray job are determined by // the Runtime parameter of the Job command. // // For more information about the available Glue versions and corresponding Spark // and Python versions, see [Glue version]in the developer guide. 
// // Jobs that are created without specifying a Glue version default to Glue 0.9. // // [Glue version]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html GlueVersion *string // A mode that describes how a job was created. Valid values are: // // - SCRIPT - The job was created using the Glue Studio script editor. // // - VISUAL - The job was created using the Glue Studio visual editor. // // - NOTEBOOK - The job was created using an interactive sessions notebook. // // When the JobMode field is missing or null, SCRIPT is assigned as the default // value. JobMode JobMode // Specifies whether job run queuing is enabled for the job runs for this job. // // A value of true means job run queuing is enabled for the job runs. If false or // not populated, the job runs will not be considered for queueing. // // If this field does not match the value set in the job run, then the value from // the job run field will be used. JobRunQueuingEnabled *bool // This field is reserved for future use. LogUri *string // This field specifies a day of the week and hour for a maintenance window for // streaming jobs. Glue periodically performs maintenance activities. During these // maintenance windows, Glue will need to restart your streaming jobs. // // Glue will restart the job within 3 hours of the specified maintenance window. // For instance, if you set up the maintenance window for Monday at 10:00AM GMT, // your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. MaintenanceWindow *string // For Glue version 1.0 or earlier jobs, using the standard worker type, the // number of Glue data processing units (DPUs) that can be allocated when this job // runs. A DPU is a relative measure of processing power that consists of 4 vCPUs // of compute capacity and 16 GB of memory. For more information, see the [Glue pricing page]. // // For Glue version 2.0+ jobs, you cannot specify a Maximum capacity . Instead, you // should specify a Worker type and the Number of workers . // // Do not set MaxCapacity if using WorkerType and NumberOfWorkers . // // The value that can be allocated for MaxCapacity depends on whether you are // running a Python shell job, an Apache Spark ETL job, or an Apache Spark // streaming ETL job: // // - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you // can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // // - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or // Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can // allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a // fractional DPU allocation. // // [Glue pricing page]: https://aws.amazon.com/glue/pricing/ MaxCapacity *float64 // The maximum number of times to retry this job if it fails. MaxRetries int32 // Arguments for this job that are not overridden when providing job arguments in // a job run, specified as name-value pairs. NonOverridableArguments map[string]string // Specifies the configuration properties of a job notification. NotificationProperty *NotificationProperty // The number of workers of a defined workerType that are allocated when a job // runs. NumberOfWorkers *int32 // The name or Amazon Resource Name (ARN) of the IAM role associated with this job // (required). Role *string // The name of the SecurityConfiguration structure to be used with this job. 
SecurityConfiguration *string // The details for a source control configuration for a job, allowing // synchronization of job artifacts to or from a remote repository. SourceControlDetails *SourceControlDetails // The job timeout in minutes. This is the maximum time that a job run can consume // resources before it is terminated and enters TIMEOUT status. // // Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the // jobs will throw an exception. // // When the value is left blank, the timeout is defaulted to 2880 minutes. // // Any existing Glue jobs that had a timeout value greater than 7 days will be // defaulted to 7 days. For instance if you have specified a timeout of 20 days for // a batch job, it will be stopped on the 7th day. // // For streaming jobs, if you have set up a maintenance window, it will be // restarted during the maintenance window after 7 days. Timeout *int32 // The type of predefined worker that is allocated when a job runs. Accepts a // value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X // for Ray jobs. // // - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of // memory) with 94GB disk, and provides 1 executor per worker. We recommend this // worker type for workloads such as data transforms, joins, and queries, to offers // a scalable and cost effective way to run most jobs. // // - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of // memory) with 138GB disk, and provides 1 executor per worker. We recommend this // worker type for workloads such as data transforms, joins, and queries, to offers // a scalable and cost effective way to run most jobs. // // - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of // memory) with 256GB disk, and provides 1 executor per worker. We recommend this // worker type for jobs whose workloads contain your most demanding transforms, // aggregations, joins, and queries. This worker type is available only for Glue // version 3.0 or later Spark ETL jobs in the following Amazon Web Services // Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific // (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), // Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). // // - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of // memory) with 512GB disk, and provides 1 executor per worker. We recommend this // worker type for jobs whose workloads contain your most demanding transforms, // aggregations, joins, and queries. This worker type is available only for Glue // version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as // supported for the G.4X worker type. // // - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of // memory) with 84GB disk, and provides 1 executor per worker. We recommend this // worker type for low volume streaming jobs. This worker type is only available // for Glue version 3.0 or later streaming jobs. // // - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of // memory) with 128 GB disk, and provides up to 8 Ray workers based on the // autoscaler. WorkerType WorkerType // contains filtered or unexported fields }
Specifies information used to update an existing job definition. The previous job definition is completely overwritten by this information.
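A minimal sketch of a JobUpdate that moves a job to Glue 4.0 and sizes it with workers instead of MaxCapacity. Command and Role are included because the field comments above mark them as required; the role ARN, script path, and the G.2X literal (written as a direct WorkerType conversion) are placeholders or assumptions.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    update := types.JobUpdate{
        Role: aws.String("arn:aws:iam::111122223333:role/GlueJobRole"), // placeholder role ARN
        Command: &types.JobCommand{
            Name:           aws.String("glueetl"),
            PythonVersion:  aws.String("3"),
            ScriptLocation: aws.String("s3://my-bucket/scripts/nightly_etl.py"), // placeholder path
        },
        GlueVersion:     aws.String("4.0"),
        WorkerType:      types.WorkerType("G.2X"), // direct conversion of a documented value
        NumberOfWorkers: aws.Int32(10),
        Timeout:         aws.Int32(120), // minutes
    }
    fmt.Println(*update.GlueVersion)
}

Note that WorkerType/NumberOfWorkers and MaxCapacity are alternatives: the field docs above say not to set MaxCapacity when the worker fields are used.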
type Join ¶ added in v1.25.0
type Join struct { // A list of the two columns to be joined. // // This member is required. Columns []JoinColumn // The data inputs identified by their node names. // // This member is required. Inputs []string // Specifies the type of join to be performed on the datasets. // // This member is required. JoinType JoinType // The name of the transform node. // // This member is required. Name *string // contains filtered or unexported fields }
Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.
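A sketch of an equi join of two upstream nodes on a shared key. The "equijoin" literal and the use of input node names in JoinColumn.From follow common Glue Studio output and are assumptions here; consult the JoinType enum for the authoritative set of values.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    // Join two upstream nodes on customer_id.
    join := types.Join{
        Name:     aws.String("join-orders-customers"),
        Inputs:   []string{"orders-node", "customers-node"}, // upstream node names (examples)
        JoinType: types.JoinType("equijoin"),                // assumed literal for an inner/equi join
        Columns: []types.JoinColumn{
            {From: aws.String("orders-node"), Keys: [][]string{{"customer_id"}}},
            {From: aws.String("customers-node"), Keys: [][]string{{"customer_id"}}},
        },
    }
    fmt.Println(*join.Name)
}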
type JoinColumn ¶ added in v1.25.0
type JoinColumn struct { // The column to be joined. // // This member is required. From *string // The key of the column to be joined. // // This member is required. Keys [][]string // contains filtered or unexported fields }
Specifies a column to be joined.
type JoinType ¶ added in v1.25.0
type JoinType string
type JsonClassifier ¶
type JsonClassifier struct { // A JsonPath string defining the JSON data for the classifier to classify. Glue // supports a subset of JsonPath, as described in [Writing JsonPath Custom Classifiers]. // // [Writing JsonPath Custom Classifiers]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json // // This member is required. JsonPath *string // The name of the classifier. // // This member is required. Name *string // The time that this classifier was registered. CreationTime *time.Time // The time that this classifier was last updated. LastUpdated *time.Time // The version of this classifier. Version int64 // contains filtered or unexported fields }
A classifier for JSON content.
type KMSKeyNotAccessibleFault ¶ added in v1.103.0
type KMSKeyNotAccessibleFault struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The KMS key specified is not accessible.
func (*KMSKeyNotAccessibleFault) Error ¶ added in v1.103.0
func (e *KMSKeyNotAccessibleFault) Error() string
func (*KMSKeyNotAccessibleFault) ErrorCode ¶ added in v1.103.0
func (e *KMSKeyNotAccessibleFault) ErrorCode() string
func (*KMSKeyNotAccessibleFault) ErrorFault ¶ added in v1.103.0
func (e *KMSKeyNotAccessibleFault) ErrorFault() smithy.ErrorFault
func (*KMSKeyNotAccessibleFault) ErrorMessage ¶ added in v1.103.0
func (e *KMSKeyNotAccessibleFault) ErrorMessage() string
type KafkaStreamingSourceOptions ¶ added in v1.25.0
type KafkaStreamingSourceOptions struct { // When this option is set to 'true', the data output will contain an additional // column named "__src_timestamp" that indicates the time when the corresponding // record received by the topic. The default value is 'false'. This option is // supported in Glue version 4.0 or later. AddRecordTimestamp *string // The specific TopicPartitions to consume. You must specify at least one of // "topicName" , "assign" or "subscribePattern" . Assign *string // A list of bootstrap server URLs, for example, as // b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094 . This option must // be specified in the API call or defined in the table metadata in the Data // Catalog. BootstrapServers *string // An optional classification. Classification *string // The name of the connection. ConnectionName *string // Specifies the delimiter character. Delimiter *string // When this option is set to 'true', for each batch, it will emit the metrics for // the duration between the oldest record received by the topic and the time it // arrives in Glue to CloudWatch. The metric's name is // "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This // option is supported in Glue version 4.0 or later. EmitConsumerLagMetrics *string // The end point when a batch query is ended. Possible values are either "latest" // or a JSON string that specifies an ending offset for each TopicPartition . EndingOffsets *string // Whether to include the Kafka headers. When the option is set to "true", the // data output will contain an additional column named // "glue_streaming_kafka_headers" with type Array[Struct(key: String, value: // String)] . The default value is "false". This option is available in Glue // version 3.0 or later only. IncludeHeaders *bool // The rate limit on the maximum number of offsets that are processed per trigger // interval. The specified total number of offsets is proportionally split across // topicPartitions of different volumes. The default value is null, which means // that the consumer reads all offsets until the known latest offset. MaxOffsetsPerTrigger *int64 // The desired minimum number of partitions to read from Kafka. The default value // is null, which means that the number of spark partitions is equal to the number // of Kafka partitions. MinPartitions *int32 // The number of times to retry before failing to fetch Kafka offsets. The default // value is 3 . NumRetries *int32 // The timeout in milliseconds to poll data from Kafka in Spark job executors. The // default value is 512 . PollTimeoutMs *int64 // The time in milliseconds to wait before retrying to fetch Kafka offsets. The // default value is 10 . RetryIntervalMs *int64 // The protocol used to communicate with brokers. The possible values are "SSL" or // "PLAINTEXT" . SecurityProtocol *string // The starting position in the Kafka topic to read data from. The possible values // are "earliest" or "latest" . The default value is "latest" . StartingOffsets *string // The timestamp of the record in the Kafka topic to start reading data from. The // possible values are a timestamp string in UTC format of the pattern // yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For // example: "2023-04-04T08:00:00+08:00"). // // Only one of StartingTimestamp or StartingOffsets must be set. StartingTimestamp *time.Time // A Java regex string that identifies the topic list to subscribe to. 
You must // specify at least one of "topicName" , "assign" or "subscribePattern" . SubscribePattern *string // The topic name as specified in Apache Kafka. You must specify at least one of // "topicName" , "assign" or "subscribePattern" . TopicName *string // contains filtered or unexported fields }
Additional options for the Apache Kafka streaming data source.
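A minimal sketch of Kafka source options that reads one topic from the earliest offset over SSL; the connection and topic names are placeholders, and the classification value is illustrative. Per the field docs above, at least one of TopicName, Assign, or SubscribePattern must be set.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    opts := types.KafkaStreamingSourceOptions{
        ConnectionName:   aws.String("my-kafka-connection"), // hypothetical connection
        TopicName:        aws.String("clickstream"),         // hypothetical topic
        StartingOffsets:  aws.String("earliest"),
        SecurityProtocol: aws.String("SSL"),
        Classification:   aws.String("json"), // optional classification (example)
        NumRetries:       aws.Int32(3),
        PollTimeoutMs:    aws.Int64(512),
    }
    fmt.Println(*opts.TopicName)
}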
type KeySchemaElement ¶ added in v0.29.0
type KeySchemaElement struct { // The name of a partition key. // // This member is required. Name *string // The type of a partition key. // // This member is required. Type *string // contains filtered or unexported fields }
A partition key pair consisting of a name and a type.
type KinesisStreamingSourceOptions ¶ added in v1.25.0
type KinesisStreamingSourceOptions struct { // Adds a time delay between two consecutive getRecords operations. The default // value is "False" . This option is only configurable for Glue version 2.0 and // above. AddIdleTimeBetweenReads *bool // When this option is set to 'true', the data output will contain an additional // column named "__src_timestamp" that indicates the time when the corresponding // record received by the stream. The default value is 'false'. This option is // supported in Glue version 4.0 or later. AddRecordTimestamp *string // Avoids creating an empty microbatch job by checking for unread data in the // Kinesis data stream before the batch is started. The default value is "False" . AvoidEmptyBatches *bool // An optional classification. Classification *string // Specifies the delimiter character. Delimiter *string // The minimum time interval between two ListShards API calls for your script to // consider resharding. The default value is 1s . DescribeShardInterval *int64 // When this option is set to 'true', for each batch, it will emit the metrics for // the duration between the oldest record received by the stream and the time it // arrives in Glue to CloudWatch. The metric's name is // "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This // option is supported in Glue version 4.0 or later. EmitConsumerLagMetrics *string // The URL of the Kinesis endpoint. EndpointUrl *string // The minimum time delay between two consecutive getRecords operations, specified // in ms. The default value is 1000 . This option is only configurable for Glue // version 2.0 and above. IdleTimeBetweenReadsInMs *int64 // The maximum number of records to fetch per shard in the Kinesis data stream per // microbatch. Note: The client can exceed this limit if the streaming job has // already read extra records from Kinesis (in the same get-records call). If // MaxFetchRecordsPerShard needs to be strict then it needs to be a multiple of // MaxRecordPerRead . The default value is 100000 . MaxFetchRecordsPerShard *int64 // The maximum time spent for the job executor to read records for the current // batch from the Kinesis data stream, specified in milliseconds (ms). Multiple // GetRecords API calls may be made within this time. The default value is 1000 . MaxFetchTimeInMs *int64 // The maximum number of records to fetch from the Kinesis data stream in each // getRecords operation. The default value is 10000 . MaxRecordPerRead *int64 // The maximum cool-off time period (specified in ms) between two retries of a // Kinesis Data Streams API call. The default value is 10000 . MaxRetryIntervalMs *int64 // The maximum number of retries for Kinesis Data Streams API requests. The // default value is 3 . NumRetries *int32 // The cool-off time period (specified in ms) before retrying the Kinesis Data // Streams API call. The default value is 1000 . RetryIntervalMs *int64 // The Amazon Resource Name (ARN) of the role to assume using AWS Security Token // Service (AWS STS). This role must have permissions for describe or read record // operations for the Kinesis data stream. You must use this parameter when // accessing a data stream in a different account. Used in conjunction with // "awsSTSSessionName" . RoleArn *string // An identifier for the session assuming the role using AWS STS. You must use // this parameter when accessing a data stream in a different account. Used in // conjunction with "awsSTSRoleARN" . 
RoleSessionName *string // The starting position in the Kinesis data stream to read data from. The // possible values are "latest" , "trim_horizon" , "earliest" , or a timestamp // string in UTC format in the pattern yyyy-mm-ddTHH:MM:SSZ (where Z represents a // UTC timezone offset with a +/-. For example: "2023-04-04T08:00:00-04:00"). The // default value is "latest" . // // Note: Using a value that is a timestamp string in UTC format for // "startingPosition" is supported only for Glue version 4.0 or later. StartingPosition StartingPosition // The timestamp of the record in the Kinesis data stream to start reading data // from. The possible values are a timestamp string in UTC format of the pattern // yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For // example: "2023-04-04T08:00:00+08:00"). StartingTimestamp *time.Time // The Amazon Resource Name (ARN) of the Kinesis data stream. StreamArn *string // The name of the Kinesis data stream. StreamName *string // contains filtered or unexported fields }
Additional options for the Amazon Kinesis streaming data source.
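A minimal sketch of Kinesis source options that starts from the oldest available record and enables the idle-time throttling described above; the stream ARN is a placeholder, and the StartingPosition value is written as a direct conversion of the documented "trim_horizon" literal.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    opts := types.KinesisStreamingSourceOptions{
        StreamArn:                aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/clicks"), // placeholder ARN
        StartingPosition:         types.StartingPosition("trim_horizon"), // direct conversion of a documented value
        Classification:           aws.String("json"),                     // optional classification (example)
        AddIdleTimeBetweenReads:  aws.Bool(true),
        IdleTimeBetweenReadsInMs: aws.Int64(1000),
        MaxFetchRecordsPerShard:  aws.Int64(100000),
    }
    fmt.Println(*opts.StreamArn)
}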
type LabelingSetGenerationTaskRunProperties ¶
type LabelingSetGenerationTaskRunProperties struct { // The Amazon Simple Storage Service (Amazon S3) path where you will generate the // labeling set. OutputS3Path *string // contains filtered or unexported fields }
Specifies configuration properties for a labeling set generation task run.
type LakeFormationConfiguration ¶ added in v1.18.0
type LakeFormationConfiguration struct { // Required for cross account crawls. For same account crawls as the target data, // this can be left as null. AccountId *string // Specifies whether to use Lake Formation credentials for the crawler instead of // the IAM role credentials. UseLakeFormationCredentials *bool // contains filtered or unexported fields }
Specifies Lake Formation configuration settings for the crawler.
type LastActiveDefinition ¶ added in v1.11.0
type LastActiveDefinition struct { // Specifies a path in Amazon S3 where the blueprint is published by the Glue // developer. BlueprintLocation *string // Specifies a path in Amazon S3 where the blueprint is copied when you create or // update the blueprint. BlueprintServiceLocation *string // The description of the blueprint. Description *string // The date and time the blueprint was last modified. LastModifiedOn *time.Time // A JSON string specifying the parameters for the blueprint. ParameterSpec *string // contains filtered or unexported fields }
When there are multiple versions of a blueprint and the latest version has some errors, this attribute indicates the last successful blueprint definition that is available with the service.
type LastCrawlInfo ¶
type LastCrawlInfo struct { // If an error occurred, the error information about the last crawl. ErrorMessage *string // The log group for the last crawl. LogGroup *string // The log stream for the last crawl. LogStream *string // The prefix for a message about this crawl. MessagePrefix *string // The time at which the crawl started. StartTime *time.Time // Status of the last crawl. Status LastCrawlStatus // contains filtered or unexported fields }
Status and error information about the most recent crawl.
type LastCrawlStatus ¶
type LastCrawlStatus string
const ( LastCrawlStatusSucceeded LastCrawlStatus = "SUCCEEDED" LastCrawlStatusCancelled LastCrawlStatus = "CANCELLED" LastCrawlStatusFailed LastCrawlStatus = "FAILED" )
Enum values for LastCrawlStatus
func (LastCrawlStatus) Values ¶ added in v0.29.0
func (LastCrawlStatus) Values() []LastCrawlStatus
Values returns all known values for LastCrawlStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type LineageConfiguration ¶ added in v0.31.0
type LineageConfiguration struct { // Specifies whether data lineage is enabled for the crawler. Valid values are: // // - ENABLE: enables data lineage for the crawler // // - DISABLE: disables data lineage for the crawler CrawlerLineageSettings CrawlerLineageSettings // contains filtered or unexported fields }
Specifies data lineage configuration settings for the crawler.
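LakeFormationConfiguration (above) and LineageConfiguration are both crawler settings and are often set together; the sketch below enables Lake Formation credentials for a cross-account crawl and turns lineage on. The account ID is a placeholder, and the "ENABLE" value is written as a direct conversion of the documented literal.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    // Use Lake Formation credentials for a cross-account crawl.
    lakeFormation := types.LakeFormationConfiguration{
        AccountId:                   aws.String("111122223333"), // placeholder target account
        UseLakeFormationCredentials: aws.Bool(true),
    }
    // Enable data lineage collection for the crawler.
    lineage := types.LineageConfiguration{
        CrawlerLineageSettings: types.CrawlerLineageSettings("ENABLE"), // direct conversion of a documented value
    }
    fmt.Println(*lakeFormation.AccountId, lineage.CrawlerLineageSettings)
}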
type Location ¶
type Location struct { // An Amazon DynamoDB table location. DynamoDB []CodeGenNodeArg // A JDBC location. Jdbc []CodeGenNodeArg // An Amazon Simple Storage Service (Amazon S3) location. S3 []CodeGenNodeArg // contains filtered or unexported fields }
The location of resources.
type LogicalOperator ¶
type LogicalOperator string
const ( LogicalOperatorEquals LogicalOperator = "EQUALS" )
Enum values for LogicalOperator
func (LogicalOperator) Values ¶ added in v0.29.0
func (LogicalOperator) Values() []LogicalOperator
Values returns all known values for LogicalOperator. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type LongColumnStatisticsData ¶
type LongColumnStatisticsData struct { // The number of distinct values in a column. // // This member is required. NumberOfDistinctValues int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // The highest value in the column. MaximumValue int64 // The lowest value in the column. MinimumValue int64 // contains filtered or unexported fields }
Defines column statistics supported for integer data columns.
type MLTransform ¶
type MLTransform struct { // A timestamp. The time and date that this machine learning transform was created. CreatedOn *time.Time // A user-defined, long-form description text for the machine learning transform. // Descriptions are not guaranteed to be unique and can be changed at any time. Description *string // An EvaluationMetrics object. Evaluation metrics provide an estimate of the // quality of your machine learning transform. EvaluationMetrics *EvaluationMetrics // This value determines which version of Glue this machine learning transform is // compatible with. Glue 1.0 is recommended for most customers. If the value is not // set, the Glue compatibility defaults to Glue 0.9. For more information, see [Glue Versions]in // the developer guide. // // [Glue Versions]: https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions GlueVersion *string // A list of Glue table definitions used by the transform. InputRecordTables []GlueTable // A count identifier for the labeling files generated by Glue for this transform. // As you create a better transform, you can iteratively download, label, and // upload the labeling file. LabelCount int32 // A timestamp. The last point in time when this machine learning transform was // modified. LastModifiedOn *time.Time // The number of Glue data processing units (DPUs) that are allocated to task runs // for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A // DPU is a relative measure of processing power that consists of 4 vCPUs of // compute capacity and 16 GB of memory. For more information, see the [Glue pricing page]. // // MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType . // // - If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be // set. // // - If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set. // // - If WorkerType is set, then NumberOfWorkers is required (and vice versa). // // - MaxCapacity and NumberOfWorkers must both be at least 1. // // When the WorkerType field is set to a value other than Standard , the // MaxCapacity field is set automatically and becomes read-only. // // [Glue pricing page]: http://aws.amazon.com/glue/pricing/ MaxCapacity *float64 // The maximum number of times to retry after an MLTaskRun of the machine learning // transform fails. MaxRetries *int32 // A user-defined name for the machine learning transform. Names are not // guaranteed unique and can be changed at any time. Name *string // The number of workers of a defined workerType that are allocated when a task of // the transform runs. // // If WorkerType is set, then NumberOfWorkers is required (and vice versa). NumberOfWorkers *int32 // A TransformParameters object. You can use parameters to tune (customize) the // behavior of the machine learning transform by specifying what data it learns // from and your preference on various tradeoffs (such as precious vs. recall, or // accuracy vs. cost). Parameters *TransformParameters // The name or Amazon Resource Name (ARN) of the IAM role with the required // permissions. The required permissions include both Glue service role permissions // to Glue resources, and Amazon S3 permissions required by the transform. // // - This role needs Glue service role permissions to allow access to resources // in Glue. See [Attach a Policy to IAM Users That Access Glue]. 
// // - This role needs permission to your Amazon Simple Storage Service (Amazon // S3) sources, targets, temporary directory, scripts, and any libraries used by // the task run for this transform. // // [Attach a Policy to IAM Users That Access Glue]: https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html Role *string // A map of key-value pairs representing the columns and data types that this // transform can run against. Has an upper bound of 100 columns. Schema []SchemaColumn // The current status of the machine learning transform. Status TransformStatusType // The timeout in minutes of the machine learning transform. Timeout *int32 // The encryption-at-rest settings of the transform that apply to accessing user // data. Machine learning transforms can access user data encrypted in Amazon S3 // using KMS. TransformEncryption *TransformEncryption // The unique transform ID that is generated for the machine learning transform. // The ID is guaranteed to be unique and does not change. TransformId *string // The type of predefined worker that is allocated when a task of this transform // runs. Accepts a value of Standard, G.1X, or G.2X. // // - For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory // and a 50GB disk, and 2 executors per worker. // // - For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a // 64GB disk, and 1 executor per worker. // // - For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a // 128GB disk, and 1 executor per worker. // // MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType . // // - If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be // set. // // - If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set. // // - If WorkerType is set, then NumberOfWorkers is required (and vice versa). // // - MaxCapacity and NumberOfWorkers must both be at least 1. WorkerType WorkerType // contains filtered or unexported fields }
A structure for a machine learning transform.
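The MaxCapacity, NumberOfWorkers, and WorkerType rules above are easy to violate when assembling a request. The following is a minimal sketch, not part of the generated API, of a local pre-flight check that mirrors those documented rules; the helper name and error messages are invented for illustration.

import (
    "errors"

    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// validateTransformCapacity mirrors the documented rules: MaxCapacity is
// mutually exclusive with WorkerType/NumberOfWorkers, and WorkerType and
// NumberOfWorkers must be set together.
func validateTransformCapacity(maxCapacity *float64, workerType types.WorkerType, numberOfWorkers *int32) error {
    usesWorkers := workerType != "" || numberOfWorkers != nil
    if maxCapacity != nil && usesWorkers {
        return errors.New("set either MaxCapacity or WorkerType/NumberOfWorkers, not both")
    }
    if (workerType != "") != (numberOfWorkers != nil) {
        return errors.New("WorkerType and NumberOfWorkers must be set together")
    }
    return nil
}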
type MLTransformNotReadyException ¶
type MLTransformNotReadyException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The machine learning transform is not ready to run.
func (*MLTransformNotReadyException) Error ¶
func (e *MLTransformNotReadyException) Error() string
func (*MLTransformNotReadyException) ErrorCode ¶
func (e *MLTransformNotReadyException) ErrorCode() string
func (*MLTransformNotReadyException) ErrorFault ¶
func (e *MLTransformNotReadyException) ErrorFault() smithy.ErrorFault
func (*MLTransformNotReadyException) ErrorMessage ¶
func (e *MLTransformNotReadyException) ErrorMessage() string
type MLUserDataEncryption ¶ added in v0.29.0
type MLUserDataEncryption struct { // The encryption mode applied to user data. Valid values are: // // - DISABLED: encryption is disabled // // - SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) // for user data stored in Amazon S3. // // This member is required. MlUserDataEncryptionMode MLUserDataEncryptionModeString // The ID for the customer-provided KMS key. KmsKeyId *string // contains filtered or unexported fields }
The encryption-at-rest settings of the transform that apply to accessing user data.
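A minimal sketch of constructing these settings with SSE-KMS; the KMS key ID is a placeholder, and the aws helper package is assumed to come from github.com/aws/aws-sdk-go-v2/aws.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// Encrypt user data at rest with a customer-managed KMS key (placeholder ID).
var mlUserDataEncryption = types.MLUserDataEncryption{
    MlUserDataEncryptionMode: types.MLUserDataEncryptionModeStringSsekms,
    KmsKeyId:                 aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
}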
type MLUserDataEncryptionModeString ¶ added in v0.29.0
type MLUserDataEncryptionModeString string
const ( MLUserDataEncryptionModeStringDisabled MLUserDataEncryptionModeString = "DISABLED" MLUserDataEncryptionModeStringSsekms MLUserDataEncryptionModeString = "SSE-KMS" )
Enum values for MLUserDataEncryptionModeString
func (MLUserDataEncryptionModeString) Values ¶ added in v0.29.0
func (MLUserDataEncryptionModeString) Values() []MLUserDataEncryptionModeString
Values returns all known values for MLUserDataEncryptionModeString. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Mapping ¶ added in v1.25.0
type Mapping struct { // Only applicable to nested data structures. If you want to change the parent // structure, but also one of its children, you can fill out this data structure. // It is also Mapping , but its FromPath will be the parent's FromPath plus the // FromPath from this structure. // // For the children part, suppose you have the structure: // // { "FromPath": "OuterStructure", "ToKey": "OuterStructure", "ToType": "Struct", // "Dropped": false, "Children": [{ "FromPath": "inner", "ToKey": "inner", // "ToType": "Double", "Dropped": false, }] } // // You can specify a Mapping that looks like: // // { "FromPath": "OuterStructure", "ToKey": "OuterStructure", "ToType": "Struct", // "Dropped": false, "Children": [{ "FromPath": "inner", "ToKey": "inner", // "ToType": "Double", "Dropped": false, }] } Children []Mapping // If true, then the column is removed. Dropped *bool // The table or column to be modified. FromPath []string // The type of the data to be modified. FromType *string // After the apply mapping, what the name of the column should be. Can be the same // as FromPath . ToKey *string // The data type that the data is to be modified to. ToType *string // contains filtered or unexported fields }
Specifies the mapping of data property keys.
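A minimal sketch of a nested Mapping in which the child entry repeats the parent's FromPath, as described above; the column and type names are illustrative only.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// Keep OuterStructure but rename and retype its nested field "inner".
var nestedMapping = types.Mapping{
    FromPath: []string{"OuterStructure"},
    ToKey:    aws.String("OuterStructure"),
    ToType:   aws.String("struct"),
    Dropped:  aws.Bool(false),
    Children: []types.Mapping{{
        // The child's FromPath is the parent's FromPath plus its own segment.
        FromPath: []string{"OuterStructure", "inner"},
        ToKey:    aws.String("inner_renamed"),
        ToType:   aws.String("double"),
        Dropped:  aws.Bool(false),
    }},
}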
type MappingEntry ¶
type MappingEntry struct { // The source path. SourcePath *string // The name of the source table. SourceTable *string // The source type. SourceType *string // The target path. TargetPath *string // The target table. TargetTable *string // The target type. TargetType *string // contains filtered or unexported fields }
Defines a mapping.
type Merge ¶ added in v1.25.0
type Merge struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // The list of primary key fields to match records from the source and staging // dynamic frames. // // This member is required. PrimaryKeys [][]string // The source DynamicFrame that will be merged with a staging DynamicFrame . // // This member is required. Source *string // contains filtered or unexported fields }
Specifies a transform that merges a DynamicFrame with a staging DynamicFrame based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.
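A minimal sketch of a Merge node that matches records from a source node and a staging node on a composite primary key; the node and column names are illustrative.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// Merge the staging frame into the source frame, matching on order_id and order_date.
var mergeNode = types.Merge{
    Name:        aws.String("MergeOrders"),
    Inputs:      []string{"source_node", "staging_node"},
    Source:      aws.String("source_node"),
    PrimaryKeys: [][]string{{"order_id"}, {"order_date"}},
}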
type MetadataInfo ¶ added in v0.30.0
type MetadataInfo struct { // The time at which the entry was created. CreatedTime *string // The metadata key’s corresponding value. MetadataValue *string // Other metadata belonging to the same metadata key. OtherMetadataValueList []OtherMetadataValueListItem // contains filtered or unexported fields }
A structure containing metadata information for a schema version.
type MetadataKeyValuePair ¶ added in v0.30.0
type MetadataKeyValuePair struct { // A metadata key. MetadataKey *string // A metadata key’s corresponding value. MetadataValue *string // contains filtered or unexported fields }
A structure containing a key value pair for metadata.
type MetadataOperation ¶ added in v1.54.0
type MetadataOperation string
const (
MetadataOperationCreate MetadataOperation = "CREATE"
)
Enum values for MetadataOperation
func (MetadataOperation) Values ¶ added in v1.54.0
func (MetadataOperation) Values() []MetadataOperation
Values returns all known values for MetadataOperation. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type MetricBasedObservation ¶ added in v1.72.0
type MetricBasedObservation struct { // The name of the data quality metric used for generating the observation. MetricName *string // An object of type DataQualityMetricValues representing the analysis of the data // quality metric value. MetricValues *DataQualityMetricValues // A list of new data quality rules generated as part of the observation based on // the data quality metric value. NewRules []string // The Statistic ID. StatisticId *string // contains filtered or unexported fields }
Describes the metric-based observation generated from evaluated data quality metrics.
type MicrosoftSQLServerCatalogSource ¶ added in v1.25.0
type MicrosoftSQLServerCatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a Microsoft SQL Server data source in the Glue Data Catalog.
type MicrosoftSQLServerCatalogTarget ¶ added in v1.25.0
type MicrosoftSQLServerCatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a target that uses Microsoft SQL Server.
type MongoDBTarget ¶ added in v0.29.0
type MongoDBTarget struct { // The name of the connection to use to connect to the Amazon DocumentDB or // MongoDB target. ConnectionName *string // The path of the Amazon DocumentDB or MongoDB target (database/collection). Path *string // Indicates whether to scan all the records, or to sample rows from the table. // Scanning all the records can take a long time when the table is not a high // throughput table. // // A value of true means to scan all records, while a value of false means to // sample the records. If no value is specified, the value defaults to true . ScanAll *bool // contains filtered or unexported fields }
Specifies an Amazon DocumentDB or MongoDB data store to crawl.
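A minimal sketch of a crawler target that samples a DocumentDB collection rather than scanning it fully; the connection name and path are placeholders.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var mongoTarget = types.MongoDBTarget{
    ConnectionName: aws.String("my-documentdb-connection"),
    Path:           aws.String("sales/orders"), // database/collection
    ScanAll:        aws.Bool(false),            // sample rows instead of scanning every record
}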
type MySQLCatalogSource ¶ added in v1.25.0
type MySQLCatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a MySQL data source in the Glue Data Catalog.
type MySQLCatalogTarget ¶ added in v1.25.0
type MySQLCatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a target that uses MySQL.
type NoScheduleException ¶
type NoScheduleException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
There is no applicable schedule.
func (*NoScheduleException) Error ¶
func (e *NoScheduleException) Error() string
func (*NoScheduleException) ErrorCode ¶
func (e *NoScheduleException) ErrorCode() string
func (*NoScheduleException) ErrorFault ¶
func (e *NoScheduleException) ErrorFault() smithy.ErrorFault
func (*NoScheduleException) ErrorMessage ¶
func (e *NoScheduleException) ErrorMessage() string
type Node ¶
type Node struct { // Details of the crawler when the node represents a crawler. CrawlerDetails *CrawlerNodeDetails // Details of the Job when the node represents a Job. JobDetails *JobNodeDetails // The name of the Glue component represented by the node. Name *string // Details of the Trigger when the node represents a Trigger. TriggerDetails *TriggerNodeDetails // The type of Glue component represented by the node. Type NodeType // The unique Id assigned to the node within the workflow. UniqueId *string // contains filtered or unexported fields }
A node represents a Glue component (trigger, crawler, or job) on a workflow graph.
type NodeType ¶
type NodeType string
type NotificationProperty ¶
type NotificationProperty struct { // After a job run starts, the number of minutes to wait before sending a job run // delay notification. NotifyDelayAfter *int32 // contains filtered or unexported fields }
Specifies configuration properties of a notification.
type NullCheckBoxList ¶ added in v1.25.0
type NullCheckBoxList struct { // Specifies that an empty string is considered as a null value. IsEmpty *bool // Specifies that an integer value of -1 is considered as a null value. IsNegOne *bool // Specifies that a value spelling out the word 'null' is considered as a null // value. IsNullString *bool // contains filtered or unexported fields }
Represents whether certain values are recognized as null values for removal.
type NullValueField ¶ added in v1.25.0
type NullValueField struct { // The datatype of the value. // // This member is required. Datatype *Datatype // The value of the null placeholder. // // This member is required. Value *string // contains filtered or unexported fields }
Represents a custom null value, such as zeros or another value, being used as a null placeholder unique to the dataset.
type OAuth2ClientApplication ¶ added in v1.83.0
type OAuth2ClientApplication struct { // The reference to the SaaS-side client app that is Amazon Web Services managed. AWSManagedClientApplicationReference *string // The client application clientID if the ClientAppType is USER_MANAGED . UserManagedClientApplicationClientId *string // contains filtered or unexported fields }
The OAuth2 client app used for the connection.
type OAuth2Credentials ¶ added in v1.103.0
type OAuth2Credentials struct { // The access token used when the authentication type is OAuth2. AccessToken *string // The JSON Web Token (JWT) used when the authentication type is OAuth2. JwtToken *string // The refresh token used when the authentication type is OAuth2. RefreshToken *string // The client application client secret if the client application is user managed. UserManagedClientApplicationClientSecret *string // contains filtered or unexported fields }
The credentials used when the authentication type is OAuth2 authentication.
type OAuth2GrantType ¶ added in v1.83.0
type OAuth2GrantType string
const ( OAuth2GrantTypeAuthorizationCode OAuth2GrantType = "AUTHORIZATION_CODE" OAuth2GrantTypeClientCredentials OAuth2GrantType = "CLIENT_CREDENTIALS" OAuth2GrantTypeJwtBearer OAuth2GrantType = "JWT_BEARER" )
Enum values for OAuth2GrantType
func (OAuth2GrantType) Values ¶ added in v1.83.0
func (OAuth2GrantType) Values() []OAuth2GrantType
Values returns all known values for OAuth2GrantType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type OAuth2Properties ¶ added in v1.83.0
type OAuth2Properties struct { // The client application type. For example, AWS_MANAGED or USER_MANAGED. OAuth2ClientApplication *OAuth2ClientApplication // The OAuth2 grant type. For example, AUTHORIZATION_CODE , JWT_BEARER , or // CLIENT_CREDENTIALS . OAuth2GrantType OAuth2GrantType // The URL of the provider's authentication server, to exchange an authorization // code for an access token. TokenUrl *string // A map of parameters that are added to the token GET request. TokenUrlParametersMap map[string]string // contains filtered or unexported fields }
A structure containing properties for OAuth2 authentication.
type OAuth2PropertiesInput ¶ added in v1.83.0
type OAuth2PropertiesInput struct { // The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type. AuthorizationCodeProperties *AuthorizationCodeProperties // The client application type in the CreateConnection request. For example, // AWS_MANAGED or USER_MANAGED . OAuth2ClientApplication *OAuth2ClientApplication // The credentials used when the authentication type is OAuth2 authentication. OAuth2Credentials *OAuth2Credentials // The OAuth2 grant type in the CreateConnection request. For example, // AUTHORIZATION_CODE , JWT_BEARER , or CLIENT_CREDENTIALS . OAuth2GrantType OAuth2GrantType // The URL of the provider's authentication server, to exchange an authorization // code for an access token. TokenUrl *string // A map of parameters that are added to the token GET request. TokenUrlParametersMap map[string]string // contains filtered or unexported fields }
A structure containing properties for OAuth2 in the CreateConnection request.
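A minimal sketch of OAuth2 properties for a CreateConnection request using the CLIENT_CREDENTIALS grant with a user-managed client application; every identifier, secret, and URL shown is a placeholder.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var oauth2Input = types.OAuth2PropertiesInput{
    OAuth2GrantType: types.OAuth2GrantTypeClientCredentials,
    OAuth2ClientApplication: &types.OAuth2ClientApplication{
        UserManagedClientApplicationClientId: aws.String("example-client-id"),
    },
    OAuth2Credentials: &types.OAuth2Credentials{
        UserManagedClientApplicationClientSecret: aws.String("example-client-secret"),
    },
    TokenUrl:              aws.String("https://auth.example.com/oauth2/token"),
    TokenUrlParametersMap: map[string]string{"audience": "example-api"},
}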
type OpenTableFormatInput ¶ added in v1.54.0
type OpenTableFormatInput struct { // Specifies an IcebergInput structure that defines an Apache Iceberg metadata // table. IcebergInput *IcebergInput // contains filtered or unexported fields }
A structure representing an open format table.
type OperationNotSupportedException ¶ added in v1.86.0
type OperationNotSupportedException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The operation is not available in the region.
func (*OperationNotSupportedException) Error ¶ added in v1.86.0
func (e *OperationNotSupportedException) Error() string
func (*OperationNotSupportedException) ErrorCode ¶ added in v1.86.0
func (e *OperationNotSupportedException) ErrorCode() string
func (*OperationNotSupportedException) ErrorFault ¶ added in v1.86.0
func (e *OperationNotSupportedException) ErrorFault() smithy.ErrorFault
func (*OperationNotSupportedException) ErrorMessage ¶ added in v1.86.0
func (e *OperationNotSupportedException) ErrorMessage() string
type OperationTimeoutException ¶
type OperationTimeoutException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The operation timed out.
func (*OperationTimeoutException) Error ¶
func (e *OperationTimeoutException) Error() string
func (*OperationTimeoutException) ErrorCode ¶
func (e *OperationTimeoutException) ErrorCode() string
func (*OperationTimeoutException) ErrorFault ¶
func (e *OperationTimeoutException) ErrorFault() smithy.ErrorFault
func (*OperationTimeoutException) ErrorMessage ¶
func (e *OperationTimeoutException) ErrorMessage() string
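These exception types implement the standard error interface, so a wrapped API error can be inspected with errors.As. A minimal sketch of the detection pattern follows; the helper name is invented, and no specific Glue operation is assumed.

import (
    "errors"

    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// isOperationTimeout reports whether err, or any error it wraps, is an
// OperationTimeoutException returned by a Glue API call.
func isOperationTimeout(err error) bool {
    var timeout *types.OperationTimeoutException
    return errors.As(err, &timeout)
}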
type Option ¶ added in v1.47.0
type Option struct { // Specifies the description of the option. Description *string // Specifies the label of the option. Label *string // Specifies the value of the option. Value *string // contains filtered or unexported fields }
Specifies an option value.
type OracleSQLCatalogSource ¶ added in v1.25.0
type OracleSQLCatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies an Oracle data source in the Glue Data Catalog.
type OracleSQLCatalogTarget ¶ added in v1.25.0
type OracleSQLCatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a target that uses Oracle SQL.
type Order ¶
type Order struct { // The name of the column. // // This member is required. Column *string // Indicates that the column is sorted in ascending order (== 1), or in // descending order (== 0). // // This member is required. SortOrder int32 // contains filtered or unexported fields }
Specifies the sort order of a sorted column.
type OrphanFileDeletionConfiguration ¶ added in v1.96.0
type OrphanFileDeletionConfiguration struct { // The configuration for an Iceberg orphan file deletion optimizer. IcebergConfiguration *IcebergOrphanFileDeletionConfiguration // contains filtered or unexported fields }
The configuration for an orphan file deletion optimizer.
type OrphanFileDeletionMetrics ¶ added in v1.96.0
type OrphanFileDeletionMetrics struct { // A structure containing the Iceberg orphan file deletion metrics for the // optimizer run. IcebergMetrics *IcebergOrphanFileDeletionMetrics // contains filtered or unexported fields }
A structure that contains orphan file deletion metrics for the optimizer run.
type OtherMetadataValueListItem ¶ added in v1.3.0
type OtherMetadataValueListItem struct { // The time at which the entry was created. CreatedTime *string // The metadata key’s corresponding value for the other metadata belonging to the // same metadata key. MetadataValue *string // contains filtered or unexported fields }
A structure containing other metadata for a schema version belonging to the same metadata key.
type PIIDetection ¶ added in v1.25.0
type PIIDetection struct { // Indicates the types of entities the PIIDetection transform will identify as PII // data. // // PII type entities include: PERSON_NAME, DATE, USA_SNN, EMAIL, USA_ITIN, // USA_PASSPORT_NUMBER, PHONE_NUMBER, BANK_ACCOUNT, IP_ADDRESS, MAC_ADDRESS, // USA_CPT_CODE, USA_HCPCS_CODE, USA_NATIONAL_DRUG_CODE, // USA_MEDICARE_BENEFICIARY_IDENTIFIER, // USA_HEALTH_INSURANCE_CLAIM_NUMBER,CREDIT_CARD,USA_NATIONAL_PROVIDER_IDENTIFIER,USA_DEA_NUMBER,USA_DRIVING_LICENSE // // This member is required. EntityTypesToDetect []string // The node ID inputs to the transform. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // Indicates the type of PIIDetection transform. // // This member is required. PiiType PiiType // Indicates the value that will replace the detected entity. MaskValue *string // Indicates the output column name that will contain any entity type detected in // that row. OutputColumnName *string // Indicates the fraction of the data to sample when scanning for PII entities. SampleFraction *float64 // Indicates the fraction of the data that must be met in order for a column to be // identified as PII data. ThresholdFraction *float64 // contains filtered or unexported fields }
Specifies a transform that identifies, removes or masks PII data.
type ParamType ¶ added in v1.36.0
type ParamType string
type ParquetCompressionType ¶ added in v1.25.0
type ParquetCompressionType string
const ( ParquetCompressionTypeSnappy ParquetCompressionType = "snappy" ParquetCompressionTypeLzo ParquetCompressionType = "lzo" ParquetCompressionTypeGzip ParquetCompressionType = "gzip" ParquetCompressionTypeUncompressed ParquetCompressionType = "uncompressed" ParquetCompressionTypeNone ParquetCompressionType = "none" )
Enum values for ParquetCompressionType
func (ParquetCompressionType) Values ¶ added in v1.25.0
func (ParquetCompressionType) Values() []ParquetCompressionType
Values returns all known values for ParquetCompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Partition ¶
type Partition struct { // The ID of the Data Catalog in which the partition resides. CatalogId *string // The time at which the partition was created. CreationTime *time.Time // The name of the catalog database in which to create the partition. DatabaseName *string // The last time at which the partition was accessed. LastAccessTime *time.Time // The last time at which column statistics were computed for this partition. LastAnalyzedTime *time.Time // These key-value pairs define partition parameters. Parameters map[string]string // Provides information about the physical location where the partition is stored. StorageDescriptor *StorageDescriptor // The name of the database table in which to create the partition. TableName *string // The values of the partition. Values []string // contains filtered or unexported fields }
Represents a slice of table data.
type PartitionError ¶
type PartitionError struct { // The details about the partition error. ErrorDetail *ErrorDetail // The values that define the partition. PartitionValues []string // contains filtered or unexported fields }
Contains information about a partition error.
type PartitionIndex ¶ added in v0.29.0
type PartitionIndex struct { // The name of the partition index. // // This member is required. IndexName *string // The keys for the partition index. // // This member is required. Keys []string // contains filtered or unexported fields }
A structure for a partition index.
type PartitionIndexDescriptor ¶ added in v0.29.0
type PartitionIndexDescriptor struct { // The name of the partition index. // // This member is required. IndexName *string // The status of the partition index. // // The possible statuses are: // // - CREATING: The index is being created. When an index is in a CREATING state, // the index or its table cannot be deleted. // // - ACTIVE: The index creation succeeds. // // - FAILED: The index creation fails. // // - DELETING: The index is deleted from the list of indexes. // // This member is required. IndexStatus PartitionIndexStatus // A list of one or more keys, as KeySchemaElement structures, for the partition // index. // // This member is required. Keys []KeySchemaElement // A list of errors that can occur when registering partition indexes for an // existing table. BackfillErrors []BackfillError // contains filtered or unexported fields }
A descriptor for a partition index in a table.
type PartitionIndexStatus ¶ added in v0.29.0
type PartitionIndexStatus string
const ( PartitionIndexStatusCreating PartitionIndexStatus = "CREATING" PartitionIndexStatusActive PartitionIndexStatus = "ACTIVE" PartitionIndexStatusDeleting PartitionIndexStatus = "DELETING" PartitionIndexStatusFailed PartitionIndexStatus = "FAILED" )
Enum values for PartitionIndexStatus
func (PartitionIndexStatus) Values ¶ added in v0.29.0
func (PartitionIndexStatus) Values() []PartitionIndexStatus
Values returns all known values for PartitionIndexStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type PartitionInput ¶
type PartitionInput struct { // The last time at which the partition was accessed. LastAccessTime *time.Time // The last time at which column statistics were computed for this partition. LastAnalyzedTime *time.Time // These key-value pairs define partition parameters. Parameters map[string]string // Provides information about the physical location where the partition is stored. StorageDescriptor *StorageDescriptor // The values of the partition. Although this parameter is not required by the // SDK, you must specify this parameter for a valid input. // // The values for the keys for the new partition must be passed as an array of // String objects that must be ordered in the same order as the partition keys // appearing in the Amazon S3 prefix. Otherwise Glue will add the values to the // wrong keys. Values []string // contains filtered or unexported fields }
The structure used to create and update a partition.
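A minimal sketch of a PartitionInput for a table partitioned by year, month, and day; the Values slice is ordered to match those partition keys, as the note above requires, and the values and parameters are illustrative.

import "github.com/aws/aws-sdk-go-v2/service/glue/types"

// Values must follow the partition key order (year, month, day).
var partitionInput = types.PartitionInput{
    Values: []string{"2024", "07", "15"},
    Parameters: map[string]string{
        "source": "nightly-load", // illustrative partition parameter
    },
}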
type PartitionValueList ¶
type PartitionValueList struct { // The list of values. // // This member is required. Values []string // contains filtered or unexported fields }
Contains a list of values defining partitions.
type Permission ¶
type Permission string
const ( PermissionAll Permission = "ALL" PermissionSelect Permission = "SELECT" PermissionAlter Permission = "ALTER" PermissionDrop Permission = "DROP" PermissionDelete Permission = "DELETE" PermissionInsert Permission = "INSERT" PermissionCreateDatabase Permission = "CREATE_DATABASE" PermissionCreateTable Permission = "CREATE_TABLE" PermissionDataLocationAccess Permission = "DATA_LOCATION_ACCESS" )
Enum values for Permission
func (Permission) Values ¶ added in v0.29.0
func (Permission) Values() []Permission
Values returns all known values for Permission. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type PermissionType ¶ added in v1.18.0
type PermissionType string
const ( PermissionTypeColumnPermission PermissionType = "COLUMN_PERMISSION" PermissionTypeCellFilterPermission PermissionType = "CELL_FILTER_PERMISSION" PermissionTypeNestedPermission PermissionType = "NESTED_PERMISSION" PermissionTypeNestedCellPermission PermissionType = "NESTED_CELL_PERMISSION" )
Enum values for PermissionType
func (PermissionType) Values ¶ added in v1.18.0
func (PermissionType) Values() []PermissionType
Values returns all known values for PermissionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type PermissionTypeMismatchException ¶ added in v1.18.0
type PermissionTypeMismatchException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The permission type specified in the request does not match the permission type of the resource.
func (*PermissionTypeMismatchException) Error ¶ added in v1.18.0
func (e *PermissionTypeMismatchException) Error() string
func (*PermissionTypeMismatchException) ErrorCode ¶ added in v1.18.0
func (e *PermissionTypeMismatchException) ErrorCode() string
func (*PermissionTypeMismatchException) ErrorFault ¶ added in v1.18.0
func (e *PermissionTypeMismatchException) ErrorFault() smithy.ErrorFault
func (*PermissionTypeMismatchException) ErrorMessage ¶ added in v1.18.0
func (e *PermissionTypeMismatchException) ErrorMessage() string
type PhysicalConnectionRequirements ¶
type PhysicalConnectionRequirements struct { // The connection's Availability Zone. AvailabilityZone *string // The security group ID list used by the connection. SecurityGroupIdList []string // The subnet ID used by the connection. SubnetId *string // contains filtered or unexported fields }
Specifies the physical requirements for a connection, such as the Availability Zone, security group IDs, and subnet ID.
type PiiType ¶ added in v1.25.0
type PiiType string
type PostgreSQLCatalogSource ¶ added in v1.25.0
type PostgreSQLCatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a PostgreSQL data source in the Glue Data Catalog.
type PostgreSQLCatalogTarget ¶ added in v1.25.0
type PostgreSQLCatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a target that uses PostgreSQL.
type Predecessor ¶
type Predecessor struct { // The name of the job definition used by the predecessor job run. JobName *string // The job-run ID of the predecessor job run. RunId *string // contains filtered or unexported fields }
A job run that was used in the predicate of a conditional trigger that triggered this job run.
type Predicate ¶
type Predicate struct { // A list of the conditions that determine when the trigger will fire. Conditions []Condition // An optional field if only one condition is listed. If multiple conditions are // listed, then this field is required. Logical Logical // contains filtered or unexported fields }
Defines the predicate of the trigger, which determines when it fires.
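A minimal sketch of a predicate that fires a trigger only after two upstream jobs succeed. The Condition field names (JobName, State, LogicalOperator) and the enum constants used here are assumed from the Condition type, which is not reproduced in this section, and the job names are placeholders.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var triggerPredicate = types.Predicate{
    Logical: types.LogicalAnd, // all conditions must be met
    Conditions: []types.Condition{
        {JobName: aws.String("extract-job"), State: types.JobRunStateSucceeded, LogicalOperator: types.LogicalOperatorEquals},
        {JobName: aws.String("cleanse-job"), State: types.JobRunStateSucceeded, LogicalOperator: types.LogicalOperatorEquals},
    },
}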
type PrincipalPermissions ¶
type PrincipalPermissions struct { // The permissions that are granted to the principal. Permissions []Permission // The principal who is granted permissions. Principal *DataLakePrincipal // contains filtered or unexported fields }
Permissions granted to a principal.
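A minimal sketch granting SELECT and ALTER to an IAM role; the DataLakePrincipal field name (DataLakePrincipalIdentifier) is assumed from that type, and the role ARN is a placeholder.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var analystGrant = types.PrincipalPermissions{
    Permissions: []types.Permission{types.PermissionSelect, types.PermissionAlter},
    Principal: &types.DataLakePrincipal{
        DataLakePrincipalIdentifier: aws.String("arn:aws:iam::111122223333:role/analyst"),
    },
}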
type PrincipalType ¶
type PrincipalType string
const ( PrincipalTypeUser PrincipalType = "USER" PrincipalTypeRole PrincipalType = "ROLE" PrincipalTypeGroup PrincipalType = "GROUP" )
Enum values for PrincipalType
func (PrincipalType) Values ¶ added in v0.29.0
func (PrincipalType) Values() []PrincipalType
Values returns all known values for PrincipalType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ProfileConfiguration ¶ added in v1.86.0
type ProfileConfiguration struct { // A key-value map of configuration parameters for Glue jobs. JobConfiguration map[string]ConfigurationObject // A key-value map of configuration parameters for Glue sessions. SessionConfiguration map[string]ConfigurationObject // contains filtered or unexported fields }
Specifies the job and session values that an admin configures in a Glue usage profile.
type Property ¶ added in v1.103.0
type Property struct { // A description of the property. // // This member is required. Description *string // The name of the property. // // This member is required. Name *string // Describes the type of property. // // This member is required. PropertyTypes []PropertyType // Indicates whether the property is required. // // This member is required. Required *bool // A list of AllowedValue objects representing the values allowed for the property. AllowedValues []AllowedValue // Indicates which data operations are applicable to the property. DataOperationScopes []DataOperation // The default value for the property. DefaultValue *string // contains filtered or unexported fields }
An object that defines a connection type for a compute environment.
type PropertyPredicate ¶
type PropertyPredicate struct { // The comparator used to compare this property to others. Comparator Comparator // The key of the property. Key *string // The value of the property. Value *string // contains filtered or unexported fields }
Defines a property predicate.
type PropertyType ¶ added in v1.103.0
type PropertyType string
const ( PropertyTypeUserInput PropertyType = "USER_INPUT" PropertyTypeSecret PropertyType = "SECRET" PropertyTypeReadOnly PropertyType = "READ_ONLY" PropertyTypeUnused PropertyType = "UNUSED" PropertyTypeSecretOrUserInput PropertyType = "SECRET_OR_USER_INPUT" )
Enum values for PropertyType
func (PropertyType) Values ¶ added in v1.103.0
func (PropertyType) Values() []PropertyType
Values returns all known values for PropertyType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type QuerySessionContext ¶ added in v1.73.0
type QuerySessionContext struct { // An opaque string-string map passed by the query engine. AdditionalContext map[string]string // An identifier string for the consumer cluster. ClusterId *string // A cryptographically generated query identifier generated by Glue or Lake // Formation. QueryAuthorizationId *string // A unique identifier generated by the query engine for the query. QueryId *string // A timestamp provided by the query engine for when the query started. QueryStartTime *time.Time // contains filtered or unexported fields }
A structure used as a protocol between query engines and Lake Formation or Glue. Contains both a Lake Formation generated authorization identifier and information from the request's authorization context.
type QuoteChar ¶ added in v1.25.0
type QuoteChar string
type Recipe ¶ added in v1.57.0
type Recipe struct { // The nodes that are inputs to the recipe node, identified by id. // // This member is required. Inputs []string // The name of the Glue Studio node. // // This member is required. Name *string // A reference to the DataBrew recipe used by the node. RecipeReference *RecipeReference // Transform steps used in the recipe node. RecipeSteps []RecipeStep // contains filtered or unexported fields }
A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.
type RecipeAction ¶ added in v1.90.0
type RecipeAction struct { // The operation of the recipe action. // // This member is required. Operation *string // The parameters of the recipe action. Parameters map[string]string // contains filtered or unexported fields }
Actions defined in the Glue Studio data preparation recipe node.
type RecipeReference ¶ added in v1.57.0
type RecipeReference struct { // The ARN of the DataBrew recipe. // // This member is required. RecipeArn *string // The RecipeVersion of the DataBrew recipe. // // This member is required. RecipeVersion *string // contains filtered or unexported fields }
A reference to a Glue DataBrew recipe.
type RecipeStep ¶ added in v1.90.0
type RecipeStep struct { // The transformation action of the recipe step. // // This member is required. Action *RecipeAction // The condition expressions for the recipe step. ConditionExpressions []ConditionExpression // contains filtered or unexported fields }
A recipe step used in a Glue Studio data preparation recipe node.
type RecrawlBehavior ¶ added in v0.29.0
type RecrawlBehavior string
const ( RecrawlBehaviorCrawlEverything RecrawlBehavior = "CRAWL_EVERYTHING" RecrawlBehaviorCrawlNewFoldersOnly RecrawlBehavior = "CRAWL_NEW_FOLDERS_ONLY" RecrawlBehaviorCrawlEventMode RecrawlBehavior = "CRAWL_EVENT_MODE" )
Enum values for RecrawlBehavior
func (RecrawlBehavior) Values ¶ added in v0.29.0
func (RecrawlBehavior) Values() []RecrawlBehavior
Values returns all known values for RecrawlBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type RecrawlPolicy ¶ added in v0.29.0
type RecrawlPolicy struct { // Specifies whether to crawl the entire dataset again or to crawl only folders // that were added since the last crawler run. // // A value of CRAWL_EVERYTHING specifies crawling the entire dataset again. // // A value of CRAWL_NEW_FOLDERS_ONLY specifies crawling only folders that were // added since the last crawler run. // // A value of CRAWL_EVENT_MODE specifies crawling only the changes identified by // Amazon S3 events. RecrawlBehavior RecrawlBehavior // contains filtered or unexported fields }
When crawling an Amazon S3 data source after the first crawl is complete, specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. For more information, see Incremental Crawls in Glue in the developer guide.
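A minimal sketch of a recrawl policy that limits later crawls to folders added since the previous run, using the RecrawlBehavior constants listed above.

import "github.com/aws/aws-sdk-go-v2/service/glue/types"

var incrementalRecrawl = types.RecrawlPolicy{
    RecrawlBehavior: types.RecrawlBehaviorCrawlNewFoldersOnly,
}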
type RedshiftSource ¶ added in v1.25.0
type RedshiftSource struct { // The database to read from. // // This member is required. Database *string // The name of the Amazon Redshift data store. // // This member is required. Name *string // The database table to read from. // // This member is required. Table *string // The Amazon S3 path where temporary data can be staged when copying out of the // database. RedshiftTmpDir *string // The IAM role with permissions. TmpDirIAMRole *string // contains filtered or unexported fields }
Specifies an Amazon Redshift data store.
type RedshiftTarget ¶ added in v1.25.0
type RedshiftTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // The Amazon S3 path where temporary data can be staged when copying out of the // database. RedshiftTmpDir *string // The IAM role with permissions. TmpDirIAMRole *string // The set of options to configure an upsert operation when writing to a Redshift // target. UpsertRedshiftOptions *UpsertRedshiftTargetOptions // contains filtered or unexported fields }
Specifies a target that uses Amazon Redshift.
type RegistryId ¶ added in v0.30.0
type RegistryId struct { // Arn of the registry to be updated. One of RegistryArn or RegistryName has to be // provided. RegistryArn *string // Name of the registry. Used only for lookup. One of RegistryArn or RegistryName // has to be provided. RegistryName *string // contains filtered or unexported fields }
A wrapper structure that may contain the registry name and Amazon Resource Name (ARN).
type RegistryListItem ¶ added in v0.30.0
type RegistryListItem struct { // The date the registry was created. CreatedTime *string // A description of the registry. Description *string // The Amazon Resource Name (ARN) of the registry. RegistryArn *string // The name of the registry. RegistryName *string // The status of the registry. Status RegistryStatus // The date the registry was updated. UpdatedTime *string // contains filtered or unexported fields }
A structure containing the details for a registry.
type RegistryStatus ¶ added in v0.30.0
type RegistryStatus string
const ( RegistryStatusAvailable RegistryStatus = "AVAILABLE" RegistryStatusDeleting RegistryStatus = "DELETING" )
Enum values for RegistryStatus
func (RegistryStatus) Values ¶ added in v0.30.0
func (RegistryStatus) Values() []RegistryStatus
Values returns all known values for RegistryStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type RelationalCatalogSource ¶ added in v1.25.0
type RelationalCatalogSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // contains filtered or unexported fields }
Specifies a Relational database data source in the Glue Data Catalog.
type RenameField ¶ added in v1.25.0
type RenameField struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A JSON path to a variable in the data structure for the source data. // // This member is required. SourcePath []string // A JSON path to a variable in the data structure for the target data. // // This member is required. TargetPath []string // contains filtered or unexported fields }
Specifies a transform that renames a single data property key.
type ResourceAction ¶ added in v1.93.0
type ResourceAction string
const ( ResourceActionUpdate ResourceAction = "UPDATE" ResourceActionCreate ResourceAction = "CREATE" )
Enum values for ResourceAction
func (ResourceAction) Values ¶ added in v1.93.0
func (ResourceAction) Values() []ResourceAction
Values returns all known values for ResourceAction. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ResourceNotFoundException ¶ added in v1.103.0
type ResourceNotFoundException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The resource could not be found.
func (*ResourceNotFoundException) Error ¶ added in v1.103.0
func (e *ResourceNotFoundException) Error() string
func (*ResourceNotFoundException) ErrorCode ¶ added in v1.103.0
func (e *ResourceNotFoundException) ErrorCode() string
func (*ResourceNotFoundException) ErrorFault ¶ added in v1.103.0
func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault
func (*ResourceNotFoundException) ErrorMessage ¶ added in v1.103.0
func (e *ResourceNotFoundException) ErrorMessage() string
type ResourceNotReadyException ¶ added in v1.16.0
type ResourceNotReadyException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
A resource was not ready for a transaction.
func (*ResourceNotReadyException) Error ¶ added in v1.16.0
func (e *ResourceNotReadyException) Error() string
func (*ResourceNotReadyException) ErrorCode ¶ added in v1.16.0
func (e *ResourceNotReadyException) ErrorCode() string
func (*ResourceNotReadyException) ErrorFault ¶ added in v1.16.0
func (e *ResourceNotReadyException) ErrorFault() smithy.ErrorFault
func (*ResourceNotReadyException) ErrorMessage ¶ added in v1.16.0
func (e *ResourceNotReadyException) ErrorMessage() string
type ResourceNumberLimitExceededException ¶
type ResourceNumberLimitExceededException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
A resource numerical limit was exceeded.
func (*ResourceNumberLimitExceededException) Error ¶
func (e *ResourceNumberLimitExceededException) Error() string
func (*ResourceNumberLimitExceededException) ErrorCode ¶
func (e *ResourceNumberLimitExceededException) ErrorCode() string
func (*ResourceNumberLimitExceededException) ErrorFault ¶
func (e *ResourceNumberLimitExceededException) ErrorFault() smithy.ErrorFault
func (*ResourceNumberLimitExceededException) ErrorMessage ¶
func (e *ResourceNumberLimitExceededException) ErrorMessage() string
type ResourceShareType ¶
type ResourceShareType string
const ()
Enum values for ResourceShareType
func (ResourceShareType) Values ¶ added in v0.29.0
func (ResourceShareType) Values() []ResourceShareType
Values returns all known values for ResourceShareType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ResourceState ¶ added in v1.93.0
type ResourceState string
const ( ResourceStateQueued ResourceState = "QUEUED" ResourceStateInProgress ResourceState = "IN_PROGRESS" ResourceStateSuccess ResourceState = "SUCCESS" ResourceStateStopped ResourceState = "STOPPED" ResourceStateFailed ResourceState = "FAILED" )
Enum values for ResourceState
func (ResourceState) Values ¶ added in v1.93.0
func (ResourceState) Values() []ResourceState
Values returns all known values for ResourceState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ResourceType ¶
type ResourceType string
const ( ResourceTypeJar ResourceType = "JAR" ResourceTypeFile ResourceType = "FILE" ResourceTypeArchive ResourceType = "ARCHIVE" )
Enum values for ResourceType
func (ResourceType) Values ¶ added in v0.29.0
func (ResourceType) Values() []ResourceType
Values returns all known values for ResourceType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ResourceUri ¶
type ResourceUri struct { // The type of the resource. ResourceType ResourceType // The URI for accessing the resource. Uri *string // contains filtered or unexported fields }
The URIs for function resources.
type RetentionConfiguration ¶ added in v1.96.0
type RetentionConfiguration struct { // The configuration for an Iceberg snapshot retention optimizer. IcebergConfiguration *IcebergRetentionConfiguration // contains filtered or unexported fields }
The configuration for a snapshot retention optimizer.
type RetentionMetrics ¶ added in v1.96.0
type RetentionMetrics struct { // A structure containing the Iceberg retention metrics for the optimizer run. IcebergMetrics *IcebergRetentionMetrics // contains filtered or unexported fields }
A structure that contains retention metrics for the optimizer run.
type RunIdentifier ¶ added in v1.92.0
type RunIdentifier struct { // The Job Run ID. JobRunId *string // The Run ID. RunId *string // contains filtered or unexported fields }
A run identifier.
type RunMetrics ¶ added in v1.68.0
type RunMetrics struct { // The duration of the job in hours. JobDurationInHour *string // The number of bytes removed by the compaction job run. NumberOfBytesCompacted *string // The number of DPU hours consumed by the job. NumberOfDpus *string // The number of files removed by the compaction job run. NumberOfFilesCompacted *string // contains filtered or unexported fields }
Metrics for the optimizer run.
This structure is deprecated. See the individual metric members for compaction, retention, and orphan file deletion.
type S3CatalogDeltaSource ¶ added in v1.43.0
type S3CatalogDeltaSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the Delta Lake data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // Specifies additional connection options. AdditionalDeltaOptions map[string]string // Specifies the data schema for the Delta Lake source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Delta Lake data source that is registered in the Glue Data Catalog. The data source must be stored in Amazon S3.
type S3CatalogHudiSource ¶ added in v1.40.0
type S3CatalogHudiSource struct { // The name of the database to read from. // // This member is required. Database *string // The name of the Hudi data source. // // This member is required. Name *string // The name of the table in the database to read from. // // This member is required. Table *string // Specifies additional connection options. AdditionalHudiOptions map[string]string // Specifies the data schema for the Hudi source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Hudi data source that is registered in the Glue Data Catalog. The Hudi data source must be stored in Amazon S3.
type S3CatalogSource ¶ added in v1.25.0
type S3CatalogSource struct { // The database to read from. // // This member is required. Database *string // The name of the data store. // // This member is required. Name *string // The database table to read from. // // This member is required. Table *string // Specifies additional connection options. AdditionalOptions *S3SourceAdditionalOptions // Partitions satisfying this predicate are deleted. Files within the retention // period in these partitions are not deleted. Set to "" – empty by default. PartitionPredicate *string // contains filtered or unexported fields }
Specifies an Amazon S3 data store in the Glue Data Catalog.
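A minimal sketch of an S3-backed catalog source that prunes partitions with a partition predicate; the database, table, node name, and predicate expression are illustrative.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var ordersSource = types.S3CatalogSource{
    Name:     aws.String("orders_source"),
    Database: aws.String("sales_db"),
    Table:    aws.String("orders"),
    // Illustrative predicate: only read partitions for 2024 and later.
    PartitionPredicate: aws.String("year >= '2024'"),
}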
type S3CatalogTarget ¶ added in v1.25.0
type S3CatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *CatalogSchemaChangePolicy // contains filtered or unexported fields }
Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.
type S3CsvSource ¶ added in v1.25.0
type S3CsvSource struct { // The name of the data store. // // This member is required. Name *string // A list of the Amazon S3 paths to read from. // // This member is required. Paths []string // Specifies the character to use for quoting. The default is a double quote: '"' . // Set this to -1 to turn off quoting entirely. // // This member is required. QuoteChar QuoteChar // Specifies the delimiter character. The default is a comma: ",", but any other // character can be specified. // // This member is required. Separator Separator // Specifies additional connection options. AdditionalOptions *S3DirectSourceAdditionalOptions // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). CompressionType CompressionType // Specifies a character to use for escaping. This option is used only when // reading CSV files. The default value is none . If enabled, the character which // immediately follows is used as-is, except for a small set of well-known escapes // ( \n , \r , \t , and \0 ). Escaper *string // A string containing a JSON list of Unix-style glob patterns to exclude. For // example, "[\"**.pdf\"]" excludes all PDF files. Exclusions []string // Grouping files is turned on by default when the input contains more than 50,000 // files. To turn on grouping with fewer than 50,000 files, set this parameter to // "inPartition". To disable grouping when there are more than 50,000 files, set // this parameter to "none" . GroupFiles *string // The target group size in bytes. The default is computed based on the input data // size and the size of your cluster. When there are fewer than 50,000 input files, // "groupFiles" must be set to "inPartition" for this to take effect. GroupSize *string // This option controls the duration in milliseconds after which the s3 listing is // likely to be consistent. Files with modification timestamps falling within the // last maxBand milliseconds are tracked specially when using JobBookmarks to // account for Amazon S3 eventual consistency. Most users don't need to set this // option. The default is 900000 milliseconds, or 15 minutes. MaxBand *int32 // This option specifies the maximum number of files to save from the last maxBand // seconds. If this number is exceeded, extra files are skipped and only processed // in the next job run. MaxFilesInBand *int32 // A Boolean value that specifies whether a single record can span multiple lines. // This can occur when a field contains a quoted new-line character. You must set // this option to True if any record spans multiple lines. The default value is // False , which allows for more aggressive file-splitting during parsing. Multiline *bool // A Boolean value that specifies whether to use the advanced SIMD CSV reader // along with Apache Arrow based columnar memory formats. Only available in Glue // version 3.0. OptimizePerformance bool // Specifies the data schema for the S3 CSV source. OutputSchemas []GlueSchema // If set to true, recursively reads files in all subdirectories under the // specified paths. Recurse *bool // A Boolean value that specifies whether to skip the first data line. The default // value is False . SkipFirst *bool // A Boolean value that specifies whether to treat the first line as a header. The // default value is False . WithHeader *bool // A Boolean value that specifies whether to write the header to output. The // default value is True . 
WriteHeader *bool // contains filtered or unexported fields }
Specifies a comma-separated values (CSV) data store stored in Amazon S3.
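A minimal sketch of a CSV source that reads headered files recursively from a prefix. The QuoteChar and Separator enum constant names (QuoteCharQuote, SeparatorComma) are assumed rather than shown in this section, and the bucket path is a placeholder.

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

var csvSource = types.S3CsvSource{
    Name:       aws.String("csv_source"),
    Paths:      []string{"s3://amzn-s3-demo-bucket/input/"},
    QuoteChar:  types.QuoteCharQuote, // assumed constant for the default double quote
    Separator:  types.SeparatorComma, // assumed constant for the default comma
    WithHeader: aws.Bool(true),
    Recurse:    aws.Bool(true),
}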
type S3DeltaCatalogTarget ¶ added in v1.43.0
type S3DeltaCatalogTarget struct { // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // Specifies additional connection options for the connector. AdditionalOptions map[string]string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *CatalogSchemaChangePolicy // contains filtered or unexported fields }
Specifies a target that writes to a Delta Lake data source in the Glue Data Catalog.
type S3DeltaDirectTarget ¶ added in v1.43.0
type S3DeltaDirectTarget struct { // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). // // This member is required. Compression DeltaTargetCompressionType // Specifies the data output format for the target. // // This member is required. Format TargetFormat // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The Amazon S3 path of your Delta Lake data source to write to. // // This member is required. Path *string // Specifies additional connection options for the connector. AdditionalOptions map[string]string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *DirectSchemaChangePolicy // contains filtered or unexported fields }
Specifies a target that writes to a Delta Lake data source in Amazon S3.
type S3DeltaSource ¶ added in v1.43.0
type S3DeltaSource struct { // The name of the Delta Lake source. // // This member is required. Name *string // A list of the Amazon S3 paths to read from. // // This member is required. Paths []string // Specifies additional connection options. AdditionalDeltaOptions map[string]string // Specifies additional options for the connector. AdditionalOptions *S3DirectSourceAdditionalOptions // Specifies the data schema for the Delta Lake source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Delta Lake data source stored in Amazon S3.
type S3DirectSourceAdditionalOptions ¶ added in v1.25.0
type S3DirectSourceAdditionalOptions struct { // Sets the upper limit for the target number of files that will be processed. BoundedFiles *int64 // Sets the upper limit for the target size of the dataset in bytes that will be // processed. BoundedSize *int64 // Sets option to enable a sample path. EnableSamplePath *bool // If enabled, specifies the sample path. SamplePath *string // contains filtered or unexported fields }
Specifies additional connection options for the Amazon S3 data store.
type S3DirectTarget ¶ added in v1.25.0
type S3DirectTarget struct { // Specifies the data output format for the target. // // This member is required. Format TargetFormat // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // A single Amazon S3 path to write to. // // This member is required. Path *string // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). Compression *string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *DirectSchemaChangePolicy // contains filtered or unexported fields }
Specifies a data target that writes to Amazon S3.
type S3Encryption ¶
type S3Encryption struct { // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. KmsKeyArn *string // The encryption mode to use for Amazon S3 data. S3EncryptionMode S3EncryptionMode // contains filtered or unexported fields }
Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
type S3EncryptionMode ¶
type S3EncryptionMode string
const ( S3EncryptionModeDisabled S3EncryptionMode = "DISABLED" S3EncryptionModeSsekms S3EncryptionMode = "SSE-KMS" S3EncryptionModeSses3 S3EncryptionMode = "SSE-S3" )
Enum values for S3EncryptionMode
func (S3EncryptionMode) Values ¶ added in v0.29.0
func (S3EncryptionMode) Values() []S3EncryptionMode
Values returns all known values for S3EncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
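As a brief sketch, an S3Encryption entry typically pairs one of these modes with a KMS key when SSE-KMS is selected; the key ARN below is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	enc := types.S3Encryption{
		S3EncryptionMode: types.S3EncryptionModeSsekms,
		KmsKeyArn:        aws.String("arn:aws:kms:us-east-1:123456789012:key/example-key-id"), // placeholder ARN
	}
	fmt.Println(enc.S3EncryptionMode)
}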
type S3GlueParquetTarget ¶ added in v1.25.0
type S3GlueParquetTarget struct { // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // A single Amazon S3 path to write to. // // This member is required. Path *string // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). Compression ParquetCompressionType // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *DirectSchemaChangePolicy // contains filtered or unexported fields }
Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.
type S3HudiCatalogTarget ¶ added in v1.40.0
type S3HudiCatalogTarget struct { // Specifies additional connection options for the connector. // // This member is required. AdditionalOptions map[string]string // The name of the database to write to. // // This member is required. Database *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The name of the table in the database to write to. // // This member is required. Table *string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *CatalogSchemaChangePolicy // contains filtered or unexported fields }
Specifies a target that writes to a Hudi data source in the Glue Data Catalog.
type S3HudiDirectTarget ¶ added in v1.40.0
type S3HudiDirectTarget struct { // Specifies additional connection options for the connector. // // This member is required. AdditionalOptions map[string]string // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). // // This member is required. Compression HudiTargetCompressionType // Specifies the data output format for the target. // // This member is required. Format TargetFormat // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // The Amazon S3 path of your Hudi data source to write to. // // This member is required. Path *string // Specifies native partitioning using a sequence of keys. PartitionKeys [][]string // A policy that specifies update behavior for the crawler. SchemaChangePolicy *DirectSchemaChangePolicy // contains filtered or unexported fields }
Specifies a target that writes to a Hudi data source in Amazon S3.
type S3HudiSource ¶ added in v1.40.0
type S3HudiSource struct { // The name of the Hudi source. // // This member is required. Name *string // A list of the Amazon S3 paths to read from. // // This member is required. Paths []string // Specifies additional connection options. AdditionalHudiOptions map[string]string // Specifies additional options for the connector. AdditionalOptions *S3DirectSourceAdditionalOptions // Specifies the data schema for the Hudi source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Hudi data source stored in Amazon S3.
type S3JsonSource ¶ added in v1.25.0
type S3JsonSource struct { // The name of the data store. // // This member is required. Name *string // A list of the Amazon S3 paths to read from. // // This member is required. Paths []string // Specifies additional connection options. AdditionalOptions *S3DirectSourceAdditionalOptions // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). CompressionType CompressionType // A string containing a JSON list of Unix-style glob patterns to exclude. For // example, "[\"**.pdf\"]" excludes all PDF files. Exclusions []string // Grouping files is turned on by default when the input contains more than 50,000 // files. To turn on grouping with fewer than 50,000 files, set this parameter to // "inPartition". To disable grouping when there are more than 50,000 files, set // this parameter to "none" . GroupFiles *string // The target group size in bytes. The default is computed based on the input data // size and the size of your cluster. When there are fewer than 50,000 input files, // "groupFiles" must be set to "inPartition" for this to take effect. GroupSize *string // A JsonPath string defining the JSON data. JsonPath *string // This option controls the duration in milliseconds after which the s3 listing is // likely to be consistent. Files with modification timestamps falling within the // last maxBand milliseconds are tracked specially when using JobBookmarks to // account for Amazon S3 eventual consistency. Most users don't need to set this // option. The default is 900000 milliseconds, or 15 minutes. MaxBand *int32 // This option specifies the maximum number of files to save from the last maxBand // seconds. If this number is exceeded, extra files are skipped and only processed // in the next job run. MaxFilesInBand *int32 // A Boolean value that specifies whether a single record can span multiple lines. // This can occur when a field contains a quoted new-line character. You must set // this option to True if any record spans multiple lines. The default value is // False , which allows for more aggressive file-splitting during parsing. Multiline *bool // Specifies the data schema for the S3 JSON source. OutputSchemas []GlueSchema // If set to true, recursively reads files in all subdirectories under the // specified paths. Recurse *bool // contains filtered or unexported fields }
Specifies a JSON data store stored in Amazon S3.
type S3ParquetSource ¶ added in v1.25.0
type S3ParquetSource struct { // The name of the data store. // // This member is required. Name *string // A list of the Amazon S3 paths to read from. // // This member is required. Paths []string // Specifies additional connection options. AdditionalOptions *S3DirectSourceAdditionalOptions // Specifies how the data is compressed. This is generally not necessary if the // data has a standard file extension. Possible values are "gzip" and "bzip" ). CompressionType ParquetCompressionType // A string containing a JSON list of Unix-style glob patterns to exclude. For // example, "[\"**.pdf\"]" excludes all PDF files. Exclusions []string // Grouping files is turned on by default when the input contains more than 50,000 // files. To turn on grouping with fewer than 50,000 files, set this parameter to // "inPartition". To disable grouping when there are more than 50,000 files, set // this parameter to "none" . GroupFiles *string // The target group size in bytes. The default is computed based on the input data // size and the size of your cluster. When there are fewer than 50,000 input files, // "groupFiles" must be set to "inPartition" for this to take effect. GroupSize *string // This option controls the duration in milliseconds after which the s3 listing is // likely to be consistent. Files with modification timestamps falling within the // last maxBand milliseconds are tracked specially when using JobBookmarks to // account for Amazon S3 eventual consistency. Most users don't need to set this // option. The default is 900000 milliseconds, or 15 minutes. MaxBand *int32 // This option specifies the maximum number of files to save from the last maxBand // seconds. If this number is exceeded, extra files are skipped and only processed // in the next job run. MaxFilesInBand *int32 // Specifies the data schema for the S3 Parquet source. OutputSchemas []GlueSchema // If set to true, recursively reads files in all subdirectories under the // specified paths. Recurse *bool // contains filtered or unexported fields }
Specifies an Apache Parquet data store stored in Amazon S3.
type S3SourceAdditionalOptions ¶ added in v1.25.0
type S3SourceAdditionalOptions struct { // Sets the upper limit for the target number of files that will be processed. BoundedFiles *int64 // Sets the upper limit for the target size of the dataset in bytes that will be // processed. BoundedSize *int64 // contains filtered or unexported fields }
Specifies additional connection options for the Amazon S3 data store.
type S3Target ¶
type S3Target struct { // The name of a connection which allows a job or crawler to access data in Amazon // S3 within an Amazon Virtual Private Cloud environment (Amazon VPC). ConnectionName *string // A valid Amazon dead-letter SQS ARN. For example, // arn:aws:sqs:region:account:deadLetterQueue . DlqEventQueueArn *string // A valid Amazon SQS ARN. For example, arn:aws:sqs:region:account:sqs . EventQueueArn *string // A list of glob patterns used to exclude from the crawl. For more information, // see [Catalog Tables with a Crawler]. // // [Catalog Tables with a Crawler]: https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html Exclusions []string // The path to the Amazon S3 target. Path *string // Sets the number of files in each leaf folder to be crawled when crawling sample // files in a dataset. If not set, all the files are crawled. A valid value is an // integer between 1 and 249. SampleSize *int32 // contains filtered or unexported fields }
Specifies a data store in Amazon Simple Storage Service (Amazon S3).
type Schedule ¶
type Schedule struct { // A cron expression used to specify the schedule (see [Time-Based Schedules for Jobs and Crawlers]. For example, to run // something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) . // // [Time-Based Schedules for Jobs and Crawlers]: https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html ScheduleExpression *string // The state of the schedule. State ScheduleState // contains filtered or unexported fields }
A scheduling object using a cron statement to schedule an event.
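A minimal sketch using the cron expression quoted above (run every day at 12:15 UTC); the state constant comes from the ScheduleState enum documented below.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	s := types.Schedule{
		ScheduleExpression: aws.String("cron(15 12 * * ? *)"), // every day at 12:15 UTC
		State:              types.ScheduleStateScheduled,
	}
	fmt.Println(*s.ScheduleExpression, s.State)
}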
type ScheduleState ¶
type ScheduleState string
const ( ScheduleStateScheduled ScheduleState = "SCHEDULED" ScheduleStateNotScheduled ScheduleState = "NOT_SCHEDULED" ScheduleStateTransitioning ScheduleState = "TRANSITIONING" )
Enum values for ScheduleState
func (ScheduleState) Values ¶ added in v0.29.0
func (ScheduleState) Values() []ScheduleState
Values returns all known values for ScheduleState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ScheduleType ¶ added in v1.103.0
type ScheduleType string
const ( ScheduleTypeCron ScheduleType = "CRON" ScheduleTypeAuto ScheduleType = "AUTO" )
Enum values for ScheduleType
func (ScheduleType) Values ¶ added in v1.103.0
func (ScheduleType) Values() []ScheduleType
Values returns all known values for ScheduleType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SchedulerNotRunningException ¶
type SchedulerNotRunningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The specified scheduler is not running.
func (*SchedulerNotRunningException) Error ¶
func (e *SchedulerNotRunningException) Error() string
func (*SchedulerNotRunningException) ErrorCode ¶
func (e *SchedulerNotRunningException) ErrorCode() string
func (*SchedulerNotRunningException) ErrorFault ¶
func (e *SchedulerNotRunningException) ErrorFault() smithy.ErrorFault
func (*SchedulerNotRunningException) ErrorMessage ¶
func (e *SchedulerNotRunningException) ErrorMessage() string
type SchedulerRunningException ¶
type SchedulerRunningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The specified scheduler is already running.
func (*SchedulerRunningException) Error ¶
func (e *SchedulerRunningException) Error() string
func (*SchedulerRunningException) ErrorCode ¶
func (e *SchedulerRunningException) ErrorCode() string
func (*SchedulerRunningException) ErrorFault ¶
func (e *SchedulerRunningException) ErrorFault() smithy.ErrorFault
func (*SchedulerRunningException) ErrorMessage ¶
func (e *SchedulerRunningException) ErrorMessage() string
type SchedulerTransitioningException ¶
type SchedulerTransitioningException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The specified scheduler is transitioning.
func (*SchedulerTransitioningException) Error ¶
func (e *SchedulerTransitioningException) Error() string
func (*SchedulerTransitioningException) ErrorCode ¶
func (e *SchedulerTransitioningException) ErrorCode() string
func (*SchedulerTransitioningException) ErrorFault ¶
func (e *SchedulerTransitioningException) ErrorFault() smithy.ErrorFault
func (*SchedulerTransitioningException) ErrorMessage ¶
func (e *SchedulerTransitioningException) ErrorMessage() string
type SchemaChangePolicy ¶
type SchemaChangePolicy struct { // The deletion behavior when the crawler finds a deleted object. DeleteBehavior DeleteBehavior // The update behavior when the crawler finds a changed schema. UpdateBehavior UpdateBehavior // contains filtered or unexported fields }
A policy that specifies update and deletion behaviors for the crawler.
type SchemaColumn ¶
type SchemaColumn struct { // The type of data in the column. DataType *string // The name of the column. Name *string // contains filtered or unexported fields }
A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
type SchemaDiffType ¶ added in v0.30.0
type SchemaDiffType string
const (
SchemaDiffTypeSyntaxDiff SchemaDiffType = "SYNTAX_DIFF"
)
Enum values for SchemaDiffType
func (SchemaDiffType) Values ¶ added in v0.30.0
func (SchemaDiffType) Values() []SchemaDiffType
Values returns all known values for SchemaDiffType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SchemaId ¶ added in v0.30.0
type SchemaId struct { // The name of the schema registry that contains the schema. RegistryName *string // The Amazon Resource Name (ARN) of the schema. One of SchemaArn or SchemaName // has to be provided. SchemaArn *string // The name of the schema. One of SchemaArn or SchemaName has to be provided. SchemaName *string // contains filtered or unexported fields }
The unique ID of the schema in the Glue schema registry.
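Since either SchemaArn or SchemaName must be provided, here is a sketch of both identification styles; the registry name, schema name, and ARN are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	byName := types.SchemaId{
		RegistryName: aws.String("example-registry"), // placeholder
		SchemaName:   aws.String("example-schema"),   // placeholder
	}
	byArn := types.SchemaId{
		SchemaArn: aws.String("arn:aws:glue:us-east-1:123456789012:schema/example-registry/example-schema"), // placeholder
	}
	_, _ = byName, byArn
}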
type SchemaListItem ¶ added in v0.30.0
type SchemaListItem struct { // The date and time that a schema was created. CreatedTime *string // A description for the schema. Description *string // the name of the registry where the schema resides. RegistryName *string // The Amazon Resource Name (ARN) for the schema. SchemaArn *string // The name of the schema. SchemaName *string // The status of the schema. SchemaStatus SchemaStatus // The date and time that a schema was updated. UpdatedTime *string // contains filtered or unexported fields }
An object that contains minimal details for a schema.
type SchemaReference ¶ added in v0.30.0
type SchemaReference struct { // A structure that contains schema identity fields. Either this or the // SchemaVersionId has to be provided. SchemaId *SchemaId // The unique ID assigned to a version of the schema. Either this or the SchemaId // has to be provided. SchemaVersionId *string // The version number of the schema. SchemaVersionNumber *int64 // contains filtered or unexported fields }
An object that references a schema stored in the Glue Schema Registry.
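A sketch of a SchemaReference that identifies a registry schema by SchemaId and pins a specific version number; the names are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	ref := types.SchemaReference{
		SchemaId: &types.SchemaId{
			RegistryName: aws.String("example-registry"), // placeholder
			SchemaName:   aws.String("example-schema"),   // placeholder
		},
		SchemaVersionNumber: aws.Int64(2), // pin a specific schema version
	}
	_ = ref
}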
type SchemaStatus ¶ added in v0.30.0
type SchemaStatus string
const ( SchemaStatusAvailable SchemaStatus = "AVAILABLE" SchemaStatusPending SchemaStatus = "PENDING" SchemaStatusDeleting SchemaStatus = "DELETING" )
Enum values for SchemaStatus
func (SchemaStatus) Values ¶ added in v0.30.0
func (SchemaStatus) Values() []SchemaStatus
Values returns all known values for SchemaStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SchemaVersionErrorItem ¶ added in v0.30.0
type SchemaVersionErrorItem struct { // The details of the error for the schema version. ErrorDetails *ErrorDetails // The version number of the schema. VersionNumber *int64 // contains filtered or unexported fields }
An object that contains the error details for an operation on a schema version.
type SchemaVersionListItem ¶ added in v0.30.0
type SchemaVersionListItem struct { // The date and time the schema version was created. CreatedTime *string // The Amazon Resource Name (ARN) of the schema. SchemaArn *string // The unique identifier of the schema version. SchemaVersionId *string // The status of the schema version. Status SchemaVersionStatus // The version number of the schema. VersionNumber *int64 // contains filtered or unexported fields }
An object containing the details about a schema version.
type SchemaVersionNumber ¶ added in v0.30.0
type SchemaVersionNumber struct { // The latest version available for the schema. LatestVersion bool // The version number of the schema. VersionNumber *int64 // contains filtered or unexported fields }
A structure containing the schema version information.
type SchemaVersionStatus ¶ added in v0.30.0
type SchemaVersionStatus string
const ( SchemaVersionStatusAvailable SchemaVersionStatus = "AVAILABLE" SchemaVersionStatusPending SchemaVersionStatus = "PENDING" SchemaVersionStatusFailure SchemaVersionStatus = "FAILURE" SchemaVersionStatusDeleting SchemaVersionStatus = "DELETING" )
Enum values for SchemaVersionStatus
func (SchemaVersionStatus) Values ¶ added in v0.30.0
func (SchemaVersionStatus) Values() []SchemaVersionStatus
Values returns all known values for SchemaVersionStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SecurityConfiguration ¶
type SecurityConfiguration struct { // The time at which this security configuration was created. CreatedTimeStamp *time.Time // The encryption configuration associated with this security configuration. EncryptionConfiguration *EncryptionConfiguration // The name of the security configuration. Name *string // contains filtered or unexported fields }
Specifies a security configuration.
type Segment ¶
type Segment struct { // The zero-based index number of the segment. For example, if the total number of // segments is 4, SegmentNumber values range from 0 through 3. // // This member is required. SegmentNumber int32 // The total number of segments. // // This member is required. TotalSegments *int32 // contains filtered or unexported fields }
Defines a non-overlapping region of a table's partitions, allowing multiple requests to be run in parallel.
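For example, a 4-way split can be expressed by building one Segment per worker, as in this sketch; each segment would then be passed to its own request (such as a GetPartitions call).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	const total int32 = 4
	segments := make([]types.Segment, 0, total)
	for i := int32(0); i < total; i++ {
		segments = append(segments, types.Segment{
			SegmentNumber: i,                // zero-based: 0 through 3
			TotalSegments: aws.Int32(total), // total number of segments
		})
	}
	fmt.Println(len(segments), "segments")
}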
type SelectFields ¶ added in v1.25.0
type SelectFields struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A JSON path to a variable in the data structure. // // This member is required. Paths [][]string // contains filtered or unexported fields }
Specifies a transform that chooses the data property keys that you want to keep.
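A short sketch of a SelectFields node keeping two nested keys; the node name, input name, and paths are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	sel := types.SelectFields{
		Name:   aws.String("select-fields"), // placeholder node name
		Inputs: []string{"upstream-node"},   // placeholder input node name
		Paths: [][]string{
			{"customer", "id"},    // keep customer.id
			{"customer", "email"}, // keep customer.email
		},
	}
	_ = sel
}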
type SelectFromCollection ¶ added in v1.25.0
type SelectFromCollection struct { // The index for the DynamicFrame to be selected. // // This member is required. Index int32 // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // contains filtered or unexported fields }
Specifies a transform that chooses one DynamicFrame from a collection of DynamicFrames. The output is the selected DynamicFrame.
type Separator ¶ added in v1.25.0
type Separator string
type SerDeInfo ¶
type SerDeInfo struct { // Name of the SerDe. Name *string // These key-value pairs define initialization parameters for the SerDe. Parameters map[string]string // Usually the class that implements the SerDe. An example is // org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe . SerializationLibrary *string // contains filtered or unexported fields }
Information about a serialization/deserialization program (SerDe) that serves as an extractor and loader.
type Session ¶ added in v1.22.0
type Session struct { // The command object.See SessionCommand. Command *SessionCommand // The date and time that this session is completed. CompletedOn *time.Time // The number of connections used for the session. Connections *ConnectionsList // The time and date when the session was created. CreatedOn *time.Time // The DPUs consumed by the session (formula: ExecutionTime * MaxCapacity). DPUSeconds *float64 // A map array of key-value pairs. Max is 75 pairs. DefaultArguments map[string]string // The description of the session. Description *string // The error message displayed during the session. ErrorMessage *string // The total time the session ran for. ExecutionTime *float64 // The Glue version determines the versions of Apache Spark and Python that Glue // supports. The GlueVersion must be greater than 2.0. GlueVersion *string // The ID of the session. Id *string // The number of minutes when idle before the session times out. IdleTimeout *int32 // The number of Glue data processing units (DPUs) that can be allocated when the // job runs. A DPU is a relative measure of processing power that consists of 4 // vCPUs of compute capacity and 16 GB memory. MaxCapacity *float64 // The number of workers of a defined WorkerType to use for the session. NumberOfWorkers *int32 // The name of an Glue usage profile associated with the session. ProfileName *string // The code execution progress of the session. Progress float64 // The name or Amazon Resource Name (ARN) of the IAM role associated with the // Session. Role *string // The name of the SecurityConfiguration structure to be used with the session. SecurityConfiguration *string // The session status. Status SessionStatus // The type of predefined worker that is allocated when a session runs. Accepts a // value of G.1X , G.2X , G.4X , or G.8X for Spark sessions. Accepts the value Z.2X // for Ray sessions. WorkerType WorkerType // contains filtered or unexported fields }
The period in which a remote Spark runtime environment is running.
type SessionCommand ¶ added in v1.22.0
type SessionCommand struct { // Specifies the name of the SessionCommand. Can be 'glueetl' or 'gluestreaming'. Name *string // Specifies the Python version. The Python version indicates the version // supported for jobs of type Spark. PythonVersion *string // contains filtered or unexported fields }
The SessionCommand that runs the job.
type SessionStatus ¶ added in v1.22.0
type SessionStatus string
const ( SessionStatusProvisioning SessionStatus = "PROVISIONING" SessionStatusReady SessionStatus = "READY" SessionStatusFailed SessionStatus = "FAILED" SessionStatusTimeout SessionStatus = "TIMEOUT" SessionStatusStopping SessionStatus = "STOPPING" SessionStatusStopped SessionStatus = "STOPPED" )
Enum values for SessionStatus
func (SessionStatus) Values ¶ added in v1.22.0
func (SessionStatus) Values() []SessionStatus
Values returns all known values for SessionStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SettingSource ¶ added in v1.103.0
type SettingSource string
const ( SettingSourceCatalog SettingSource = "CATALOG" SettingSourceTable SettingSource = "TABLE" )
Enum values for SettingSource
func (SettingSource) Values ¶ added in v1.103.0
func (SettingSource) Values() []SettingSource
Values returns all known values for SettingSource. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SkewedInfo ¶
type SkewedInfo struct { // A list of names of columns that contain skewed values. SkewedColumnNames []string // A mapping of skewed values to the columns that contain them. SkewedColumnValueLocationMaps map[string]string // A list of values that appear so frequently as to be considered skewed. SkewedColumnValues []string // contains filtered or unexported fields }
Specifies skewed values in a table. Skewed values are those that occur with very high frequency.
type SnowflakeNodeData ¶ added in v1.58.0
type SnowflakeNodeData struct { // Specifies what action to take when writing to a table with preexisting data. // Valid values: append , merge , truncate , drop . Action *string // Specifies additional options passed to the Snowflake connector. If options are // specified elsewhere in this node, this will take precedence. AdditionalOptions map[string]string // Specifies whether automatic query pushdown is enabled. If pushdown is enabled, // then when a query is run on Spark, if part of the query can be "pushed down" to // the Snowflake server, it is pushed down. This improves performance of some // queries. AutoPushdown bool // Specifies a Glue Data Catalog Connection to a Snowflake endpoint. Connection *Option // Specifies a Snowflake database for your node to use. Database *string // Not currently used. IamRole *Option // Specifies a merge action. Valid values: simple , custom . If simple, merge // behavior is defined by MergeWhenMatched and MergeWhenNotMatched . If custom, // defined by MergeClause . MergeAction *string // A SQL statement that specifies a custom merge behavior. MergeClause *string // Specifies how to resolve records that match preexisting data when merging. // Valid values: update , delete . MergeWhenMatched *string // Specifies how to process records that do not match preexisting data when // merging. Valid values: insert , none . MergeWhenNotMatched *string // A SQL string run after the Snowflake connector performs its standard actions. PostAction *string // A SQL string run before the Snowflake connector performs its standard actions. PreAction *string // A SQL string used to retrieve data with the query sourcetype. SampleQuery *string // Specifies a Snowflake database schema for your node to use. Schema *string // Specifies the columns combined to identify a record when detecting matches for // merges and upserts. A list of structures with value , label and description // keys. Each structure describes a column. SelectedColumns []Option // Specifies how retrieved data is specified. Valid values: "table" , "query" . SourceType *string // The name of a staging table used when performing merge or upsert append // actions. Data is written to this table, then moved to table by a generated // postaction. StagingTable *string // Specifies a Snowflake table for your node to use. Table *string // Manually defines the target schema for the node. A list of structures with value // , label and description keys. Each structure defines a column. TableSchema []Option // Not currently used. TempDir *string // Used when Action is append . Specifies the resolution behavior when a row // already exists. If true, preexisting rows will be updated. If false, those rows // will be inserted. Upsert bool // contains filtered or unexported fields }
Specifies configuration for Snowflake nodes in Glue Studio.
type SnowflakeSource ¶ added in v1.58.0
type SnowflakeSource struct { // Configuration for the Snowflake data source. // // This member is required. Data *SnowflakeNodeData // The name of the Snowflake data source. // // This member is required. Name *string // Specifies user-defined schemas for your output data. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a Snowflake data source.
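A rough sketch of a SnowflakeSource reading a table through a Data Catalog connection. The connection, database, schema, and table names are placeholders, and the Option value field is assumed to carry the connection name.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	src := types.SnowflakeSource{
		Name: aws.String("snowflake-source"), // placeholder node name
		Data: &types.SnowflakeNodeData{
			Connection: &types.Option{Value: aws.String("example-snowflake-connection")}, // placeholder connection
			SourceType: aws.String("table"),
			Database:   aws.String("EXAMPLE_DB"),    // placeholder
			Schema:     aws.String("PUBLIC"),        // placeholder
			Table:      aws.String("EXAMPLE_TABLE"), // placeholder
		},
	}
	_ = src
}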
type SnowflakeTarget ¶ added in v1.58.0
type SnowflakeTarget struct { // Specifies the data of the Snowflake target node. // // This member is required. Data *SnowflakeNodeData // The name of the Snowflake target. // // This member is required. Name *string // The nodes that are inputs to the data target. Inputs []string // contains filtered or unexported fields }
Specifies a Snowflake target.
type SortCriterion ¶
type SortCriterion struct { // The name of the field on which to sort. FieldName *string // An ascending or descending sort. Sort Sort // contains filtered or unexported fields }
Specifies a field to sort by and a sort order.
type SortDirectionType ¶
type SortDirectionType string
const ( SortDirectionTypeDescending SortDirectionType = "DESCENDING" SortDirectionTypeAscending SortDirectionType = "ASCENDING" )
Enum values for SortDirectionType
func (SortDirectionType) Values ¶ added in v0.29.0
func (SortDirectionType) Values() []SortDirectionType
Values returns all known values for SortDirectionType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SourceControlAuthStrategy ¶ added in v1.33.0
type SourceControlAuthStrategy string
const ( SourceControlAuthStrategyPersonalAccessToken SourceControlAuthStrategy = "PERSONAL_ACCESS_TOKEN" SourceControlAuthStrategyAwsSecretsManager SourceControlAuthStrategy = "AWS_SECRETS_MANAGER" )
Enum values for SourceControlAuthStrategy
func (SourceControlAuthStrategy) Values ¶ added in v1.33.0
func (SourceControlAuthStrategy) Values() []SourceControlAuthStrategy
Values returns all known values for SourceControlAuthStrategy. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SourceControlDetails ¶ added in v1.33.0
type SourceControlDetails struct { // The type of authentication, which can be an authentication token stored in // Amazon Web Services Secrets Manager, or a personal access token. AuthStrategy SourceControlAuthStrategy // The value of an authorization token. AuthToken *string // An optional branch in the remote repository. Branch *string // An optional folder in the remote repository. Folder *string // The last commit ID for a commit in the remote repository. LastCommitId *string // The owner of the remote repository that contains the job artifacts. Owner *string // The provider for the remote repository. Provider SourceControlProvider // The name of the remote repository that contains the job artifacts. Repository *string // contains filtered or unexported fields }
The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.
type SourceControlProvider ¶ added in v1.33.0
type SourceControlProvider string
const ( SourceControlProviderGithub SourceControlProvider = "GITHUB" SourceControlProviderGitlab SourceControlProvider = "GITLAB" SourceControlProviderBitbucket SourceControlProvider = "BITBUCKET" SourceControlProviderAwsCodeCommit SourceControlProvider = "AWS_CODE_COMMIT" )
Enum values for SourceControlProvider
func (SourceControlProvider) Values ¶ added in v1.33.0
func (SourceControlProvider) Values() []SourceControlProvider
Values returns all known values for SourceControlProvider. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type SourceProcessingProperties ¶ added in v1.103.0
type SourceProcessingProperties struct { // The IAM role to access the Glue connection. RoleArn *string // contains filtered or unexported fields }
The resource properties associated with the integration source.
type SourceTableConfig ¶ added in v1.103.0
type SourceTableConfig struct { // A list of fields used for column-level filtering. Fields []string // A condition clause used for row-level filtering. FilterPredicate *string // Unique identifier of a record. PrimaryKey []string // Incremental pull timestamp-based field. RecordUpdateField *string // contains filtered or unexported fields }
Properties used by the source leg to process data from the source.
type SparkConnectorSource ¶ added in v1.25.0
type SparkConnectorSource struct { // The name of the connection that is associated with the connector. // // This member is required. ConnectionName *string // The type of connection, such as marketplace.spark or custom.spark, designating // a connection to an Apache Spark data store. // // This member is required. ConnectionType *string // The name of a connector that assists with accessing the data store in Glue // Studio. // // This member is required. ConnectorName *string // The name of the data source. // // This member is required. Name *string // Additional connection options for the connector. AdditionalOptions map[string]string // Specifies data schema for the custom spark source. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a connector to an Apache Spark data source.
type SparkConnectorTarget ¶ added in v1.25.0
type SparkConnectorTarget struct { // The name of a connection for an Apache Spark connector. // // This member is required. ConnectionName *string // The type of connection, such as marketplace.spark or custom.spark, designating // a connection to an Apache Spark data store. // // This member is required. ConnectionType *string // The name of an Apache Spark connector. // // This member is required. ConnectorName *string // The nodes that are inputs to the data target. // // This member is required. Inputs []string // The name of the data target. // // This member is required. Name *string // Additional connection options for the connector. AdditionalOptions map[string]string // Specifies the data schema for the custom spark target. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a target that uses an Apache Spark connector.
type SparkSQL ¶ added in v1.25.0
type SparkSQL struct { // The data inputs identified by their node names. You can associate a table name // with each input node to use in the SQL query. The name you choose must meet the // Spark SQL naming restrictions. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A list of aliases. An alias allows you to specify what name to use in the SQL // for a given input. For example, you have a datasource named "MyDataSource". If // you specify From as MyDataSource, and Alias as SqlName, then in your SQL you // can do: // // select * from SqlName // // and that gets data from MyDataSource. // // This member is required. SqlAliases []SqlAlias // A SQL query that must use Spark SQL syntax and return a single data set. // // This member is required. SqlQuery *string // Specifies the data schema for the SparkSQL transform. OutputSchemas []GlueSchema // contains filtered or unexported fields }
Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame .
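A sketch of a SparkSQL node wired up the way the alias example above describes: the upstream node "MyDataSource" is exposed to the query under the alias SqlName. The node name is a placeholder.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	node := types.SparkSQL{
		Name:   aws.String("sql-transform"), // placeholder node name
		Inputs: []string{"MyDataSource"},    // upstream node from the alias example above
		SqlAliases: []types.SqlAlias{
			{From: aws.String("MyDataSource"), Alias: aws.String("SqlName")},
		},
		SqlQuery: aws.String("select * from SqlName"),
	}
	_ = node
}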
type Spigot ¶ added in v1.25.0
type Spigot struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A path in Amazon S3 where the transform will write a subset of records from the // dataset to a JSON file in an Amazon S3 bucket. // // This member is required. Path *string // The probability (a decimal value with a maximum value of 1) of picking any // given record. A value of 1 indicates that each row read from the dataset should // be included in the sample output. Prob *float64 // Specifies a number of records to write starting from the beginning of the // dataset. Topk *int32 // contains filtered or unexported fields }
Specifies a transform that writes samples of the data to an Amazon S3 bucket.
type SplitFields ¶ added in v1.25.0
type SplitFields struct { // The data inputs identified by their node names. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // A JSON path to a variable in the data structure. // // This member is required. Paths [][]string // contains filtered or unexported fields }
Specifies a transform that splits data property keys into two DynamicFrames . The output is a collection of DynamicFrames : one with selected data property keys, and one with the remaining data property keys.
type SqlAlias ¶ added in v1.25.0
type SqlAlias struct { // A temporary name given to a table, or a column in a table. // // This member is required. Alias *string // A table, or a column in a table. // // This member is required. From *string // contains filtered or unexported fields }
Represents a single entry in the list of values for SqlAliases .
type StartingEventBatchCondition ¶ added in v1.9.0
type StartingEventBatchCondition struct { // Number of events in the batch. BatchSize *int32 // Duration of the batch window in seconds. BatchWindow *int32 // contains filtered or unexported fields }
The batch condition that started the workflow run. Either the specified number of events arrived (in which case the BatchSize member is non-zero), or the batch window expired (in which case the BatchWindow member is non-zero).
type StartingPosition ¶ added in v1.25.0
type StartingPosition string
const ( StartingPositionLatest StartingPosition = "latest" StartingPositionTrimHorizon StartingPosition = "trim_horizon" StartingPositionEarliest StartingPosition = "earliest" StartingPositionTimestamp StartingPosition = "timestamp" )
Enum values for StartingPosition
func (StartingPosition) Values ¶ added in v1.25.0
func (StartingPosition) Values() []StartingPosition
Values returns all known values for StartingPosition. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Statement ¶ added in v1.22.0
type Statement struct { // The execution code of the statement. Code *string // The unix time and date that the job definition was completed. CompletedOn int64 // The ID of the statement. Id int32 // The output in JSON. Output *StatementOutput // The code execution progress. Progress float64 // The unix time and date that the job definition was started. StartedOn int64 // The state while request is actioned. State StatementState // contains filtered or unexported fields }
The statement or request for a particular action to occur in a session.
type StatementOutput ¶ added in v1.22.0
type StatementOutput struct { // The code execution output. Data *StatementOutputData // The name of the error in the output. ErrorName *string // The error value of the output. ErrorValue *string // The execution count of the output. ExecutionCount int32 // The status of the code execution output. Status StatementState // The traceback of the output. Traceback []string // contains filtered or unexported fields }
The code execution output in JSON format.
type StatementOutputData ¶ added in v1.22.0
type StatementOutputData struct { // The code execution output in text format. TextPlain *string // contains filtered or unexported fields }
The code execution output in JSON format.
type StatementState ¶ added in v1.22.0
type StatementState string
const ( StatementStateWaiting StatementState = "WAITING" StatementStateRunning StatementState = "RUNNING" StatementStateAvailable StatementState = "AVAILABLE" StatementStateCancelling StatementState = "CANCELLING" StatementStateCancelled StatementState = "CANCELLED" StatementStateError StatementState = "ERROR" )
Enum values for StatementState
func (StatementState) Values ¶ added in v1.22.0
func (StatementState) Values() []StatementState
Values returns all known values for StatementState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type StatisticAnnotation ¶ added in v1.92.0
type StatisticAnnotation struct { // The inclusion annotation applied to the statistic. InclusionAnnotation *TimestampedInclusionAnnotation // The Profile ID. ProfileId *string // The Statistic ID. StatisticId *string // The timestamp when the annotated statistic was recorded. StatisticRecordedOn *time.Time // contains filtered or unexported fields }
A Statistic Annotation.
type StatisticEvaluationLevel ¶ added in v1.92.0
type StatisticEvaluationLevel string
const ( StatisticEvaluationLevelDataset StatisticEvaluationLevel = "Dataset" StatisticEvaluationLevelColumn StatisticEvaluationLevel = "Column" StatisticEvaluationLevelMulticolumn StatisticEvaluationLevel = "Multicolumn" )
Enum values for StatisticEvaluationLevel
func (StatisticEvaluationLevel) Values ¶ added in v1.92.0
func (StatisticEvaluationLevel) Values() []StatisticEvaluationLevel
Values returns all known values for StatisticEvaluationLevel. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type StatisticModelResult ¶ added in v1.92.0
type StatisticModelResult struct { // The actual value. ActualValue *float64 // The date. Date *time.Time // The inclusion annotation. InclusionAnnotation InclusionAnnotationValue // The lower bound. LowerBound *float64 // The predicted value. PredictedValue *float64 // The upper bound. UpperBound *float64 // contains filtered or unexported fields }
The statistic model result.
type StatisticSummary ¶ added in v1.92.0
type StatisticSummary struct { // The list of columns referenced by the statistic. ColumnsReferenced []string // The value of the statistic. DoubleValue float64 // The evaluation level of the statistic. Possible values: Dataset , Column , // Multicolumn . EvaluationLevel StatisticEvaluationLevel // The inclusion annotation for the statistic. InclusionAnnotation *TimestampedInclusionAnnotation // The Profile ID. ProfileId *string // The timestamp when the statistic was recorded. RecordedOn *time.Time // The list of datasets referenced by the statistic. ReferencedDatasets []string // The Run Identifier RunIdentifier *RunIdentifier // The Statistic ID. StatisticId *string // The name of the statistic. StatisticName *string // A StatisticPropertiesMap , which contains a NameString and DescriptionString StatisticProperties map[string]string // contains filtered or unexported fields }
Summary information about a statistic.
type StatusDetails ¶ added in v1.93.0
type StatusDetails struct { // A Table object representing the requested changes. RequestedChange *Table // A list of ViewValidation objects that contain information for an analytical // engine to validate a view. ViewValidations []ViewValidation // contains filtered or unexported fields }
A structure containing information about an asynchronous change to a table.
type StorageDescriptor ¶
type StorageDescriptor struct { // A list of locations that point to the path where a Delta table is located. AdditionalLocations []string // A list of reducer grouping columns, clustering columns, and bucketing columns // in the table. BucketColumns []string // A list of the Columns in the table. Columns []Column // True if the data in the table is compressed, or False if not. Compressed bool // The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a // custom format. InputFormat *string // The physical location of the table. By default, this takes the form of the // warehouse location, followed by the database location in the warehouse, followed // by the table name. Location *string // Must be specified if the table contains any dimension columns. NumberOfBuckets int32 // The output format: SequenceFileOutputFormat (binary), or // IgnoreKeyTextOutputFormat , or a custom format. OutputFormat *string // The user-supplied properties in key-value form. Parameters map[string]string // An object that references a schema stored in the Glue Schema Registry. // // When creating a table, you can pass an empty list of columns for the schema, // and instead use a schema reference. SchemaReference *SchemaReference // The serialization/deserialization (SerDe) information. SerdeInfo *SerDeInfo // The information about values that appear frequently in a column (skewed values). SkewedInfo *SkewedInfo // A list specifying the sort order of each bucket in the table. SortColumns []Order // True if the table data is stored in subdirectories, or False if not. StoredAsSubDirectories bool // contains filtered or unexported fields }
Describes the physical storage of table data.
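A rough sketch of a StorageDescriptor for CSV files in Amazon S3. The location is a placeholder, and the Hive input/output formats and OpenCSV SerDe class shown are one common combination, not the only valid one.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	sd := types.StorageDescriptor{
		Location:     aws.String("s3://example-bucket/table/"), // placeholder path
		InputFormat:  aws.String("org.apache.hadoop.mapred.TextInputFormat"),
		OutputFormat: aws.String("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"),
		Columns: []types.Column{
			{Name: aws.String("id"), Type: aws.String("bigint")},
			{Name: aws.String("name"), Type: aws.String("string")},
		},
		SerdeInfo: &types.SerDeInfo{
			SerializationLibrary: aws.String("org.apache.hadoop.hive.serde2.OpenCSVSerde"),
			Parameters:           map[string]string{"separatorChar": ","},
		},
	}
	_ = sd
}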
type StreamingDataPreviewOptions ¶ added in v1.25.0
type StreamingDataPreviewOptions struct { // The polling time in milliseconds. PollingTime *int64 // The limit to the number of records polled. RecordPollingLimit *int64 // contains filtered or unexported fields }
Specifies options related to data preview for viewing a sample of your data.
type StringColumnStatisticsData ¶
type StringColumnStatisticsData struct { // The average string length in the column. // // This member is required. AverageLength float64 // The size of the longest string in the column. // // This member is required. MaximumLength int64 // The number of distinct values in a column. // // This member is required. NumberOfDistinctValues int64 // The number of null values in the column. // // This member is required. NumberOfNulls int64 // contains filtered or unexported fields }
Defines column statistics supported for character sequence data values.
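In practice these values are carried inside a ColumnStatisticsData whose Type is the string statistics type; a rough sketch follows, assuming the STRING constant is named ColumnStatisticsTypeString (the field values are illustrative).

package main

import (
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	data := types.ColumnStatisticsData{
		Type: types.ColumnStatisticsTypeString,
		StringColumnStatisticsData: &types.StringColumnStatisticsData{
			AverageLength:          8.5, // illustrative values
			MaximumLength:          32,
			NumberOfDistinctValues: 120,
			NumberOfNulls:          3,
		},
	}
	_ = data
}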
type SupportedDialect ¶ added in v1.73.0
type SupportedDialect struct { // The dialect of the query engine. Dialect ViewDialect // The version of the dialect of the query engine. For example, 3.0.0. DialectVersion *string // contains filtered or unexported fields }
A structure specifying the dialect and dialect version used by the query engine.
type Table ¶
type Table struct { // The table name. For Hive compatibility, this must be entirely lowercase. // // This member is required. Name *string // The ID of the Data Catalog in which the table resides. CatalogId *string // The time when the table definition was created in the Data Catalog. CreateTime *time.Time // The person or entity who created the table. CreatedBy *string // The name of the database where the table metadata resides. For Hive // compatibility, this must be all lowercase. DatabaseName *string // A description of the table. Description *string // A FederatedTable structure that references an entity outside the Glue Data // Catalog. FederatedTable *FederatedTable // Specifies whether the view supports the SQL dialects of one or more different // query engines and can therefore be read by those engines. IsMultiDialectView *bool // Indicates whether the table has been registered with Lake Formation. IsRegisteredWithLakeFormation bool // The last time that the table was accessed. This is usually taken from HDFS, and // might not be reliable. LastAccessTime *time.Time // The last time that column statistics were computed for this table. LastAnalyzedTime *time.Time // The owner of the table. Owner *string // These key-value pairs define properties associated with the table. Parameters map[string]string // A list of columns by which the table is partitioned. Only primitive types are // supported as partition keys. // // When you create a table used by Amazon Athena, and you do not specify any // partitionKeys , you must at least set the value of partitionKeys to an empty // list. For example: // // "PartitionKeys": [] PartitionKeys []Column // The retention time for this table. Retention int32 // A structure containing information about the state of an asynchronous change to // a table. Status *TableStatus // A storage descriptor containing information about the physical storage of this // table. StorageDescriptor *StorageDescriptor // The type of this table. Glue will create tables with the EXTERNAL_TABLE type. // Other services, such as Athena, may create tables with additional table types. // // Glue related table types: // // EXTERNAL_TABLE Hive compatible attribute - indicates a non-Hive managed table. // // GOVERNED Used by Lake Formation. The Glue Data Catalog understands GOVERNED . TableType *string // A TableIdentifier structure that describes a target table for resource linking. TargetTable *TableIdentifier // The last time that the table was updated. UpdateTime *time.Time // The ID of the table version. VersionId *string // A structure that contains all the information that defines the view, including // the dialect or dialects for the view, and the query. ViewDefinition *ViewDefinition // Included for Apache Hive compatibility. Not used in the normal course of Glue // operations. ViewExpandedText *string // Included for Apache Hive compatibility. Not used in the normal course of Glue // operations. If the table is a VIRTUAL_VIEW , certain Athena configuration // encoded in base64. ViewOriginalText *string // contains filtered or unexported fields }
Represents a collection of related data organized in columns and rows.
type TableAttributes ¶ added in v1.94.0
type TableAttributes string
const ( TableAttributesName TableAttributes = "NAME" TableAttributesTableType TableAttributes = "TABLE_TYPE" )
Enum values for TableAttributes
func (TableAttributes) Values ¶ added in v1.94.0
func (TableAttributes) Values() []TableAttributes
Values returns all known values for TableAttributes. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TableError ¶
type TableError struct { // The details about the error. ErrorDetail *ErrorDetail // The name of the table. For Hive compatibility, this must be entirely lowercase. TableName *string // contains filtered or unexported fields }
An error record for table operations.
type TableIdentifier ¶
type TableIdentifier struct { // The ID of the Data Catalog in which the table resides. CatalogId *string // The name of the catalog database that contains the target table. DatabaseName *string // The name of the target table. Name *string // Region of the target table. Region *string // contains filtered or unexported fields }
A structure that describes a target table for resource linking.
type TableInput ¶
type TableInput struct { // The table name. For Hive compatibility, this is folded to lowercase when it is // stored. // // This member is required. Name *string // A description of the table. Description *string // The last time that the table was accessed. LastAccessTime *time.Time // The last time that column statistics were computed for this table. LastAnalyzedTime *time.Time // The table owner. Included for Apache Hive compatibility. Not used in the normal // course of Glue operations. Owner *string // These key-value pairs define properties associated with the table. Parameters map[string]string // A list of columns by which the table is partitioned. Only primitive types are // supported as partition keys. // // When you create a table used by Amazon Athena, and you do not specify any // partitionKeys , you must at least set the value of partitionKeys to an empty // list. For example: // // "PartitionKeys": [] PartitionKeys []Column // The retention time for this table. Retention int32 // A storage descriptor containing information about the physical storage of this // table. StorageDescriptor *StorageDescriptor // The type of this table. Glue will create tables with the EXTERNAL_TABLE type. // Other services, such as Athena, may create tables with additional table types. // // Glue related table types: // // EXTERNAL_TABLE Hive compatible attribute - indicates a non-Hive managed table. // // GOVERNED Used by Lake Formation. The Glue Data Catalog understands GOVERNED . TableType *string // A TableIdentifier structure that describes a target table for resource linking. TargetTable *TableIdentifier // A structure that contains all the information that defines the view, including // the dialect or dialects for the view, and the query. ViewDefinition *ViewDefinitionInput // Included for Apache Hive compatibility. Not used in the normal course of Glue // operations. ViewExpandedText *string // Included for Apache Hive compatibility. Not used in the normal course of Glue // operations. If the table is a VIRTUAL_VIEW , certain Athena configuration // encoded in base64. ViewOriginalText *string // contains filtered or unexported fields }
A structure used to define a table.
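A compact sketch of a TableInput for an external table, including the explicitly empty PartitionKeys list called out in the field documentation above for Athena-created tables. The names and S3 location are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	in := types.TableInput{
		Name:          aws.String("example_table"), // placeholder; lowercase for Hive compatibility
		TableType:     aws.String("EXTERNAL_TABLE"),
		PartitionKeys: []types.Column{}, // explicitly empty, per the note above
		StorageDescriptor: &types.StorageDescriptor{
			Location: aws.String("s3://example-bucket/table/"), // placeholder path
			Columns: []types.Column{
				{Name: aws.String("id"), Type: aws.String("bigint")},
			},
		},
	}
	_ = in
}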
type TableOptimizer ¶ added in v1.68.0
type TableOptimizer struct { // A TableOptimizerConfiguration object that was specified when creating or // updating a table optimizer. Configuration *TableOptimizerConfiguration // A TableOptimizerRun object representing the last run of the table optimizer. LastRun *TableOptimizerRun // The type of table optimizer. The valid values are: // // - compaction : for managing compaction with a table optimizer. // // - retention : for managing the retention of snapshot with a table optimizer. // // - orphan_file_deletion : for managing the deletion of orphan files with a // table optimizer. Type TableOptimizerType // contains filtered or unexported fields }
Contains details about an optimizer associated with a table.
type TableOptimizerConfiguration ¶ added in v1.68.0
type TableOptimizerConfiguration struct { // Whether table optimization is enabled. Enabled *bool // The configuration for an orphan file deletion optimizer. OrphanFileDeletionConfiguration *OrphanFileDeletionConfiguration // The configuration for a snapshot retention optimizer. RetentionConfiguration *RetentionConfiguration // A role passed by the caller which gives the service permission to update the // resources associated with the optimizer on the caller's behalf. RoleArn *string // A TableOptimizerVpcConfiguration object representing the VPC configuration for // a table optimizer. // // This configuration is necessary to perform optimization on tables that are in a // customer VPC. VpcConfiguration TableOptimizerVpcConfiguration // contains filtered or unexported fields }
Contains details on the configuration of a table optimizer. You pass this configuration when creating or updating a table optimizer.
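A hedged sketch of how an optimizer configuration might be assembled; the role ARN and connection name are placeholders, and the VPC member is only needed when the table lives in a customer VPC (it uses the union member type described later in this document).

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.TableOptimizerConfiguration{
        Enabled: aws.Bool(true),
        // Role that Glue assumes to update the optimized table's resources.
        RoleArn: aws.String("arn:aws:iam::123456789012:role/GlueTableOptimizerRole"), // hypothetical role
        // Only needed for tables in a customer VPC.
        VpcConfiguration: &types.TableOptimizerVpcConfigurationMemberGlueConnectionName{
            Value: "my-vpc-glue-connection", // hypothetical Glue connection name
        },
    }
}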
type TableOptimizerEventType ¶ added in v1.68.0
type TableOptimizerEventType string
const ( TableOptimizerEventTypeStarting TableOptimizerEventType = "starting" TableOptimizerEventTypeCompleted TableOptimizerEventType = "completed" TableOptimizerEventTypeFailed TableOptimizerEventType = "failed" TableOptimizerEventTypeInProgress TableOptimizerEventType = "in_progress" )
Enum values for TableOptimizerEventType
func (TableOptimizerEventType) Values ¶ added in v1.68.0
func (TableOptimizerEventType) Values() []TableOptimizerEventType
Values returns all known values for TableOptimizerEventType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TableOptimizerRun ¶ added in v1.68.0
type TableOptimizerRun struct { // A CompactionMetrics object containing metrics for the optimizer run. CompactionMetrics *CompactionMetrics // Represents the epoch timestamp at which the compaction job ended. EndTimestamp *time.Time // An error that occurred during the optimizer run. Error *string // An event type representing the status of the table optimizer run. EventType TableOptimizerEventType // A RunMetrics object containing metrics for the optimizer run. // // This member is deprecated. See the individual metric members for compaction, // retention, and orphan file deletion. // // Deprecated: Metrics has been replaced by optimizer type specific metrics such // as IcebergCompactionMetrics Metrics *RunMetrics // An OrphanFileDeletionMetrics object containing metrics for the optimizer run. OrphanFileDeletionMetrics *OrphanFileDeletionMetrics // A RetentionMetrics object containing metrics for the optimizer run. RetentionMetrics *RetentionMetrics // Represents the epoch timestamp at which the compaction job was started within // Lake Formation. StartTimestamp *time.Time // contains filtered or unexported fields }
Contains details for a table optimizer run.
type TableOptimizerType ¶ added in v1.68.0
type TableOptimizerType string
const ( TableOptimizerTypeCompaction TableOptimizerType = "compaction" TableOptimizerTypeRetention TableOptimizerType = "retention" TableOptimizerTypeOrphanFileDeletion TableOptimizerType = "orphan_file_deletion" )
Enum values for TableOptimizerType
func (TableOptimizerType) Values ¶ added in v1.68.0
func (TableOptimizerType) Values() []TableOptimizerType
Values returns all known values for TableOptimizerType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TableOptimizerVpcConfiguration ¶ added in v1.102.0
type TableOptimizerVpcConfiguration interface {
// contains filtered or unexported methods
}
An object that describes the VPC configuration for a table optimizer.
This configuration is necessary to perform optimization on tables that are in a customer VPC.
The following types satisfy this interface:
TableOptimizerVpcConfigurationMemberGlueConnectionName
Example (OutputUsage) ¶
// Code generated by smithy-go-codegen DO NOT EDIT.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    var union types.TableOptimizerVpcConfiguration
    // type switches can be used to check the union value
    switch v := union.(type) {
    case *types.TableOptimizerVpcConfigurationMemberGlueConnectionName:
        _ = v.Value // Value is string

    case *types.UnknownUnionMember:
        fmt.Println("unknown tag:", v.Tag)

    default:
        fmt.Println("union is nil or unknown type")

    }
}

var _ *string
Output:
type TableOptimizerVpcConfigurationMemberGlueConnectionName ¶ added in v1.102.0
type TableOptimizerVpcConfigurationMemberGlueConnectionName struct { Value string // contains filtered or unexported fields }
The name of the Glue connection used for the VPC for the table optimizer.
type TableStatus ¶ added in v1.93.0
type TableStatus struct { // Indicates which action was called on the table, currently only CREATE or UPDATE . Action ResourceAction // A StatusDetails object with information about the requested change. Details *StatusDetails // An error that will only appear when the state is "FAILED". This is a parent // level exception message, there may be different Error s for each dialect. Error *ErrorDetail // An ISO 8601 formatted date string indicating the time that the change was // initiated. RequestTime *time.Time // The ARN of the user who requested the asynchronous change. RequestedBy *string // A generic status for the change in progress, such as QUEUED, IN_PROGRESS, // SUCCESS, or FAILED. State ResourceState // An ISO 8601 formatted date string indicating the time that the state was last // updated. UpdateTime *time.Time // The ARN of the user to last manually alter the asynchronous change (requesting // cancellation, etc). UpdatedBy *string // contains filtered or unexported fields }
A structure containing information about the state of an asynchronous change to a table.
type TableVersion ¶
type TableVersion struct { // The table in question. Table *Table // The ID value that identifies this table version. A VersionId is a string // representation of an integer. Each version is incremented by 1. VersionId *string // contains filtered or unexported fields }
Specifies a version of a table.
type TableVersionError ¶
type TableVersionError struct { // The details about the error. ErrorDetail *ErrorDetail // The name of the table in question. TableName *string // The ID value of the version in question. A VersionID is a string representation // of an integer. Each version is incremented by 1. VersionId *string // contains filtered or unexported fields }
An error record for table-version operations.
type Tag ¶ added in v1.103.0
type Tag struct { // The tag key. The key is required when you create a tag on an object. The key is // case-sensitive, and must not contain the prefix aws. Key *string // The tag value. The value is optional when you create a tag on an object. The // value is case-sensitive, and must not contain the prefix aws. Value *string // contains filtered or unexported fields }
The Tag object represents a label that you can assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define.
For more information about tags, and controlling access to resources in Glue, see Amazon Web Services Tags in Glue and Specifying Glue Resource ARNs in the developer guide.
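A small sketch of a Tag value; the key and value below are illustrative and, as noted above, must not use the aws prefix.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.Tag{
        Key:   aws.String("team"),      // required when creating a tag; case-sensitive
        Value: aws.String("analytics"), // optional; case-sensitive
    }
}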
type TargetFormat ¶ added in v1.25.0
type TargetFormat string
const ( TargetFormatJson TargetFormat = "json" TargetFormatCsv TargetFormat = "csv" TargetFormatAvro TargetFormat = "avro" TargetFormatOrc TargetFormat = "orc" TargetFormatParquet TargetFormat = "parquet" TargetFormatHudi TargetFormat = "hudi" TargetFormatDelta TargetFormat = "delta" )
Enum values for TargetFormat
func (TargetFormat) Values ¶ added in v1.25.0
func (TargetFormat) Values() []TargetFormat
Values returns all known values for TargetFormat. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TargetProcessingProperties ¶ added in v1.103.0
type TargetProcessingProperties struct { // The Glue network connection to configure the Glue job running in the customer // VPC. ConnectionName *string // The ARN of an Eventbridge event bus to receive the integration status // notification. EventBusArn *string // The ARN of the KMS key used for encryption. KmsArn *string // The IAM role to access the Glue database. RoleArn *string // contains filtered or unexported fields }
The resource properties associated with the integration target.
type TargetRedshiftCatalog ¶ added in v1.103.0
type TargetRedshiftCatalog struct { // The Amazon Resource Name (ARN) of the catalog resource. // // This member is required. CatalogArn *string // contains filtered or unexported fields }
A structure that describes a target catalog for resource linking.
type TargetResourceNotFound ¶ added in v1.103.0
type TargetResourceNotFound struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The target resource could not be found.
func (*TargetResourceNotFound) Error ¶ added in v1.103.0
func (e *TargetResourceNotFound) Error() string
func (*TargetResourceNotFound) ErrorCode ¶ added in v1.103.0
func (e *TargetResourceNotFound) ErrorCode() string
func (*TargetResourceNotFound) ErrorFault ¶ added in v1.103.0
func (e *TargetResourceNotFound) ErrorFault() smithy.ErrorFault
func (*TargetResourceNotFound) ErrorMessage ¶ added in v1.103.0
func (e *TargetResourceNotFound) ErrorMessage() string
type TargetTableConfig ¶ added in v1.103.0
type TargetTableConfig struct { // Determines the file layout on the target. PartitionSpec []IntegrationPartition // The optional name of a target table. TargetTableName *string // Specifies how nested objects are flattened to top-level elements. Valid values // are: "TOPLEVEL", "FULL", or "NOUNNEST". UnnestSpec UnnestSpec // contains filtered or unexported fields }
Properties used by the target leg to partition the data on the target.
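A minimal sketch, with a hypothetical table name, showing how a target table configuration might request top-level unnesting; PartitionSpec is omitted here.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.TargetTableConfig{
        TargetTableName: aws.String("orders_replica"), // hypothetical name on the target
        UnnestSpec:      types.UnnestSpecToplevel,     // flatten only top-level nested objects
    }
}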
type TaskRun ¶
type TaskRun struct { // The last point in time that the requested task run was completed. CompletedOn *time.Time // The list of error strings associated with this task run. ErrorString *string // The amount of time (in seconds) that the task run consumed resources. ExecutionTime int32 // The last point in time that the requested task run was updated. LastModifiedOn *time.Time // The names of the log group for secure logging, associated with this task run. LogGroupName *string // Specifies configuration properties associated with this task run. Properties *TaskRunProperties // The date and time that this task run started. StartedOn *time.Time // The current status of the requested task run. Status TaskStatusType // The unique identifier for this task run. TaskRunId *string // The unique identifier for the transform. TransformId *string // contains filtered or unexported fields }
Represents a single run of a machine learning transform task, including its status, timing, and task-specific properties.
type TaskRunFilterCriteria ¶
type TaskRunFilterCriteria struct { // Filter on task runs started after this date. StartedAfter *time.Time // Filter on task runs started before this date. StartedBefore *time.Time // The current status of the task run. Status TaskStatusType // The type of task run. TaskRunType TaskType // contains filtered or unexported fields }
The criteria that are used to filter the task runs for the machine learning transform.
type TaskRunProperties ¶
type TaskRunProperties struct { // The configuration properties for an exporting labels task run. ExportLabelsTaskRunProperties *ExportLabelsTaskRunProperties // The configuration properties for a find matches task run. FindMatchesTaskRunProperties *FindMatchesTaskRunProperties // The configuration properties for an importing labels task run. ImportLabelsTaskRunProperties *ImportLabelsTaskRunProperties // The configuration properties for a labeling set generation task run. LabelingSetGenerationTaskRunProperties *LabelingSetGenerationTaskRunProperties // The type of task run. TaskType TaskType // contains filtered or unexported fields }
The configuration properties for the task run.
type TaskRunSortColumnType ¶
type TaskRunSortColumnType string
const ( TaskRunSortColumnTypeTaskRunType TaskRunSortColumnType = "TASK_RUN_TYPE" TaskRunSortColumnTypeStatus TaskRunSortColumnType = "STATUS" TaskRunSortColumnTypeStarted TaskRunSortColumnType = "STARTED" )
Enum values for TaskRunSortColumnType
func (TaskRunSortColumnType) Values ¶ added in v0.29.0
func (TaskRunSortColumnType) Values() []TaskRunSortColumnType
Values returns all known values for TaskRunSortColumnType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TaskRunSortCriteria ¶
type TaskRunSortCriteria struct { // The column to be used to sort the list of task runs for the machine learning // transform. // // This member is required. Column TaskRunSortColumnType // The sort direction to be used to sort the list of task runs for the machine // learning transform. // // This member is required. SortDirection SortDirectionType // contains filtered or unexported fields }
The sorting criteria that are used to sort the list of task runs for the machine learning transform.
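A sketch, under the assumption that these structures are passed to a task-run listing call such as GetMLTaskRuns, of filter and sort criteria that select recently started, successful runs; the 24-hour window is arbitrary.

package main

import (
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    filter := types.TaskRunFilterCriteria{
        Status:       types.TaskStatusTypeSucceeded,
        StartedAfter: aws.Time(time.Now().Add(-24 * time.Hour)),
    }
    sortBy := types.TaskRunSortCriteria{
        Column:        types.TaskRunSortColumnTypeStarted,
        SortDirection: "DESCENDING", // SortDirectionType is a string-backed enum
    }
    _, _ = filter, sortBy
}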
type TaskStatusType ¶
type TaskStatusType string
const ( TaskStatusTypeStarting TaskStatusType = "STARTING" TaskStatusTypeRunning TaskStatusType = "RUNNING" TaskStatusTypeStopping TaskStatusType = "STOPPING" TaskStatusTypeStopped TaskStatusType = "STOPPED" TaskStatusTypeSucceeded TaskStatusType = "SUCCEEDED" TaskStatusTypeFailed TaskStatusType = "FAILED" TaskStatusTypeTimeout TaskStatusType = "TIMEOUT" )
Enum values for TaskStatusType
func (TaskStatusType) Values ¶ added in v0.29.0
func (TaskStatusType) Values() []TaskStatusType
Values returns all known values for TaskStatusType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TaskType ¶
type TaskType string
type TestConnectionInput ¶ added in v1.97.0
type TestConnectionInput struct { // The key-value pairs that define parameters for the connection. // // JDBC connections use the following connection properties: // // - Required: All of ( HOST , PORT , JDBC_ENGINE ) or JDBC_CONNECTION_URL . // // - Required: All of ( USERNAME , PASSWORD ) or SECRET_ID . // // - Optional: JDBC_ENFORCE_SSL , CUSTOM_JDBC_CERT , CUSTOM_JDBC_CERT_STRING , // SKIP_CUSTOM_JDBC_CERT_VALIDATION . These parameters are used to configure SSL // with JDBC. // // SALESFORCE connections require the AuthenticationConfiguration member to be // configured. // // This member is required. ConnectionProperties map[string]string // The type of connection to test. This operation is only available for the JDBC // or SALESFORCE connection types. // // This member is required. ConnectionType ConnectionType // A structure containing the authentication configuration in the TestConnection // request. Required for a connection to Salesforce using OAuth authentication. AuthenticationConfiguration *AuthenticationConfigurationInput // contains filtered or unexported fields }
A structure that is used to specify testing a connection to a service.
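A sketch of a JDBC test input using the property keys listed above; the endpoint and secret name are placeholders, and choosing SECRET_ID over USERNAME/PASSWORD is just one of the documented options.

package main

import "github.com/aws/aws-sdk-go-v2/service/glue/types"

func main() {
    _ = types.TestConnectionInput{
        ConnectionType: "JDBC", // ConnectionType is a string-backed enum
        ConnectionProperties: map[string]string{
            "JDBC_CONNECTION_URL": "jdbc:postgresql://db.example.com:5432/sales", // hypothetical endpoint
            "SECRET_ID":           "my-db-credentials",                           // hypothetical Secrets Manager secret
            "JDBC_ENFORCE_SSL":    "true",
        },
    }
}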
type ThrottlingException ¶ added in v1.96.0
type ThrottlingException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
The throttling threshold was exceeded.
func (*ThrottlingException) Error ¶ added in v1.96.0
func (e *ThrottlingException) Error() string
func (*ThrottlingException) ErrorCode ¶ added in v1.96.0
func (e *ThrottlingException) ErrorCode() string
func (*ThrottlingException) ErrorFault ¶ added in v1.96.0
func (e *ThrottlingException) ErrorFault() smithy.ErrorFault
func (*ThrottlingException) ErrorMessage ¶ added in v1.96.0
func (e *ThrottlingException) ErrorMessage() string
type TimestampFilter ¶ added in v1.92.0
type TimestampFilter struct { // The timestamp after which statistics should be included in the results. RecordedAfter *time.Time // The timestamp before which statistics should be included in the results. RecordedBefore *time.Time // contains filtered or unexported fields }
A timestamp filter.
type TimestampedInclusionAnnotation ¶ added in v1.92.0
type TimestampedInclusionAnnotation struct { // The timestamp when the inclusion annotation was last modified. LastModifiedOn *time.Time // The inclusion annotation value. Value InclusionAnnotationValue // contains filtered or unexported fields }
A timestamped inclusion annotation.
type TransformConfigParameter ¶ added in v1.36.0
type TransformConfigParameter struct { // Specifies the name of the parameter in the config file of the dynamic transform. // // This member is required. Name *string // Specifies the parameter type in the config file of the dynamic transform. // // This member is required. Type ParamType // Specifies whether the parameter is optional or not in the config file of the // dynamic transform. IsOptional *bool // Specifies the list type of the parameter in the config file of the dynamic // transform. ListType ParamType // Specifies the validation message in the config file of the dynamic transform. ValidationMessage *string // Specifies the validation rule in the config file of the dynamic transform. ValidationRule *string // Specifies the value of the parameter in the config file of the dynamic // transform. Value []string // contains filtered or unexported fields }
Specifies the parameters in the config file of the dynamic transform.
type TransformEncryption ¶ added in v0.29.0
type TransformEncryption struct { // An MLUserDataEncryption object containing the encryption mode and // customer-provided KMS key ID. MlUserDataEncryption *MLUserDataEncryption // The name of the security configuration. TaskRunSecurityConfigurationName *string // contains filtered or unexported fields }
The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.
Additionally, imported labels and trained transforms can now be encrypted using a customer provided KMS key.
type TransformFilterCriteria ¶
type TransformFilterCriteria struct { // The time and date after which the transforms were created. CreatedAfter *time.Time // The time and date before which the transforms were created. CreatedBefore *time.Time // This value determines which version of Glue this machine learning transform is // compatible with. Glue 1.0 is recommended for most customers. If the value is not // set, the Glue compatibility defaults to Glue 0.9. For more information, see [Glue Versions]in // the developer guide. // // [Glue Versions]: https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions GlueVersion *string // Filter on transforms last modified after this date. LastModifiedAfter *time.Time // Filter on transforms last modified before this date. LastModifiedBefore *time.Time // A unique transform name that is used to filter the machine learning transforms. Name *string // Filters on datasets with a specific schema. The Map object is an array of // key-value pairs representing the schema this transform accepts, where Column is // the name of a column, and Type is the type of the data such as an integer or // string. Has an upper bound of 100 columns. Schema []SchemaColumn // Filters the list of machine learning transforms by the last known status of the // transforms (to indicate whether a transform can be used or not). One of // "NOT_READY", "READY", or "DELETING". Status TransformStatusType // The type of machine learning transform that is used to filter the machine // learning transforms. TransformType TransformType // contains filtered or unexported fields }
The criteria used to filter the machine learning transforms.
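A brief sketch of filter criteria that narrow a transform listing to ready find-matches transforms; the transform name is a placeholder.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.TransformFilterCriteria{
        Name:          aws.String("dedupe-customers"), // hypothetical transform name
        Status:        types.TransformStatusTypeReady,
        TransformType: types.TransformTypeFindMatches,
    }
}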
type TransformParameters ¶
type TransformParameters struct { // The type of machine learning transform. // // For information about the types of machine learning transforms, see [Creating Machine Learning Transforms]. // // [Creating Machine Learning Transforms]: https://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html // // This member is required. TransformType TransformType // The parameters for the find matches algorithm. FindMatchesParameters *FindMatchesParameters // contains filtered or unexported fields }
The algorithm-specific parameters that are associated with the machine learning transform.
type TransformSortColumnType ¶
type TransformSortColumnType string
const ( TransformSortColumnTypeName TransformSortColumnType = "NAME" TransformSortColumnTypeTransformType TransformSortColumnType = "TRANSFORM_TYPE" TransformSortColumnTypeStatus TransformSortColumnType = "STATUS" TransformSortColumnTypeCreated TransformSortColumnType = "CREATED" TransformSortColumnTypeLastModified TransformSortColumnType = "LAST_MODIFIED" )
Enum values for TransformSortColumnType
func (TransformSortColumnType) Values ¶ added in v0.29.0
func (TransformSortColumnType) Values() []TransformSortColumnType
Values returns all known values for TransformSortColumnType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TransformSortCriteria ¶
type TransformSortCriteria struct { // The column to be used in the sorting criteria that are associated with the // machine learning transform. // // This member is required. Column TransformSortColumnType // The sort direction to be used in the sorting criteria that are associated with // the machine learning transform. // // This member is required. SortDirection SortDirectionType // contains filtered or unexported fields }
The sorting criteria that are associated with the machine learning transform.
type TransformStatusType ¶
type TransformStatusType string
const ( TransformStatusTypeNotReady TransformStatusType = "NOT_READY" TransformStatusTypeReady TransformStatusType = "READY" TransformStatusTypeDeleting TransformStatusType = "DELETING" )
Enum values for TransformStatusType
func (TransformStatusType) Values ¶ added in v0.29.0
func (TransformStatusType) Values() []TransformStatusType
Values returns all known values for TransformStatusType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TransformType ¶
type TransformType string
const (
TransformTypeFindMatches TransformType = "FIND_MATCHES"
)
Enum values for TransformType
func (TransformType) Values ¶ added in v0.29.0
func (TransformType) Values() []TransformType
Values returns all known values for TransformType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Trigger ¶
type Trigger struct { // The actions initiated by this trigger. Actions []Action // A description of this trigger. Description *string // Batch condition that must be met (specified number of events received or batch // time window expired) before EventBridge event trigger fires. EventBatchingCondition *EventBatchingCondition // Reserved for future use. Id *string // The name of the trigger. Name *string // The predicate of this trigger, which defines when it will fire. Predicate *Predicate // A cron expression used to specify the schedule (see [Time-Based Schedules for Jobs and Crawlers]. For example, to run // something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) . // // [Time-Based Schedules for Jobs and Crawlers]: https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html Schedule *string // The current state of the trigger. State TriggerState // The type of trigger that this is. Type TriggerType // The name of the workflow associated with the trigger. WorkflowName *string // contains filtered or unexported fields }
Information about a specific trigger.
type TriggerNodeDetails ¶
type TriggerNodeDetails struct { // The information of the trigger represented by the trigger node. Trigger *Trigger // contains filtered or unexported fields }
The details of a Trigger node present in the workflow.
type TriggerState ¶
type TriggerState string
const ( TriggerStateCreating TriggerState = "CREATING" TriggerStateCreated TriggerState = "CREATED" TriggerStateActivating TriggerState = "ACTIVATING" TriggerStateActivated TriggerState = "ACTIVATED" TriggerStateDeactivating TriggerState = "DEACTIVATING" TriggerStateDeactivated TriggerState = "DEACTIVATED" TriggerStateDeleting TriggerState = "DELETING" TriggerStateUpdating TriggerState = "UPDATING" )
Enum values for TriggerState
func (TriggerState) Values ¶ added in v0.29.0
func (TriggerState) Values() []TriggerState
Values returns all known values for TriggerState. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TriggerType ¶
type TriggerType string
const ( TriggerTypeScheduled TriggerType = "SCHEDULED" TriggerTypeConditional TriggerType = "CONDITIONAL" TriggerTypeOnDemand TriggerType = "ON_DEMAND" TriggerTypeEvent TriggerType = "EVENT" )
Enum values for TriggerType
func (TriggerType) Values ¶ added in v0.29.0
func (TriggerType) Values() []TriggerType
Values returns all known values for TriggerType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type TriggerUpdate ¶
type TriggerUpdate struct { // The actions initiated by this trigger. Actions []Action // A description of this trigger. Description *string // Batch condition that must be met (specified number of events received or batch // time window expired) before EventBridge event trigger fires. EventBatchingCondition *EventBatchingCondition // Reserved for future use. Name *string // The predicate of this trigger, which defines when it will fire. Predicate *Predicate // A cron expression used to specify the schedule (see [Time-Based Schedules for Jobs and Crawlers]. For example, to run // something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) . // // [Time-Based Schedules for Jobs and Crawlers]: https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html Schedule *string // contains filtered or unexported fields }
A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.
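A sketch of a TriggerUpdate that applies the 12:15 UTC schedule from the example above; the job name is hypothetical, and the structure is typically supplied alongside the name of the trigger being modified (for example, in an UpdateTrigger call).

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.TriggerUpdate{
        Description: aws.String("Runs the nightly refresh job at 12:15 UTC."),
        Schedule:    aws.String("cron(15 12 * * ? *)"),
        Actions: []types.Action{
            {JobName: aws.String("nightly-refresh-job")}, // hypothetical job name
        },
    }
}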
type UnfilteredPartition ¶ added in v1.18.0
type UnfilteredPartition struct { // The list of columns the user has permissions to access. AuthorizedColumns []string // A Boolean value indicating that the partition location is registered with Lake // Formation. IsRegisteredWithLakeFormation bool // The partition object. Partition *Partition // contains filtered or unexported fields }
A partition that contains unfiltered metadata.
type Union ¶ added in v1.25.0
type Union struct { // The node ID inputs to the transform. // // This member is required. Inputs []string // The name of the transform node. // // This member is required. Name *string // Indicates the type of Union transform. // // Specify ALL to join all rows from data sources to the resulting DynamicFrame. // The resulting union does not remove duplicate rows. // // Specify DISTINCT to remove duplicate rows in the resulting DynamicFrame. // // This member is required. UnionType UnionType // contains filtered or unexported fields }
Specifies a transform that combines the rows from two or more datasets into a single result.
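A short sketch of a Union node that deduplicates the combined rows; the node name and input IDs are placeholders, and UnionType is a string-backed enum accepting "ALL" or "DISTINCT".

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.Union{
        Name:      aws.String("merge_sources"),  // hypothetical node name
        Inputs:    []string{"node-1", "node-2"}, // hypothetical upstream node IDs
        UnionType: "DISTINCT",                   // use "ALL" to keep duplicate rows
    }
}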
type UnknownUnionMember ¶ added in v1.102.0
type UnknownUnionMember struct { Tag string Value []byte // contains filtered or unexported fields }
UnknownUnionMember is returned when a union member is returned over the wire, but has an unknown tag.
type UnnestSpec ¶ added in v1.103.0
type UnnestSpec string
const ( UnnestSpecToplevel UnnestSpec = "TOPLEVEL" UnnestSpecFull UnnestSpec = "FULL" UnnestSpecNounnest UnnestSpec = "NOUNNEST" )
Enum values for UnnestSpec
func (UnnestSpec) Values ¶ added in v1.103.0
func (UnnestSpec) Values() []UnnestSpec
Values returns all known values for UnnestSpec. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type UpdateBehavior ¶
type UpdateBehavior string
const ( UpdateBehaviorLog UpdateBehavior = "LOG" UpdateBehaviorUpdateInDatabase UpdateBehavior = "UPDATE_IN_DATABASE" )
Enum values for UpdateBehavior
func (UpdateBehavior) Values ¶ added in v0.29.0
func (UpdateBehavior) Values() []UpdateBehavior
Values returns all known values for UpdateBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type UpdateCatalogBehavior ¶ added in v1.25.0
type UpdateCatalogBehavior string
const ( UpdateCatalogBehaviorUpdateInDatabase UpdateCatalogBehavior = "UPDATE_IN_DATABASE" UpdateCatalogBehaviorLog UpdateCatalogBehavior = "LOG" )
Enum values for UpdateCatalogBehavior
func (UpdateCatalogBehavior) Values ¶ added in v1.25.0
func (UpdateCatalogBehavior) Values() []UpdateCatalogBehavior
Values returns all known values for UpdateCatalogBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type UpdateCsvClassifierRequest ¶
type UpdateCsvClassifierRequest struct { // The name of the classifier. // // This member is required. Name *string // Enables the processing of files that contain only one column. AllowSingleColumn *bool // Indicates whether the CSV file contains a header. ContainsHeader CsvHeaderOption // Specifies the configuration of custom datatypes. CustomDatatypeConfigured *bool // Specifies a list of supported custom datatypes. CustomDatatypes []string // A custom symbol to denote what separates each column entry in the row. Delimiter *string // Specifies not to trim values before identifying the type of column values. The // default value is true. DisableValueTrimming *bool // A list of strings representing column names. Header []string // A custom symbol to denote what combines content into a single column value. It // must be different from the column delimiter. QuoteSymbol *string // Sets the SerDe for processing CSV in the classifier, which will be applied in // the Data Catalog. Valid values are OpenCSVSerDe , LazySimpleSerDe , and None . // You can specify the None value when you want the crawler to do the detection. Serde CsvSerdeOption // contains filtered or unexported fields }
Specifies a custom CSV classifier to be updated.
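A sketch of an update for a pipe-delimited classifier; the classifier name and header columns are placeholders, and the header and SerDe values simply follow the options described above.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.UpdateCsvClassifierRequest{
        Name:           aws.String("sales-csv"), // hypothetical classifier name
        Delimiter:      aws.String("|"),
        QuoteSymbol:    aws.String(`"`),
        ContainsHeader: "PRESENT", // CsvHeaderOption is a string-backed enum
        Serde:          "OpenCSVSerDe",
        Header:         []string{"event_id", "amount", "region"},
    }
}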
type UpdateGrokClassifierRequest ¶
type UpdateGrokClassifierRequest struct { // The name of the GrokClassifier . // // This member is required. Name *string // An identifier of the data format that the classifier matches, such as Twitter, // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. Classification *string // Optional custom grok patterns used by this classifier. CustomPatterns *string // The grok pattern used by this classifier. GrokPattern *string // contains filtered or unexported fields }
Specifies a grok classifier to update when passed to UpdateClassifier .
type UpdateJsonClassifierRequest ¶
type UpdateJsonClassifierRequest struct { // The name of the classifier. // // This member is required. Name *string // A JsonPath string defining the JSON data for the classifier to classify. Glue // supports a subset of JsonPath, as described in [Writing JsonPath Custom Classifiers]. // // [Writing JsonPath Custom Classifiers]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json JsonPath *string // contains filtered or unexported fields }
Specifies a JSON classifier to be updated.
type UpdateXMLClassifierRequest ¶
type UpdateXMLClassifierRequest struct { // The name of the classifier. // // This member is required. Name *string // An identifier of the data format that the classifier matches. Classification *string // The XML tag designating the element that contains each record in an XML // document being parsed. This cannot identify a self-closing element (closed by /> // ). An empty row element that contains only attributes can be parsed as long as // it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string // contains filtered or unexported fields }
Specifies an XML classifier to be updated.
type UpsertRedshiftTargetOptions ¶ added in v1.25.0
type UpsertRedshiftTargetOptions struct { // The name of the connection to use to write to Redshift. ConnectionName *string // The physical location of the Redshift table. TableLocation *string // The keys used to determine whether to perform an update or insert. UpsertKeys []string // contains filtered or unexported fields }
The options to configure an upsert operation when writing to a Redshift target.
type UsageProfileDefinition ¶ added in v1.86.0
type UsageProfileDefinition struct { // The date and time when the usage profile was created. CreatedOn *time.Time // A description of the usage profile. Description *string // The date and time when the usage profile was last modified. LastModifiedOn *time.Time // The name of the usage profile. Name *string // contains filtered or unexported fields }
Describes a Glue usage profile.
type UserDefinedFunction ¶
type UserDefinedFunction struct { // The ID of the Data Catalog in which the function resides. CatalogId *string // The Java class that contains the function code. ClassName *string // The time at which the function was created. CreateTime *time.Time // The name of the catalog database that contains the function. DatabaseName *string // The name of the function. FunctionName *string // The owner of the function. OwnerName *string // The owner type. OwnerType PrincipalType // The resource URIs for the function. ResourceUris []ResourceUri // contains filtered or unexported fields }
Represents the equivalent of a Hive user-defined function ( UDF ) definition.
type UserDefinedFunctionInput ¶
type UserDefinedFunctionInput struct { // The Java class that contains the function code. ClassName *string // The name of the function. FunctionName *string // The owner of the function. OwnerName *string // The owner type. OwnerType PrincipalType // The resource URIs for the function. ResourceUris []ResourceUri // contains filtered or unexported fields }
A structure used to create or update a user-defined function.
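A sketch of a UDF definition; the function name, Java class, and JAR location are placeholders, and the ResourceUri fields shown are assumptions about that structure's shape.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.UserDefinedFunctionInput{
        FunctionName: aws.String("normalize_phone"),                // hypothetical UDF name
        ClassName:    aws.String("com.example.udf.NormalizePhone"), // hypothetical Java class
        OwnerName:    aws.String("data-platform"),
        OwnerType:    "USER", // PrincipalType is a string-backed enum
        ResourceUris: []types.ResourceUri{
            {ResourceType: "JAR", Uri: aws.String("s3://amzn-s3-demo-bucket/udfs/normalize.jar")}, // hypothetical JAR
        },
    }
}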
type ValidationException ¶
type ValidationException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
A value could not be validated.
func (*ValidationException) Error ¶
func (e *ValidationException) Error() string
func (*ValidationException) ErrorCode ¶
func (e *ValidationException) ErrorCode() string
func (*ValidationException) ErrorFault ¶
func (e *ValidationException) ErrorFault() smithy.ErrorFault
func (*ValidationException) ErrorMessage ¶
func (e *ValidationException) ErrorMessage() string
type VersionMismatchException ¶
type VersionMismatchException struct { Message *string ErrorCodeOverride *string // contains filtered or unexported fields }
There was a version conflict.
func (*VersionMismatchException) Error ¶
func (e *VersionMismatchException) Error() string
func (*VersionMismatchException) ErrorCode ¶
func (e *VersionMismatchException) ErrorCode() string
func (*VersionMismatchException) ErrorFault ¶
func (e *VersionMismatchException) ErrorFault() smithy.ErrorFault
func (*VersionMismatchException) ErrorMessage ¶
func (e *VersionMismatchException) ErrorMessage() string
type ViewDefinition ¶ added in v1.78.0
type ViewDefinition struct { // The definer of a view in SQL. Definer *string // You can set this flag as true to instruct the engine not to push user-provided // operations into the logical plan of the view during query planning. However, // setting this flag does not guarantee that the engine will comply. Refer to the // engine's documentation to understand the guarantees provided, if any. IsProtected *bool // A list of representations. Representations []ViewRepresentation // A list of table Amazon Resource Names (ARNs). SubObjects []string // contains filtered or unexported fields }
A structure containing details for representations.
type ViewDefinitionInput ¶ added in v1.84.0
type ViewDefinitionInput struct { // The definer of a view in SQL. Definer *string // You can set this flag as true to instruct the engine not to push user-provided // operations into the logical plan of the view during query planning. However, // setting this flag does not guarantee that the engine will comply. Refer to the // engine's documentation to understand the guarantees provided, if any. IsProtected *bool // A list of structures that contains the dialect of the view, and the query that // defines the view. Representations []ViewRepresentationInput // A list of base table ARNs that make up the view. SubObjects []string // contains filtered or unexported fields }
A structure containing details for creating or updating a Glue view.
type ViewDialect ¶ added in v1.73.0
type ViewDialect string
const ( ViewDialectRedshift ViewDialect = "REDSHIFT" ViewDialectAthena ViewDialect = "ATHENA" ViewDialectSpark ViewDialect = "SPARK" )
Enum values for ViewDialect
func (ViewDialect) Values ¶ added in v1.73.0
func (ViewDialect) Values() []ViewDialect
Values returns all known values for ViewDialect. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ViewRepresentation ¶ added in v1.78.0
type ViewRepresentation struct { // The dialect of the query engine. Dialect ViewDialect // The version of the dialect of the query engine. For example, 3.0.0. DialectVersion *string // Dialects marked as stale are no longer valid and must be updated before they // can be queried in their respective query engines. IsStale *bool // The name of the connection to be used to validate the specific representation // of the view. ValidationConnection *string // The expanded SQL for the view. This SQL is used by engines while processing a // query on a view. Engines may perform operations during view creation to // transform ViewOriginalText to ViewExpandedText . For example: // // - Fully qualified identifiers: SELECT * from table1 -> SELECT * from // db1.table1 ViewExpandedText *string // The SELECT query provided by the customer during CREATE VIEW DDL . This SQL is // not used during a query on a view ( ViewExpandedText is used instead). // ViewOriginalText is used for cases like SHOW CREATE VIEW where users want to // see the original DDL command that created the view. ViewOriginalText *string // contains filtered or unexported fields }
A structure that contains the dialect of the view, and the query that defines the view.
type ViewRepresentationInput ¶ added in v1.84.0
type ViewRepresentationInput struct { // A parameter that specifies the engine type of a specific representation. Dialect ViewDialect // A parameter that specifies the version of the engine of a specific // representation. DialectVersion *string // The name of the connection to be used to validate the specific representation // of the view. ValidationConnection *string // A string that represents the SQL query that describes the view with expanded // resource ARNs ViewExpandedText *string // A string that represents the original SQL query that describes the view. ViewOriginalText *string // contains filtered or unexported fields }
A structure containing details of a representation to update or create a Lake Formation view.
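A sketch of a view definition carrying a single Athena representation; the definer ARN, SQL text, dialect version, and base-table ARN are placeholders. This structure is what the ViewDefinition member of TableInput expects.

package main

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
    _ = types.ViewDefinitionInput{
        Definer:     aws.String("arn:aws:iam::123456789012:role/ViewDefiner"), // hypothetical definer
        IsProtected: aws.Bool(true),
        Representations: []types.ViewRepresentationInput{
            {
                Dialect:          types.ViewDialectAthena,
                DialectVersion:   aws.String("3"), // hypothetical engine version
                ViewOriginalText: aws.String("SELECT event_id, amount FROM sales_events"),
            },
        },
        SubObjects: []string{
            "arn:aws:glue:us-east-1:123456789012:table/sales/sales_events", // hypothetical base table ARN
        },
    }
}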
type ViewUpdateAction ¶ added in v1.84.0
type ViewUpdateAction string
const ( ViewUpdateActionAdd ViewUpdateAction = "ADD" ViewUpdateActionReplace ViewUpdateAction = "REPLACE" ViewUpdateActionAddOrReplace ViewUpdateAction = "ADD_OR_REPLACE" ViewUpdateActionDrop ViewUpdateAction = "DROP" )
Enum values for ViewUpdateAction
func (ViewUpdateAction) Values ¶ added in v1.84.0
func (ViewUpdateAction) Values() []ViewUpdateAction
Values returns all known values for ViewUpdateAction. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type ViewValidation ¶ added in v1.93.0
type ViewValidation struct { // The dialect of the query engine. Dialect ViewDialect // The version of the dialect of the query engine. For example, 3.0.0. DialectVersion *string // An error associated with the validation. Error *ErrorDetail // The state of the validation. State ResourceState // The time of the last update. UpdateTime *time.Time // The SELECT query that defines the view, as provided by the customer. ViewValidationText *string // contains filtered or unexported fields }
A structure that contains information for an analytical engine to validate a view, prior to persisting the view metadata. Used in the case of direct UpdateTable or CreateTable API calls.
type WorkerType ¶
type WorkerType string
const ( WorkerTypeStandard WorkerType = "Standard" WorkerTypeG1x WorkerType = "G.1X" WorkerTypeG2x WorkerType = "G.2X" WorkerTypeG025x WorkerType = "G.025X" WorkerTypeG4x WorkerType = "G.4X" WorkerTypeG8x WorkerType = "G.8X" WorkerTypeZ2x WorkerType = "Z.2X" )
Enum values for WorkerType
func (WorkerType) Values ¶ added in v0.29.0
func (WorkerType) Values() []WorkerType
Values returns all known values for WorkerType. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type Workflow ¶
type Workflow struct { // This structure indicates the details of the blueprint that this particular // workflow is created from. BlueprintDetails *BlueprintDetails // The date and time when the workflow was created. CreatedOn *time.Time // A collection of properties to be used as part of each execution of the // workflow. The run properties are made available to each job in the workflow. A // job can modify the properties for the next jobs in the flow. DefaultRunProperties map[string]string // A description of the workflow. Description *string // The graph representing all the Glue components that belong to the workflow as // nodes and directed connections between them as edges. Graph *WorkflowGraph // The date and time when the workflow was last modified. LastModifiedOn *time.Time // The information about the last execution of the workflow. LastRun *WorkflowRun // You can use this parameter to prevent unwanted multiple updates to data, to // control costs, or in some cases, to prevent exceeding the maximum number of // concurrent runs of any of the component jobs. If you leave this parameter blank, // there is no limit to the number of concurrent workflow runs. MaxConcurrentRuns *int32 // The name of the workflow. Name *string // contains filtered or unexported fields }
A workflow is a collection of multiple dependent Glue jobs and crawlers that are run to complete a complex ETL task. A workflow manages the execution and monitoring of all its jobs and crawlers.
type WorkflowGraph ¶
type WorkflowGraph struct { // A list of all the directed connections between the nodes belonging to the // workflow. Edges []Edge // A list of the Glue components that belong to the workflow, represented as nodes. Nodes []Node // contains filtered or unexported fields }
A workflow graph represents the complete workflow containing all the Glue components present in the workflow and all the directed connections between them.
type WorkflowRun ¶
type WorkflowRun struct { // The date and time when the workflow run completed. CompletedOn *time.Time // This error message describes any error that may have occurred in starting the // workflow run. Currently the only error message is "Concurrent runs exceeded for // workflow: foo ." ErrorMessage *string // The graph representing all the Glue components that belong to the workflow as // nodes and directed connections between them as edges. Graph *WorkflowGraph // Name of the workflow that was run. Name *string // The ID of the previous workflow run. PreviousRunId *string // The date and time when the workflow run was started. StartedOn *time.Time // The batch condition that started the workflow run. StartingEventBatchCondition *StartingEventBatchCondition // The statistics of the run. Statistics *WorkflowRunStatistics // The status of the workflow run. Status WorkflowRunStatus // The ID of this workflow run. WorkflowRunId *string // The workflow run properties which were set during the run. WorkflowRunProperties map[string]string // contains filtered or unexported fields }
A workflow run is an execution of a workflow providing all the runtime information.
type WorkflowRunStatistics ¶
type WorkflowRunStatistics struct { // Indicates the count of job runs in the ERROR state in the workflow run. ErroredActions int32 // Total number of Actions that have failed. FailedActions int32 // Total number Actions in running state. RunningActions int32 // Total number of Actions that have stopped. StoppedActions int32 // Total number of Actions that have succeeded. SucceededActions int32 // Total number of Actions that timed out. TimeoutActions int32 // Total number of Actions in the workflow run. TotalActions int32 // Indicates the count of job runs in WAITING state in the workflow run. WaitingActions int32 // contains filtered or unexported fields }
Workflow run statistics provides statistics about the workflow run.
type WorkflowRunStatus ¶
type WorkflowRunStatus string
const ( WorkflowRunStatusRunning WorkflowRunStatus = "RUNNING" WorkflowRunStatusCompleted WorkflowRunStatus = "COMPLETED" WorkflowRunStatusStopping WorkflowRunStatus = "STOPPING" WorkflowRunStatusStopped WorkflowRunStatus = "STOPPED" WorkflowRunStatusError WorkflowRunStatus = "ERROR" )
Enum values for WorkflowRunStatus
func (WorkflowRunStatus) Values ¶ added in v0.29.0
func (WorkflowRunStatus) Values() []WorkflowRunStatus
Values returns all known values for WorkflowRunStatus. Note that this can be expanded in the future, and so it is only as up to date as the client.
The ordering of this slice is not guaranteed to be stable across updates.
type XMLClassifier ¶
type XMLClassifier struct { // An identifier of the data format that the classifier matches. // // This member is required. Classification *string // The name of the classifier. // // This member is required. Name *string // The time that this classifier was registered. CreationTime *time.Time // The time that this classifier was last updated. LastUpdated *time.Time // The XML tag designating the element that contains each record in an XML // document being parsed. This can't identify a self-closing element (closed by /> // ). An empty row element that contains only attributes can be parsed as long as // it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string // The version of this classifier. Version int64 // contains filtered or unexported fields }
A classifier for XML content.