Documentation ¶
Index ¶
- Constants
- type AwsElasticsearchOutputConfig
- type AzureStorage
- type Buffer
- type CloudWatchOutput
- type Compress
- type DatadogOutput
- type ElasticsearchOutput
- type Endpoint
- type EndpointCredentials
- type Fields
- type FileOutputConfig
- type FluentdServer
- type Format
- type FormatRfc5424
- type ForwardOutput
- type GCSOutput
- type GELFOutputConfig
- type HTTPAuth
- type HTTPOutputConfig
- type KafkaOutputConfig
- type KinesisFirehoseAssumeRoleCredentials
- func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopy() *KinesisFirehoseAssumeRoleCredentials
- func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopyInto(out *KinesisFirehoseAssumeRoleCredentials)
- func (o *KinesisFirehoseAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type KinesisFirehoseOutputConfig
- type KinesisFirehoseProcessCredentials
- func (in *KinesisFirehoseProcessCredentials) DeepCopy() *KinesisFirehoseProcessCredentials
- func (in *KinesisFirehoseProcessCredentials) DeepCopyInto(out *KinesisFirehoseProcessCredentials)
- func (o *KinesisFirehoseProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type KinesisStreamAssumeRoleCredentials
- func (in *KinesisStreamAssumeRoleCredentials) DeepCopy() *KinesisStreamAssumeRoleCredentials
- func (in *KinesisStreamAssumeRoleCredentials) DeepCopyInto(out *KinesisStreamAssumeRoleCredentials)
- func (o *KinesisStreamAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type KinesisStreamOutputConfig
- type KinesisStreamProcessCredentials
- func (in *KinesisStreamProcessCredentials) DeepCopy() *KinesisStreamProcessCredentials
- func (in *KinesisStreamProcessCredentials) DeepCopyInto(out *KinesisStreamProcessCredentials)
- func (o *KinesisStreamProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type Label
- type LogDNAOutput
- type LogZOutput
- type LokiOutput
- type NewRelicOutputConfig
- type NullOutputConfig
- type OSSOutput
- type ObjectMetadata
- type OpenSearchOutput
- type RedisOutputConfig
- type S3AssumeRoleCredentials
- type S3InstanceProfileCredentials
- type S3OutputConfig
- type S3SharedCredentials
- type SQSOutputConfig
- type SplunkHecOutput
- type SumologicOutput
- type SyslogOutputConfig
Constants ¶
const ( OneEyePathTemplate string = "%v/%%Y/%%m/%%d/${$.kubernetes.namespace_name}/${$.kubernetes.pod_name}/${$.kubernetes.container_name}/" OneEyeObjectKeyFormat string = "%{path}%H:%M_%{index}.%{file_extension}" OneEyeTags string = "tag,time,$.kubernetes.namespace_name,$.kubernetes.pod_name,$.kubernetes.container_name" )
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AwsElasticsearchOutputConfig ¶
type AwsElasticsearchOutputConfig struct { // flush_interval FlushInterval string `json:"flush_interval,omitempty"` // AWS Endpoint Credentials Endpoint *EndpointCredentials `json:"endpoint,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // ElasticSearch *ElasticsearchOutput `json:",omitempty"` }
+kubebuilder:object:generate=true +docName:"Amazon Elasticsearch" Send your logs to an Amazon Elasticsearch Service
func (*AwsElasticsearchOutputConfig) DeepCopy ¶
func (in *AwsElasticsearchOutputConfig) DeepCopy() *AwsElasticsearchOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsElasticsearchOutputConfig.
func (*AwsElasticsearchOutputConfig) DeepCopyInto ¶
func (in *AwsElasticsearchOutputConfig) DeepCopyInto(out *AwsElasticsearchOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*AwsElasticsearchOutputConfig) ToDirective ¶
func (e *AwsElasticsearchOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type AzureStorage ¶
type AzureStorage struct { // Path prefix of the files on Azure Path string `json:"path,omitempty"` // Your azure storage account // +docLink:"Secret,../secret/" AzureStorageAccount *secret.Secret `json:"azure_storage_account"` // Your azure storage access key // +docLink:"Secret,../secret/" AzureStorageAccessKey *secret.Secret `json:"azure_storage_access_key,omitempty"` // Your azure storage sas token // +docLink:"Secret,../secret/" AzureStorageSasToken *secret.Secret `json:"azure_storage_sas_token,omitempty"` // Your azure storage container AzureContainer string `json:"azure_container"` // Azure Instance Metadata Service API Version AzureImdsApiVersion string `json:"azure_imds_api_version,omitempty"` // Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension}) AzureObjectKeyFormat string `json:"azure_object_key_format,omitempty"` // Automatically create container if not exists(default: true) AutoCreateContainer bool `json:"auto_create_container,omitempty"` // Compat format type: out_file, json, ltsv (default: out_file) Format string `json:"format,omitempty" plugin:"default:json"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*AzureStorage) DeepCopy ¶
func (in *AzureStorage) DeepCopy() *AzureStorage
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStorage.
func (*AzureStorage) DeepCopyInto ¶
func (in *AzureStorage) DeepCopyInto(out *AzureStorage)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*AzureStorage) ToDirective ¶
func (a *AzureStorage) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Buffer ¶
type Buffer struct { // Disable buffer section (default: false) Disabled bool `json:"disabled,omitempty" plugin:"default:false,hidden"` // Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed. Type string `json:"type,omitempty"` // When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags. (default: tag,time) Tags *string `json:"tags,omitempty"` // The path where buffer chunks are stored. The '*' is replaced with random characters. It's highly recommended to leave this default. (default: operator generated) Path string `json:"path,omitempty"` // The max size of each chunks: events will be written into chunks until the size of chunks become this size (default: 8MB) ChunkLimitSize string `json:"chunk_limit_size,omitempty" plugin:"default:8MB"` // The max number of events that each chunks can store in it ChunkLimitRecords int `json:"chunk_limit_records,omitempty"` // The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost) TotalLimitSize string `json:"total_limit_size,omitempty"` //The queue length limitation of this buffer plugin instance QueueLimitLength int `json:"queue_limit_length,omitempty"` // The percentage of chunk size threshold for flushing. output plugin will flush the chunk when actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in default) ChunkFullThreshold string `json:"chunk_full_threshold,omitempty"` //Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations. 
QueuedChunksLimitSize int `json:"queued_chunks_limit_size,omitempty"` // If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks. Compress string `json:"compress,omitempty"` // The value to specify to flush/write all buffer chunks at shutdown, or not FlushAtShutdown bool `json:"flush_at_shutdown,omitempty"` // Default: default (equals to lazy if time is specified as chunk key, interval otherwise) // lazy: flush/write chunks once per timekey // interval: flush/write chunks per specified time via flush_interval // immediate: flush/write chunks immediately after events are appended into chunks FlushMode string `json:"flush_mode,omitempty"` // Default: 60s FlushInterval string `json:"flush_interval,omitempty"` // The number of threads of output plugins, which is used to write chunks in parallel FlushThreadCount int `json:"flush_thread_count,omitempty"` // The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting) FlushThreadInterval string `json:"flush_thread_interval,omitempty"` // The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next FlushThreadBurstInterval string `json:"flush_thread_burst_interval,omitempty"` // The timeout seconds until output plugin decides that async write operation fails DelayedCommitTimeout string `json:"delayed_commit_timeout,omitempty"` // How output plugin behaves when its buffer queue is full // throw_exception: raise exception to show this error in log // block: block processing of input plugin to emit events into that buffer // drop_oldest_chunk: drop/purge oldest chunk to accept newly incoming chunk OverflowAction string `json:"overflow_action,omitempty"` // The maximum seconds to retry to flush while failing, until plugin discards buffer chunks RetryTimeout string `json:"retry_timeout,omitempty"` // If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever 
RetryForever *bool `json:"retry_forever,omitempty" plugin:"default:true"` // The maximum number of times to retry to flush while failing RetryMaxTimes int `json:"retry_max_times,omitempty"` // The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0) RetrySecondaryThreshold string `json:"retry_secondary_threshold,omitempty"` // exponential_backoff: wait seconds will become large exponentially per failures // periodic: output plugin will retry periodically with fixed intervals (configured via retry_wait) RetryType string `json:"retry_type,omitempty"` // Seconds to wait before next retry to flush, or constant factor of exponential backoff RetryWait string `json:"retry_wait,omitempty"` // The base number of exponential backoff for retries RetryExponentialBackoffBase string `json:"retry_exponential_backoff_base,omitempty"` // The maximum interval seconds for exponential backoff between retries while failing RetryMaxInterval string `json:"retry_max_interval,omitempty"` // If true, output plugin will retry after randomized interval not to do burst retries RetryRandomize bool `json:"retry_randomize,omitempty"` // Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6. DisableChunkBackup bool `json:"disable_chunk_backup,omitempty"` // Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys) // +kubebuilder:validation:Optional Timekey string `json:"timekey" plugin:"default:10m"` // Output plugin writes chunks after timekey_wait seconds later after timekey expiration TimekeyWait string `json:"timekey_wait,omitempty" plugin:"default:1m"` // Output plugin decides to use UTC or not to format placeholders using timekey TimekeyUseUtc bool `json:"timekey_use_utc,omitempty"` // The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders TimekeyZone string `json:"timekey_zone,omitempty"` }
func (*Buffer) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Buffer.
func (*Buffer) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Buffer) ToDirective ¶
type CloudWatchOutput ¶
type CloudWatchOutput struct { // Create log group and stream automatically. (default: false) AutoCreateStream bool `json:"auto_create_stream,omitempty"` // AWS access key id // +docLink:"Secret,../secret/" AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. // +docLink:"Secret,../secret/" AwsSecretKey *secret.Secret `json:"aws_sec_key,omitempty"` // Instance Profile Credentials call retries (default: nil) AwsInstanceProfileCredentialsRetries int `json:"aws_instance_profile_credentials_retries,omitempty"` // Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See 'Cross-Account Operation' below for more detail. AwsUseSts bool `json:"aws_use_sts,omitempty"` // The role ARN to assume when using cross-account sts authentication AwsStsRoleArn string `json:"aws_sts_role_arn,omitempty"` // The session name to use with sts authentication (default: 'fluentd') AwsStsSessionName string `json:"aws_sts_session_name,omitempty"` // Use to set the number of threads pushing data to CloudWatch. (default: 1) Concurrency int `json:"concurrency,omitempty"` // Use this parameter to connect to the local API endpoint (for testing) Endpoint string `json:"endpoint,omitempty"` // Use to set an optional HTTP proxy HttpProxy string `json:"http_proxy,omitempty"` // Include time key as part of the log entry (default: UTC) IncludeTimeKey bool `json:"include_time_key,omitempty"` // Name of the library to be used to handle JSON data. 
For now, supported libraries are json (default) and yajl JsonHandler string `json:"json_handler,omitempty"` // Use localtime timezone for include_time_key output (overrides UTC default) Localtime bool `json:"localtime,omitempty"` // Set a hash with keys and values to tag the log group resource LogGroupAwsTags string `json:"log_group_aws_tags,omitempty"` // Specified field of records as AWS tags for the log group LogGroupAwsTagsKey string `json:"log_group_aws_tags_key,omitempty"` // Name of log group to store logs LogGroupName string `json:"log_group_name,omitempty"` // Specified field of records as log group name LogGroupNameKey string `json:"log_group_name_key,omitempty"` // Output rejected_log_events_info request log. (default: false) LogRejectedRequest string `json:"log_rejected_request,omitempty"` // Name of log stream to store logs LogStreamName string `json:"log_stream_name,omitempty"` // Specified field of records as log stream name LogStreamNameKey string `json:"log_stream_name_key,omitempty"` // Maximum number of events to send at once (default: 10000) MaxEventsPerBatch int `json:"max_events_per_batch,omitempty"` // Maximum length of the message MaxMessageLength int `json:"max_message_length,omitempty"` // Keys to send messages as events MessageKeys string `json:"message_keys,omitempty"` // If true, put_log_events_retry_limit will be ignored PutLogEventsDisableRetryLimit bool `json:"put_log_events_disable_retry_limit,omitempty"` // Maximum count of retry (if exceeding this, the events will be discarded) PutLogEventsRetryLimit int `json:"put_log_events_retry_limit,omitempty"` // Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count)) PutLogEventsRetryWait string `json:"put_log_events_retry_wait,omitempty"` // AWS Region Region string `json:"region"` // Remove field specified by log_group_aws_tags_key RemoveLogGroupAwsTagsKey string `json:"remove_log_group_aws_tags_key,omitempty"` // 
Remove field specified by log_group_name_key RemoveLogGroupNameKey string `json:"remove_log_group_name_key,omitempty"` // Remove field specified by log_stream_name_key RemoveLogStreamNameKey string `json:"remove_log_stream_name_key,omitempty"` // Remove field specified by retention_in_days RemoveRetentionInDays string `json:"remove_retention_in_days,omitempty"` // Use to set the expiry time for log group when created with auto_create_stream. (default to no expiry) RetentionInDays string `json:"retention_in_days,omitempty"` // Use specified field of records as retention period RetentionInDaysKey string `json:"retention_in_days_key,omitempty"` // Use tag as a group name UseTagAsGroup bool `json:"use_tag_as_group,omitempty"` // Use tag as a stream name UseTagAsStream bool `json:"use_tag_as_stream,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*CloudWatchOutput) DeepCopy ¶
func (in *CloudWatchOutput) DeepCopy() *CloudWatchOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchOutput.
func (*CloudWatchOutput) DeepCopyInto ¶
func (in *CloudWatchOutput) DeepCopyInto(out *CloudWatchOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*CloudWatchOutput) ToDirective ¶
func (c *CloudWatchOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Compress ¶
type Compress struct { // Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd)(default: snappy) ParquetCompressionCodec string `json:"parquet_compression_codec,omitempty"` // Parquet file page size. (default: 8192 bytes) ParquetPageSize string `json:"parquet_page_size,omitempty"` // Parquet file row group size. (default: 128 MB) ParquetRowGroupSize string `json:"parquet_row_group_size,omitempty"` // Record data format type. (avro csv jsonl msgpack tsv msgpack json) (default: msgpack) RecordType string `json:"record_type,omitempty"` // Schema type. (avro, bigquery) (default: avro) SchemaType string `json:"schema_type,omitempty"` // Path to schema file. SchemaFile string `json:"schema_file,omitempty"` }
+kubebuilder:object:generate=true +docName:"Parquet compressor" Parquet compressor configuration
func (*Compress) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Compress.
func (*Compress) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DatadogOutput ¶
type DatadogOutput struct { // This parameter is required in order to authenticate your fluent agent. (default: nil) // +docLink:"Secret,../secret/" ApiKey *secret.Secret `json:"api_key"` // Event format, if true, the event is sent in json format. Otherwise, in plain text. (default: true) UseJson bool `json:"use_json,omitempty"` // Automatically include the Fluentd tag in the record. (default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // Where to store the Fluentd tag. (default: "tag") TagKey string `json:"tag_key,omitempty"` //Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. (default: "@timestamp") TimestampKey string `json:"timestamp_key,omitempty"` // If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. (default: true) UseSsl bool `json:"use_ssl,omitempty"` // Disable SSL validation (useful for proxy forwarding) (default: false) NoSslValidation bool `json:"no_ssl_validation,omitempty"` // Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. (default: "443") SslPort string `json:"ssl_port,omitempty"` // The number of retries before the output plugin stops. Set to -1 for unlimited retries (default: "-1") MaxRetries string `json:"max_retries,omitempty"` // The maximum time waited between each retry in seconds (default: "30") MaxBackoff string `json:"max_backoff,omitempty"` // Enable HTTP forwarding. 
If you disable it, make sure to change the port to 10514 or ssl_port to 10516 (default: true) UseHttp bool `json:"use_http,omitempty"` // Enable log compression for HTTP (default: true) UseCompression bool `json:"use_compression,omitempty"` // Set the log compression level for HTTP (1 to 9, 9 being the best ratio) (default: "6") CompressionLevel string `json:"compression_level,omitempty"` // This tells Datadog what integration it is (default: nil) DdSource string `json:"dd_source,omitempty"` // Multiple value attribute. Can be used to refine the source attribute (default: nil) DdSourcecategory string `json:"dd_sourcecategory,omitempty"` // Custom tags with the following format "key1:value1, key2:value2" (default: nil) DdTags string `json:"dd_tags,omitempty"` // Used by Datadog to identify the host submitting the logs. (default: "hostname -f") DdHostname string `json:"dd_hostname,omitempty"` // Used by Datadog to correlate between logs, traces and metrics. (default: nil) Service string `json:"service,omitempty"` // Proxy port when logs are not directly forwarded to Datadog and ssl is not used (default: "80") Port string `json:"port,omitempty"` // Proxy endpoint when logs are not directly forwarded to Datadog (default: "http-intake.logs.datadoghq.com") Host string `json:"host,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*DatadogOutput) DeepCopy ¶
func (in *DatadogOutput) DeepCopy() *DatadogOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogOutput.
func (*DatadogOutput) DeepCopyInto ¶
func (in *DatadogOutput) DeepCopyInto(out *DatadogOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DatadogOutput) ToDirective ¶
func (a *DatadogOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type ElasticsearchOutput ¶
type ElasticsearchOutput struct { // You can specify Elasticsearch host by this parameter. (default:localhost) Host string `json:"host,omitempty"` // You can specify Elasticsearch port by this parameter.(default: 9200) Port int `json:"port,omitempty"` // You can specify multiple Elasticsearch hosts with separator ",". If you specify hosts option, host and port options are ignored. Hosts string `json:"hosts,omitempty"` // User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+} User string `json:"user,omitempty"` // Password for HTTP Basic authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // Path for HTTP Basic authentication. Path string `json:"path,omitempty"` // Connection scheme (default: http) Scheme string `json:"scheme,omitempty"` // +kubebuilder:validation:Optional // Skip ssl verification (default: true) SslVerify *bool `json:"ssl_verify,omitempty" plugin:"default:true"` // If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] SslVersion string `json:"ssl_version,omitempty"` // Specify min/max SSL/TLS version SslMaxVersion string `json:"ssl_max_version,omitempty"` SslMinVersion string `json:"ssl_min_version,omitempty"` // CA certificate SSLCACert *secret.Secret `json:"ca_file,omitempty"` // Client certificate SSLClientCert *secret.Secret `json:"client_cert,omitempty"` // Client certificate key SSLClientCertKey *secret.Secret `json:"client_key,omitempty"` // Client key password SSLClientCertKeyPass *secret.Secret `json:"client_key_pass,omitempty"` // Enable Logstash log format.(default: false) LogstashFormat bool `json:"logstash_format,omitempty"` // Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. 
This allows one to log to an alias in Elasticsearch and utilize the rollover API.(default: false) IncludeTimestamp bool `json:"include_timestamp,omitempty"` // Set the Logstash prefix.(default: logstash) LogstashPrefix string `json:"logstash_prefix,omitempty"` // Set the Logstash prefix separator.(default: -) LogstashPrefixSeparator string `json:"logstash_prefix_separator,omitempty"` // Set the Logstash date format.(default: %Y.%m.%d) LogstashDateformat string `json:"logstash_dateformat,omitempty"` // The index name to write events to (default: fluentd) IndexName string `json:"index_name,omitempty"` // Set the index type for elasticsearch. This is the fallback if `target_type_key` is missing. (default: fluentd) TypeName string `json:"type_name,omitempty"` // This param is to set a pipeline id of your elasticsearch to be added into the request, you can configure ingest node. Pipeline string `json:"pipeline,omitempty"` // The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to. TimeKeyFormat string `json:"time_key_format,omitempty"` // Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event. TimePrecision string `json:"time_precision,omitempty"` // By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you'd like to use a custom time, include an @timestamp with your record. TimeKey string `json:"time_key,omitempty"` // By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). 
This option allows to use local time if you describe utc_index to false.(default: true) // +kubebuilder:validation:Optional UtcIndex *bool `json:"utc_index,omitempty" plugin:"default:true"` // Suppress type name to avoid warnings in Elasticsearch 7.x SuppressTypeName *bool `json:"suppress_type_name,omitempty"` // Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot ('.') as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key TargetIndexKey string `json:"target_index_key,omitempty"` // Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.(default: fluentd) TargetTypeKey string `json:"target_type_key,omitempty"` // The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated. TemplateName string `json:"template_name,omitempty"` // The path to the file containing the template to install. // +docLink:"Secret,../secret/" TemplateFile *secret.Secret `json:"template_file,omitempty"` // Specify index templates in form of hash. Can contain multiple templates. Templates string `json:"templates,omitempty"` // Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration. 
CustomizeTemplate string `json:"customize_template,omitempty"` // Specify this as true when an index with rollover capability needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index RolloverIndex bool `json:"rollover_index,omitempty"` // Specify this to override the index date pattern for creating a rollover index.(default: now/d) IndexDatePattern *string `json:"index_date_pattern,omitempty"` // Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API DeflectorAlias string `json:"deflector_alias,omitempty"` // Specify the index prefix for the rollover index to be created.(default: logstash) IndexPrefix string `json:"index_prefix,omitempty"` // Specify the application name for the rollover index to be created.(default: default) ApplicationName *string `json:"application_name,omitempty"` // Always update the template, even if it already exists.(default: false) TemplateOverwrite bool `json:"template_overwrite,omitempty"` // You can specify times of retry putting template.(default: 10) MaxRetryPuttingTemplate string `json:"max_retry_putting_template,omitempty"` // Indicates whether to fail when max_retry_putting_template is exceeded. 
If you have multiple output plugins, you could use this property to avoid failing on fluentd startup.(default: true) // +kubebuilder:validation:Optional FailOnPuttingTemplateRetryExceed *bool `json:"fail_on_putting_template_retry_exceed,omitempty" plugin:"default:true"` // fail_on_detecting_es_version_retry_exceed (default: true) // +kubebuilder:validation:Optional FailOnDetectingEsVersionRetryExceed *bool `json:"fail_on_detecting_es_version_retry_exceed,omitempty" plugin:"default:true"` // You can specify times of retry obtaining Elasticsearch version.(default: 15) MaxRetryGetEsVersion string `json:"max_retry_get_es_version,omitempty"` // You can specify HTTP request timeout.(default: 5s) RequestTimeout string `json:"request_timeout,omitempty"` // You can tune how the elasticsearch-transport host reloading feature works.(default: true) // +kubebuilder:validation:Optional ReloadConnections *bool `json:"reload_connections,omitempty" plugin:"default:true"` //Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses.(default: false) ReloadOnFailure bool `json:"reload_on_failure,omitempty"` // When reload_connections true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000. 
ReloadAfter string `json:"reload_after,omitempty"` // You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected.(default: 60s) ResurrectAfter string `json:"resurrect_after,omitempty"` // This will add the Fluentd tag in the JSON record.(default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // This will add the Fluentd tag in the JSON record.(default: tag) TagKey string `json:"tag_key,omitempty"` // https://github.com/uken/fluent-plugin-elasticsearch#id_key IdKey string `json:"id_key,omitempty"` // Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event. RoutingKey string `json:"routing_key,omitempty"` // https://github.com/uken/fluent-plugin-elasticsearch#remove_keys RemoveKeys string `json:"remove_keys,omitempty"` // Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert. RemoveKeysOnUpdate string `json:"remove_keys_on_update,omitempty"` // This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works. RemoveKeysOnUpdateKey string `json:"remove_keys_on_update_key,omitempty"` // This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided. RetryTag string `json:"retry_tag,omitempty"` // The write_operation can be any of: (index,create,update,upsert)(default: index) WriteOperation string `json:"write_operation,omitempty"` // Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". 
We recommended to set this true in the presence of elasticsearch shield.(default: false) ReconnectOnError bool `json:"reconnect_on_error,omitempty"` // This is debugging purpose option to enable to obtain transporter layer log. (default: false) WithTransporterLog bool `json:"with_transporter_log,omitempty"` // With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload. (default: application/json) ContentType string `json:"content_type,omitempty"` //With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control. IncludeIndexInUrl bool `json:"include_index_in_url,omitempty"` // With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag. TimeParseErrorTag string `json:"time_parse_error_tag,omitempty"` // With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. (default: excon) HttpBackend string `json:"http_backend,omitempty"` // With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder. (default: false) PreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"` // Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look this, from different places: //{"people" => 100} {"people" => {"some" => "thing"}} //The second log line will be rejected by the Elasticsearch parser because objects and concrete values can't live in the same field. To combat this, you can enable hash flattening. 
FlattenHashes bool `json:"flatten_hashes,omitempty"` // Flatten separator FlattenHashesSeparator string `json:"flatten_hashes_separator,omitempty"` // When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. (default: false) ValidateClientVersion bool `json:"validate_client_version,omitempty"` // Default unrecoverable_error_types parameter is set up strictly. Because es_rejected_execution_exception is caused by exceeding Elasticsearch's thread pool capacity. Advanced users can increase its capacity, but normal users should follow default behavior. // If you want to increase it and forcibly retrying bulk request, please consider to change unrecoverable_error_types parameter from default value. // Change default value of thread_pool.bulk.queue_size in elasticsearch.yml) UnrecoverableErrorTypes string `json:"unrecoverable_error_types,omitempty"` // Because Elasticsearch plugin should change behavior each of Elasticsearch major versions. // For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 will handle only _doc type_name in index. // If you want to disable to verify Elasticsearch version at start up, set it as false. // When using the following configuration, ES plugin intends to communicate into Elasticsearch 6. (default: true) // +kubebuilder:validation:Optional VerifyEsVersionAtStartup *bool `json:"verify_es_version_at_startup,omitempty" plugin:"default:true"` // This parameter changes that ES plugin assumes default Elasticsearch version.(default: 5) DefaultElasticsearchVersion string `json:"default_elasticsearch_version,omitempty"` // This parameter adds additional headers to request. Example: {"token":"secret"} (default: {}) CustomHeaders string `json:"custom_headers,omitempty"` // api_key parameter adds authentication header. 
ApiKey *secret.Secret `json:"api_key,omitempty"` // By default, the error logger won't record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn't desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs. (default: false) LogEs400Reason bool `json:"log_es_400_reason,omitempty"` // By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. (default: false) SuppressDocWrap bool `json:"suppress_doc_wrap,omitempty"` // A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won't be called. It is possible also to specify classes at higher level in the hierarchy. For example // `ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"]` // will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc. IgnoreExceptions string `json:"ignore_exceptions,omitempty"` // Indicates whether to backup chunk when ignore exception occurs. (default: true) // +kubebuilder:validation:Optional ExceptionBackup *bool `json:"exception_backup,omitempty" plugin:"default:true"` // Configure bulk_message request splitting threshold size. // Default value is 20MB. (20 * 1024 * 1024) // If you specify this size as negative number, bulk_message request splitting feature will be disabled. (default: 20MB) BulkMessageRequestThreshold string `json:"bulk_message_request_threshold,omitempty"` // The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. 
This doesn't work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name SnifferClassName string `json:"sniffer_class_name,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // Enable Index Lifecycle Management (ILM). EnableIlm bool `json:"enable_ilm,omitempty"` // Specify ILM policy id. IlmPolicyID string `json:"ilm_policy_id,omitempty"` // Specify ILM policy contents as Hash. IlmPolicy string `json:"ilm_policy,omitempty"` // Specify whether overwriting ilm policy or not. IlmPolicyOverwrite bool `json:"ilm_policy_overwrite,omitempty"` // Use @type elasticsearch_data_stream DataStreamEnable *bool `json:"data_stream_enable,omitempty" plugin:"hidden"` // You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params DataStreamName string `json:"data_stream_name,omitempty"` // Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. 
(default: data_stream_name) Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream DataStreamTemplateName string `json:"data_stream_template_name,omitempty"` // Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template's or a new ILM default policy is applied. (default: data_stream_name) Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream DataStreamILMName string `json:"data_stream_ilm_name,omitempty"` // Specify data stream ILM policy contents as Hash. DataStreamIlmPolicy string `json:"data_stream_ilm_policy,omitempty"` // Specify whether overwriting data stream ilm policy or not. DataStreamIlmPolicyOverwrite bool `json:"data_stream_ilm_policy_overwrite,omitempty"` }
+kubebuilder:object:generate=true +docName:"Elasticsearch" Send your logs to Elasticsearch
func (*ElasticsearchOutput) DeepCopy ¶
func (in *ElasticsearchOutput) DeepCopy() *ElasticsearchOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchOutput.
func (*ElasticsearchOutput) DeepCopyInto ¶
func (in *ElasticsearchOutput) DeepCopyInto(out *ElasticsearchOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ElasticsearchOutput) ToDirective ¶
func (e *ElasticsearchOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Endpoint ¶
type Endpoint struct { // LogZ URL. URL string `json:"url,omitempty" plugin:"default:https://listener.logz.io"` // Port over which to connect to LogZ URL. Port int `json:"port,omitempty" plugin:"default:8071"` // LogZ API Token. // +docLink:"Secret,../secret/" Token *secret.Secret `json:"token,omitempty"` }
Endpoint defines connection details for LogZ.io. +kubebuilder:object:generate=true +docName:"Endpoint"
func (*Endpoint) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
func (*Endpoint) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Endpoint) ToDirective ¶
ToDirective converts Endpoint struct to fluentd configuration.
type EndpointCredentials ¶
type EndpointCredentials struct { // AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. Region string `json:"region,omitempty"` // AWS connection url. Url string `json:"url,omitempty"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AccessKeyId *secret.Secret `json:"access_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. SecretAccessKey *secret.Secret `json:"secret_access_key,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleArn *secret.Secret `json:"assume_role_arn,omitempty"` // Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value EcsContainerCredentialsRelativeUri *secret.Secret `json:"ecs_container_credentials_relative_uri,omitempty"` // AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html AssumeRoleSessionName *secret.Secret `json:"assume_role_session_name,omitempty"` // AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html AssumeRoleWebIdentityTokenFile *secret.Secret `json:"assume_role_web_identity_token_file,omitempty"` // By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html StsCredentialsRegion *secret.Secret `json:"sts_credentials_region,omitempty"` }
+kubebuilder:object:generate=true +docName:"Endpoint Credentials" endpoint
func (*EndpointCredentials) DeepCopy ¶
func (in *EndpointCredentials) DeepCopy() *EndpointCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointCredentials.
func (*EndpointCredentials) DeepCopyInto ¶
func (in *EndpointCredentials) DeepCopyInto(out *EndpointCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*EndpointCredentials) ToDirective ¶
func (o *EndpointCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Fields ¶
func (Fields) ToDirective ¶
type FileOutputConfig ¶
type FileOutputConfig struct { // The Path of the file. The actual path is path + time + ".log" by default. Path string `json:"path"` // The flushed chunk is appended to existence file or not. The default is not appended. Append bool `json:"append,omitempty"` // +kubebuilder:validation:Optional // Add path suffix(default: true) AddPathSuffix *bool `json:"add_path_suffix,omitempty" plugin:"default:true"` // The suffix of output result.(default: ".log") PathSuffix string `json:"path_suffix,omitempty"` // Create symlink to temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs.(default: false) SymlinkPath bool `json:"symlink_path,omitempty"` // Compresses flushed files using gzip. No compression is performed by default. Compress string `json:"compress,omitempty"` // Performs compression again even if the buffer chunk is already compressed. (default: false) Recompress bool `json:"recompress,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true
func (*FileOutputConfig) DeepCopy ¶
func (in *FileOutputConfig) DeepCopy() *FileOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileOutputConfig.
func (*FileOutputConfig) DeepCopyInto ¶
func (in *FileOutputConfig) DeepCopyInto(out *FileOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FileOutputConfig) ToDirective ¶
func (c *FileOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type FluentdServer ¶
type FluentdServer struct { // The IP address or host name of the server. Host string `json:"host"` // The name of the server. Used for logging and certificate verification in TLS transport (when host is address). Name string `json:"name,omitempty"` // The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port. (default: 24224) Port int `json:"port,omitempty"` SharedKey *secret.Secret `json:"shared_key,omitempty"` // The username for authentication. Username *secret.Secret `json:"username,omitempty"` // The password for authentication. Password *secret.Secret `json:"password,omitempty"` // Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then. Standby bool `json:"standby,omitempty"` // The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio. (default: 60). Weight int `json:"weight,omitempty"` }
+kubebuilder:object:generate=true +docName:"Fluentd Server" server
func (*FluentdServer) DeepCopy ¶
func (in *FluentdServer) DeepCopy() *FluentdServer
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdServer.
func (*FluentdServer) DeepCopyInto ¶
func (in *FluentdServer) DeepCopyInto(out *FluentdServer)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FluentdServer) ToDirective ¶
func (f *FluentdServer) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Format ¶
type Format struct { // Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value (default: json) // +kubebuilder:validation:Enum=out_file;json;ltsv;csv;msgpack;hash;single_value Type string `json:"type,omitempty"` // When type is single_value add '\n' to the end of the message (default: true) AddNewline *bool `json:"add_newline,omitempty"` // When type is single_value specify the key holding information MessageKey string `json:"message_key,omitempty"` }
+kubebuilder:object:generate=true
func (*Format) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Format.
func (*Format) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Format) ToDirective ¶
type FormatRfc5424 ¶
type FormatRfc5424 struct { // Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value (default: json) // +kubebuilder:validation:Enum=out_file;json;ltsv;csv;msgpack;hash;single_value Type string `json:"type,omitempty"` // Prepends message length for syslog transmission (default: true) Rfc6587MessageSize *bool `json:"rfc6587_message_size,omitempty"` // Sets host name in syslog from field in fluentd, delimited by '.' (default: hostname) HostnameField string `json:"hostname_field,omitempty"` // Sets app name in syslog from field in fluentd, delimited by '.' (default: app_name) AppNameField string `json:"app_name_field,omitempty"` // Sets proc id in syslog from field in fluentd, delimited by '.' (default: proc_id) ProcIdField string `json:"proc_id_field,omitempty"` // Sets msg id in syslog from field in fluentd, delimited by '.' (default: message_id) MessageIdField string `json:"message_id_field,omitempty"` // Sets structured data in syslog from field in fluentd, delimited by '.' (default structured_data) StructuredDataField string `json:"structured_data_field,omitempty"` // Sets log in syslog from field in fluentd, delimited by '.' (default: log) LogField string `json:"log_field,omitempty"` }
+kubebuilder:object:generate=true
func (*FormatRfc5424) DeepCopy ¶
func (in *FormatRfc5424) DeepCopy() *FormatRfc5424
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatRfc5424.
func (*FormatRfc5424) DeepCopyInto ¶
func (in *FormatRfc5424) DeepCopyInto(out *FormatRfc5424)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FormatRfc5424) ToDirective ¶
func (f *FormatRfc5424) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type ForwardOutput ¶
type ForwardOutput struct { // Server definitions at least one is required // +docLink:"Server,#fluentd-server" FluentdServers []FluentdServer `json:"servers"` // The transport protocol to use [ tcp, tls ] Transport string `json:"transport,omitempty"` // Change the protocol to at-least-once. The plugin waits the ack from destination's in_forward plugin. RequireAckResponse bool `json:"require_ack_response,omitempty"` // This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries. (default: 190) AckResponseTimeout int `json:"ack_response_timeout,omitempty"` // The timeout time when sending event logs. (default: 60) SendTimeout int `json:"send_timeout,omitempty"` // The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised. ConnectTimeout int `json:"connect_timeout,omitempty"` // The wait time before accepting a server fault recovery. (default: 10) RecoverWait int `json:"recover_wait,omitempty"` // The transport protocol to use for heartbeats. Set "none" to disable heartbeat. [transport, tcp, udp, none] HeartbeatType string `json:"heartbeat_type,omitempty"` // The interval of the heartbeat packer. (default: 1) HeartbeatInterval int `json:"heartbeat_interval,omitempty"` // Use the "Phi accrual failure detector" to detect server failure. (default: true) PhiFailureDetector bool `json:"phi_failure_detector,omitempty"` // The threshold parameter used to detect server faults. (default: 16) //`phi_threshold` is deeply related to `heartbeat_interval`. If you are using longer `heartbeat_interval`, please use the larger `phi_threshold`. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for `heartbeat_interval` 1s. PhiThreshold int `json:"phi_threshold,omitempty"` // The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter. 
(default: 60) HardTimeout int `json:"hard_timeout,omitempty"` // Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache. (default: 0) ExpireDnsCache int `json:"expire_dns_cache,omitempty"` // Enable client-side DNS round robin. Uniform randomly pick an IP address to send data when a hostname has several IP addresses. // `heartbeat_type udp` is not available with `dns_round_robin true`. Use `heartbeat_type tcp` or `heartbeat_type none`. DnsRoundRobin bool `json:"dns_round_robin,omitempty"` // Ignore DNS resolution and errors at startup time. IgnoreNetworkErrorsAtStartup bool `json:"ignore_network_errors_at_startup,omitempty"` // The default version of TLS transport. [TLSv1_1, TLSv1_2] (default: TLSv1_2) TlsVersion string `json:"tls_version,omitempty"` // The cipher configuration of TLS transport. (default: ALL:!aNULL:!eNULL:!SSLv2) TlsCiphers string `json:"tls_ciphers,omitempty"` // Skip all verification of certificates or not. (default: false) TlsInsecureMode bool `json:"tls_insecure_mode,omitempty"` // Allow self signed certificates or not. (default: false) TlsAllowSelfSignedCert bool `json:"tls_allow_self_signed_cert,omitempty"` // Verify hostname of servers and certificates or not in TLS transport. (default: true) TlsVerifyHostname bool `json:"tls_verify_hostname,omitempty"` // The additional CA certificate path for TLS. TlsCertPath *secret.Secret `json:"tls_cert_path,omitempty"` // The client certificate path for TLS TlsClientCertPath *secret.Secret `json:"tls_client_cert_path,omitempty"` // The client private key path for TLS. TlsClientPrivateKeyPath *secret.Secret `json:"tls_client_private_key_path,omitempty"` // The client private key passphrase for TLS. TlsClientPrivateKeyPassphrase *secret.Secret `json:"tls_client_private_key_passphrase,omitempty"` // The certificate thumbprint for searching from Windows system certstore This parameter is for Windows only. 
TlsCertThumbprint string `json:"tls_cert_thumbprint,omitempty"` // The certificate logical store name on Windows system certstore. This parameter is for Windows only. TlsCertLogicalStoreName string `json:"tls_cert_logical_store_name,omitempty"` // Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only. TlsCertUseEnterpriseStore bool `json:"tls_cert_use_enterprise_store,omitempty"` // Enable keepalive connection. (default: false) Keepalive bool `json:"keepalive,omitempty"` // Expired time of keepalive. Default value is nil, which means to keep connection as long as possible. (default: 0) KeepaliveTimeout int `json:"keepalive_timeout,omitempty"` // +docLink:"Security,../../common/security/" Security *common.Security `json:"security,omitempty"` // Verify that a connection can be made with one of out_forward nodes at the time of startup. (default: false) VerifyConnectionAtStartup bool `json:"verify_connection_at_startup,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true
func (*ForwardOutput) DeepCopy ¶
func (in *ForwardOutput) DeepCopy() *ForwardOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardOutput.
func (*ForwardOutput) DeepCopyInto ¶
func (in *ForwardOutput) DeepCopyInto(out *ForwardOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ForwardOutput) ToDirective ¶
func (f *ForwardOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type GCSOutput ¶
type GCSOutput struct { // Project identifier for GCS Project string `json:"project"` // Path of GCS service account credentials JSON file Keyfile string `json:"keyfile,omitempty"` // GCS service account credentials in JSON format // +docLink:"Secret,../secret/" CredentialsJson *secret.Secret `json:"credentials_json,omitempty"` // Number of times to retry requests on server error ClientRetries int `json:"client_retries,omitempty"` // Default timeout to use in requests ClientTimeout int `json:"client_timeout,omitempty"` // Name of a GCS bucket Bucket string `json:"bucket"` // Format of GCS object keys (default: %{path}%{time_slice}_%{index}.%{file_extension}) ObjectKeyFormat string `json:"object_key_format,omitempty"` // Path prefix of the files on GCS Path string `json:"path,omitempty"` // Archive format on GCS: gzip json text (default: gzip) StoreAs string `json:"store_as,omitempty"` // Enable the decompressive form of transcoding Transcoding bool `json:"transcoding,omitempty"` // Create GCS bucket if it does not exists (default: true) AutoCreateBucket bool `json:"auto_create_bucket,omitempty"` // Max length of `%{hex_random}` placeholder(4-16) (default: 4) HexRandomLength int `json:"hex_random_length,omitempty"` // Overwrite already existing path (default: false) Overwrite bool `json:"overwrite,omitempty"` // Permission for the object in GCS: auth_read owner_full owner_read private project_private public_read // +kubebuilder:validation:enum=auth_read,owner_full,owner_read,private,project_private,public_read Acl string `json:"acl,omitempty"` // Storage class of the file: dra nearline coldline multi_regional regional standard // +kubebuilder:validation:enum=dra,nearline,coldline,multi_regional,regional,standard StorageClass string `json:"storage_class,omitempty"` // Customer-supplied, AES-256 encryption key EncryptionKey string `json:"encryption_key,omitempty"` // User provided web-safe keys and arbitrary string values that will returned with requests for the file 
as "x-goog-meta-" response headers. // +docLink:"Object Metadata,#objectmetadata" ObjectMetadata []ObjectMetadata `json:"object_metadata,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true
func (*GCSOutput) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSOutput.
func (*GCSOutput) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*GCSOutput) ToDirective ¶
type GELFOutputConfig ¶
type GELFOutputConfig struct { // Destination host Host string `json:"host"` // Destination host port Port int `json:"port"` // Transport Protocol (default: "udp") Protocol string `json:"protocol,omitempty"` // Enable TLS (default: false) TLS *bool `json:"tls,omitempty"` // TLS Options (default: {}) - for options see https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12 TLSOptions map[string]string `json:"tls_options,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*GELFOutputConfig) DeepCopy ¶
func (in *GELFOutputConfig) DeepCopy() *GELFOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GELFOutputConfig.
func (*GELFOutputConfig) DeepCopyInto ¶
func (in *GELFOutputConfig) DeepCopyInto(out *GELFOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*GELFOutputConfig) ToDirective ¶
func (s *GELFOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type HTTPAuth ¶
type HTTPAuth struct { // Username for basic authentication. // +docLink:"Secret,../secret/" Username *secret.Secret `json:"username"` // Password for basic authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password"` }
+kubebuilder:object:generate=true +docName:"HTTP auth config" http_auth
func (*HTTPAuth) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuth.
func (*HTTPAuth) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*HTTPAuth) ToDirective ¶
type HTTPOutputConfig ¶
type HTTPOutputConfig struct { // Endpoint for HTTP request. Endpoint string `json:"endpoint"` // Method for HTTP request. [post, put] (default: post) HTTPMethod string `json:"http_method,omitempty"` // Proxy for HTTP request. Proxy string `json:"proxy,omitempty"` // Content-Profile for HTTP request. ContentType string `json:"content_type,omitempty"` // Using array format of JSON. This parameter is used and valid only for json format. When json_array as true, Content-Profile should be application/json and be able to use JSON data for the HTTP request body. (default: false) JsonArray bool `json:"json_array,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // Additional headers for HTTP request. Headers map[string]string `json:"headers,omitempty"` // Connection open timeout in seconds. OpenTimeout int `json:"open_timeout,omitempty"` // Read timeout in seconds. ReadTimeout int `json:"read_timeout,omitempty"` // TLS timeout in seconds. SSLTimeout int `json:"ssl_timeout,omitempty"` // The default version of TLS transport. [TLSv1_1, TLSv1_2] (default: TLSv1_2) TlsVersion string `json:"tls_version,omitempty"` // The cipher configuration of TLS transport. (default: ALL:!aNULL:!eNULL:!SSLv2) TlsCiphers string `json:"tls_ciphers,omitempty"` // The CA certificate path for TLS. TlsCACertPath *secret.Secret `json:"tls_ca_cert_path,omitempty"` // The client certificate path for TLS. TlsClientCertPath *secret.Secret `json:"tls_client_cert_path,omitempty"` // The client private key path for TLS. TlsPrivateKeyPath *secret.Secret `json:"tls_private_key_path,omitempty"` // The client private key passphrase for TLS. TlsPrivateKeyPassphrase *secret.Secret `json:"tls_private_key_passphrase,omitempty"` // The verify mode of TLS. [peer, none] (default: peer) TlsVerifyMode string `json:"tls_verify_mode,omitempty"` // Raise UnrecoverableError when the response code is non success, 1xx/3xx/4xx/5xx. 
If false, the plugin logs error message instead of raising UnrecoverableError. (default: true) ErrorResponseAsUnrecoverable *bool `json:"error_response_as_unrecoverable,omitempty"` // List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2 the Status code 503 is going to be removed from default. (default: [503]) RetryableResponseCodes []int `json:"retryable_response_codes,omitempty"` // +docLink:"HTTP auth,#http-auth-config" Auth *HTTPAuth `json:"auth,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*HTTPOutputConfig) DeepCopy ¶
func (in *HTTPOutputConfig) DeepCopy() *HTTPOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOutputConfig.
func (*HTTPOutputConfig) DeepCopyInto ¶
func (in *HTTPOutputConfig) DeepCopyInto(out *HTTPOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*HTTPOutputConfig) ToDirective ¶
func (c *HTTPOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KafkaOutputConfig ¶
type KafkaOutputConfig struct { // The list of all seed brokers, with their host and port information. Brokers string `json:"brokers"` // Topic Key (default: "topic") TopicKey string `json:"topic_key,omitempty"` // Partition (default: "partition") PartitionKey string `json:"partition_key,omitempty"` // Partition Key (default: "partition_key") PartitionKeyKey string `json:"partition_key_key,omitempty"` // Message Key (default: "message_key") MessageKeyKey string `json:"message_key_key,omitempty"` // Client ID (default: "kafka") ClientId string `json:"client_id,omitempty"` // The name of default topic (default: nil). DefaultTopic string `json:"default_topic,omitempty"` // The name of default partition key (default: nil). DefaultPartitionKey string `json:"default_partition_key,omitempty"` // The name of default message key (default: nil). DefaultMessageKey string `json:"default_message_key,omitempty"` // Exclude Topic key (default: false) ExcludeTopicKey bool `json:"exclude_topic_key,omitempty"` // Exclude Partition key (default: false) ExcludePartitionKey bool `json:"exclude_partion_key,omitempty"` // Get Kafka Client log (default: false) GetKafkaClientLog bool `json:"get_kafka_client_log,omitempty"` // Headers (default: {}) Headers map[string]string `json:"headers,omitempty"` // Headers from Record (default: {}) HeadersFromRecord map[string]string `json:"headers_from_record,omitempty"` // Use default for unknown topics (default: false) UseDefaultForUnknownTopic bool `json:"use_default_for_unknown_topic,omitempty"` // Idempotent (default: false) Idempotent bool `json:"idempotent,omitempty"` // SASL over SSL (default: true) // +kubebuilder:validation:Optional SaslOverSSL bool `json:"sasl_over_ssl"` Principal string `json:"principal,omitempty"` Keytab *secret.Secret `json:"keytab,omitempty"` // Username when using PLAIN/SCRAM SASL authentication Username *secret.Secret `json:"username,omitempty"` // Password when using PLAIN/SCRAM SASL authentication Password 
*secret.Secret `json:"password,omitempty"` // If set, use SCRAM authentication with specified mechanism. When unset, default to PLAIN authentication ScramMechanism string `json:"scram_mechanism,omitempty"` // Number of times to retry sending of messages to a leader (default: 1) MaxSendRetries int `json:"max_send_retries,omitempty"` // The number of acks required per request (default: -1). RequiredAcks int `json:"required_acks,omitempty"` // How long the producer waits for acks. The unit is seconds (default: nil => Uses default of ruby-kafka library) AckTimeout int `json:"ack_timeout,omitempty"` // The codec the producer uses to compress messages (default: nil). The available options are gzip and snappy. CompressionCodec string `json:"compression_codec,omitempty"` // Maximum value of total message size to be included in one batch transmission. (default: 4096). KafkaAggMaxBytes int `json:"kafka_agg_max_bytes,omitempty"` // Maximum number of messages to include in one batch transmission. (default: nil). KafkaAggMaxMessages int `json:"kafka_agg_max_messages,omitempty"` // Discard the record where Kafka DeliveryFailed occurred (default: false) DiscardKafkaDeliveryFailed bool `json:"discard_kafka_delivery_failed,omitempty"` // System's CA cert store (default: false) SSLCACertsFromSystem *bool `json:"ssl_ca_certs_from_system,omitempty"` // CA certificate SSLCACert *secret.Secret `json:"ssl_ca_cert,omitempty"` // Client certificate SSLClientCert *secret.Secret `json:"ssl_client_cert,omitempty"` // Client certificate chain SSLClientCertChain *secret.Secret `json:"ssl_client_cert_chain,omitempty"` // Client certificate key SSLClientCertKey *secret.Secret `json:"ssl_client_cert_key,omitempty"` // Verify certificate hostname SSLVerifyHostname *bool `json:"ssl_verify_hostname,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. 
// Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Kafka" Send your logs to Kafka
func (*KafkaOutputConfig) DeepCopy ¶
func (in *KafkaOutputConfig) DeepCopy() *KafkaOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaOutputConfig.
func (*KafkaOutputConfig) DeepCopyInto ¶
func (in *KafkaOutputConfig) DeepCopyInto(out *KafkaOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KafkaOutputConfig) ToDirective ¶
func (e *KafkaOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisFirehoseAssumeRoleCredentials ¶
type KinesisFirehoseAssumeRoleCredentials struct { // The Amazon Resource Name (ARN) of the role to assume RoleArn string `json:"role_arn"` // An identifier for the assumed role session RoleSessionName string `json:"role_session_name"` // An IAM policy in JSON format Policy string `json:"policy,omitempty"` // The duration, in seconds, of the role session (900-3600) DurationSeconds string `json:"duration_seconds,omitempty"` // A unique identifier that is used by third parties when assuming roles in their customers' accounts. ExternalId string `json:"external_id,omitempty"` }
+kubebuilder:object:generate=true +docName:"Assume Role Credentials" assume_role_credentials
func (*KinesisFirehoseAssumeRoleCredentials) DeepCopy ¶
func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopy() *KinesisFirehoseAssumeRoleCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseAssumeRoleCredentials.
func (*KinesisFirehoseAssumeRoleCredentials) DeepCopyInto ¶
func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopyInto(out *KinesisFirehoseAssumeRoleCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisFirehoseAssumeRoleCredentials) ToDirective ¶
func (o *KinesisFirehoseAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisFirehoseOutputConfig ¶
type KinesisFirehoseOutputConfig struct { // Name of the delivery stream to put data. DeliveryStreamName string `json:"delivery_stream_name"` // If it is enabled, the plugin adds new line character (\n) to each serialized record. //Before appending \n, plugin calls chomp and removes separator from the end of each record as chomp_record is true. Therefore, you don't need to enable chomp_record option when you use kinesis_firehose output with default configuration (append_new_line is true). If you want to set append_new_line false, you can choose chomp_record false (default) or true (compatible format with plugin v2). (Default:true) AppendNewLine *bool `json:"append_new_line,omitempty"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSKeyId *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSSECKey *secret.Secret `json:"aws_sec_key,omitempty"` // AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role. AWSSESToken *secret.Secret `json:"aws_ses_token,omitempty"` // The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries. AWSIAMRetries int `json:"aws_iam_retries,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleCredentials *KinesisFirehoseAssumeRoleCredentials `json:"assume_role_credentials,omitempty"` // This loads AWS access credentials from an external process. ProcessCredentials *KinesisFirehoseProcessCredentials `json:"process_credentials,omitempty"` // AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. 
Region string `json:"region,omitempty"` // The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times RetriesOnBatchRequest int `json:"retries_on_batch_request,omitempty"` // Boolean, default true. If enabled, when after retrying, the next retrying checks the number of succeeded records on the former batch request and reset exponential backoff if there is any success. Because batch request could be composed by requests across shards, simple exponential backoff for the batch request wouldn't work some cases. ResetBackoffIfSuccess bool `json:"reset_backoff_if_success,omitempty"` // Integer, default 500. The number of max count of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxCount int `json:"batch_request_max_count,omitempty"` // Integer. The number of max size of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxSize int `json:"batch_request_max_size,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"KinesisFirehose" Send your logs to a Kinesis Firehose delivery stream
func (*KinesisFirehoseOutputConfig) DeepCopy ¶
func (in *KinesisFirehoseOutputConfig) DeepCopy() *KinesisFirehoseOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseOutputConfig.
func (*KinesisFirehoseOutputConfig) DeepCopyInto ¶
func (in *KinesisFirehoseOutputConfig) DeepCopyInto(out *KinesisFirehoseOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisFirehoseOutputConfig) ToDirective ¶
func (e *KinesisFirehoseOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisFirehoseProcessCredentials ¶
type KinesisFirehoseProcessCredentials struct { // Command. More info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html Process string `json:"process"` }
+kubebuilder:object:generate=true +docName:"Process Credentials" process_credentials
func (*KinesisFirehoseProcessCredentials) DeepCopy ¶
func (in *KinesisFirehoseProcessCredentials) DeepCopy() *KinesisFirehoseProcessCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseProcessCredentials.
func (*KinesisFirehoseProcessCredentials) DeepCopyInto ¶
func (in *KinesisFirehoseProcessCredentials) DeepCopyInto(out *KinesisFirehoseProcessCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisFirehoseProcessCredentials) ToDirective ¶
func (o *KinesisFirehoseProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisStreamAssumeRoleCredentials ¶
type KinesisStreamAssumeRoleCredentials struct { // The Amazon Resource Name (ARN) of the role to assume RoleArn string `json:"role_arn"` // An identifier for the assumed role session RoleSessionName string `json:"role_session_name"` // An IAM policy in JSON format Policy string `json:"policy,omitempty"` // The duration, in seconds, of the role session (900-3600) DurationSeconds string `json:"duration_seconds,omitempty"` // A unique identifier that is used by third parties when assuming roles in their customers' accounts. ExternalId string `json:"external_id,omitempty"` }
+kubebuilder:object:generate=true +docName:"Assume Role Credentials" assume_role_credentials
func (*KinesisStreamAssumeRoleCredentials) DeepCopy ¶
func (in *KinesisStreamAssumeRoleCredentials) DeepCopy() *KinesisStreamAssumeRoleCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamAssumeRoleCredentials.
func (*KinesisStreamAssumeRoleCredentials) DeepCopyInto ¶
func (in *KinesisStreamAssumeRoleCredentials) DeepCopyInto(out *KinesisStreamAssumeRoleCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisStreamAssumeRoleCredentials) ToDirective ¶
func (o *KinesisStreamAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisStreamOutputConfig ¶
type KinesisStreamOutputConfig struct { // Name of the stream to put data. StreamName string `json:"stream_name"` // A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly. PartitionKey string `json:"partition_key,omitempty"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSKeyId *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSSECKey *secret.Secret `json:"aws_sec_key,omitempty"` // AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role. AWSSESToken *secret.Secret `json:"aws_ses_token,omitempty"` // The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries. AWSIAMRetries int `json:"aws_iam_retries,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleCredentials *KinesisStreamAssumeRoleCredentials `json:"assume_role_credentials,omitempty"` // This loads AWS access credentials from an external process. ProcessCredentials *KinesisStreamProcessCredentials `json:"process_credentials,omitempty"` // AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. Region string `json:"region,omitempty"` // The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times RetriesOnBatchRequest int `json:"retries_on_batch_request,omitempty"` // Boolean, default true. 
If enabled, when after retrying, the next retrying checks the number of succeeded records on the former batch request and reset exponential backoff if there is any success. Because batch request could be composed by requests across shards, simple exponential backoff for the batch request wouldn't work some cases. ResetBackoffIfSuccess bool `json:"reset_backoff_if_success,omitempty"` // Integer, default 500. The number of max count of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxCount int `json:"batch_request_max_count,omitempty"` // Integer. The number of max size of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxSize int `json:"batch_request_max_size,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"KinesisStream" Send your logs to a Kinesis Stream
func (*KinesisStreamOutputConfig) DeepCopy ¶
func (in *KinesisStreamOutputConfig) DeepCopy() *KinesisStreamOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamOutputConfig.
func (*KinesisStreamOutputConfig) DeepCopyInto ¶
func (in *KinesisStreamOutputConfig) DeepCopyInto(out *KinesisStreamOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisStreamOutputConfig) ToDirective ¶
func (e *KinesisStreamOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisStreamProcessCredentials ¶
type KinesisStreamProcessCredentials struct { // Command. More info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html Process string `json:"process"` }
+kubebuilder:object:generate=true +docName:"Process Credentials" process_credentials
func (*KinesisStreamProcessCredentials) DeepCopy ¶
func (in *KinesisStreamProcessCredentials) DeepCopy() *KinesisStreamProcessCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamProcessCredentials.
func (*KinesisStreamProcessCredentials) DeepCopyInto ¶
func (in *KinesisStreamProcessCredentials) DeepCopyInto(out *KinesisStreamProcessCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisStreamProcessCredentials) ToDirective ¶
func (o *KinesisStreamProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Label ¶
func (Label) ToDirective ¶
type LogDNAOutput ¶
type LogDNAOutput struct { // LogDNA Api key ApiKey string `json:"api_key"` // Hostname HostName string `json:"hostname"` // Application name App string `json:"app,omitempty"` // Comma-Separated List of Tags, Optional Tags string `json:"tags,omitempty"` // HTTPS POST Request Timeout, Optional. Supports s and ms Suffixes (default: 30 s) RequestTimeout string `json:"request_timeout,omitempty"` // Custom Ingester URL, Optional (default: https://logs.logdna.com) IngesterDomain string `json:"ingester_domain,omitempty"` // Custom Ingester Endpoint, Optional (default: /logs/ingest) IngesterEndpoint string `json:"ingester_endpoint,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"LogDNA" Send your logs to LogDNA
func (*LogDNAOutput) DeepCopy ¶
func (in *LogDNAOutput) DeepCopy() *LogDNAOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDNAOutput.
func (*LogDNAOutput) DeepCopyInto ¶
func (in *LogDNAOutput) DeepCopyInto(out *LogDNAOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*LogDNAOutput) ToDirective ¶
func (l *LogDNAOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type LogZOutput ¶
type LogZOutput struct { // Define LogZ endpoint URL Endpoint *Endpoint `json:"endpoint"` // Should the appender add a timestamp to your logs on their process time (recommended). OutputIncludeTime bool `json:"output_include_time,omitempty"` // Should the appender add the fluentd tag to the document, called "fluentd_tag" OutputIncludeTags bool `json:"output_include_tags,omitempty"` // Timeout in seconds that the http persistent connection will stay open without traffic. HTTPIdleTimeout int `json:"http_idle_timeout,omitempty"` // How many times to resend failed bulks. RetryCount int `json:"retry_count,omitempty"` // How long to sleep initially between retries, exponential step-off. RetrySleep int `json:"retry_sleep,omitempty"` // Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead. BulkLimit int `json:"bulk_limit,omitempty"` // Limit to the size of the Logz.io warning message when a record exceeds bulk_limit to prevent a recursion when Fluent warnings are sent to the Logz.io output. BulkLimitWarningLimit int `json:"bulk_limit_warning_limit,omitempty"` // Should the plugin ship the logs in gzip compression. Default is false. Gzip bool `json:"gzip,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Logzio" Send your logs to LogZ.io
func (*LogZOutput) DeepCopy ¶
func (in *LogZOutput) DeepCopy() *LogZOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogZOutput.
func (*LogZOutput) DeepCopyInto ¶
func (in *LogZOutput) DeepCopyInto(out *LogZOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*LogZOutput) ToDirective ¶
func (e *LogZOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
ToDirective converts LogZOutput to fluentd configuration.
type LokiOutput ¶
type LokiOutput struct { // The url of the Loki server to send logs to. (default:https://logs-us-west1.grafana.net) Url string `json:"url,omitempty"` // Specify a username if the Loki server requires authentication. // +docLink:"Secret,../secret/" Username *secret.Secret `json:"username,omitempty"` // Specify password if the Loki server requires authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // TLS: parameters for presenting a client certificate // +docLink:"Secret,../secret/" Cert *secret.Secret `json:"cert,omitempty"` // TLS: parameters for presenting a client certificate // +docLink:"Secret,../secret/" Key *secret.Secret `json:"key,omitempty"` // TLS: CA certificate file for server certificate verification // +docLink:"Secret,../secret/" CaCert *secret.Secret `json:"ca_cert,omitempty"` // TLS: disable server certificate verification (default: false) InsecureTLS *bool `json:"insecure_tls,omitempty"` // Loki is a multi-tenant log storage platform and all requests sent must include a tenant. Tenant string `json:"tenant,omitempty"` // Set of labels to include with every Loki stream. Labels Label `json:"labels,omitempty"` // Set of extra labels to include with every Loki stream. ExtraLabels map[string]string `json:"extra_labels,omitempty"` // Format to use when flattening the record to a log line: json, key_value (default: key_value) LineFormat string `json:"line_format,omitempty" plugin:"default:json"` // Extract kubernetes labels as loki labels (default: false) ExtractKubernetesLabels *bool `json:"extract_kubernetes_labels,omitempty"` // Comma separated list of needless record keys to remove (default: []) RemoveKeys []string `json:"remove_keys,omitempty"` // If a record only has 1 key, then just set the log line to the value and discard the key. 
(default: false) DropSingleKey *bool `json:"drop_single_key,omitempty"` // Configure Kubernetes metadata in a Prometheus like format (default: false) ConfigureKubernetesLabels *bool `json:"configure_kubernetes_labels,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*LokiOutput) DeepCopy ¶
func (in *LokiOutput) DeepCopy() *LokiOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiOutput.
func (*LokiOutput) DeepCopyInto ¶
func (in *LokiOutput) DeepCopyInto(out *LokiOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*LokiOutput) ToDirective ¶
func (l *LokiOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type NewRelicOutputConfig ¶
type NewRelicOutputConfig struct { // New Relic API Insert key // +docLink:"Secret,../secret/" APIKey *secret.Secret `json:"api_key,omitempty"` // New Relic License Key (recommended) // +docLink:"Secret,../secret/" // LicenseKey *secret.Secret `json:"license_key,omitempty"` LicenseKey *secret.Secret `json:"license_key,omitempty"` // New Relic ingestion endpoint // +docLink:"Secret,../secret/" BaseURI string `json:"base_uri,omitempty" plugin:"default:https://log-api.newrelic.com/log/v1"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*NewRelicOutputConfig) DeepCopy ¶
func (in *NewRelicOutputConfig) DeepCopy() *NewRelicOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewRelicOutputConfig.
func (*NewRelicOutputConfig) DeepCopyInto ¶
func (in *NewRelicOutputConfig) DeepCopyInto(out *NewRelicOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*NewRelicOutputConfig) ToDirective ¶
func (c *NewRelicOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type NullOutputConfig ¶
type NullOutputConfig struct { }
func NewNullOutputConfig ¶
func NewNullOutputConfig() *NullOutputConfig
func (*NullOutputConfig) DeepCopy ¶
func (in *NullOutputConfig) DeepCopy() *NullOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NullOutputConfig.
func (*NullOutputConfig) DeepCopyInto ¶
func (in *NullOutputConfig) DeepCopyInto(out *NullOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*NullOutputConfig) ToDirective ¶
func (c *NullOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type OSSOutput ¶
type OSSOutput struct { // OSS endpoint to connect to Endpoint string `json:"endpoint"` // Your bucket name Bucket string `json:"bucket"` // Your access key id // +docLink:"Secret,../secret/" AccessKeyId *secret.Secret `json:"access_key_id"` // Your access secret key // +docLink:"Secret,../secret/" AaccessKeySecret *secret.Secret `json:"aaccess_key_secret"` // Path prefix of the files on OSS (default: fluent/logs) Path string `json:"path,omitempty"` // Upload crc enabled (default: true) UploadCrcEnable bool `json:"upload_crc_enable,omitempty"` // Download crc enabled (default: true) DownloadCrcEnable bool `json:"download_crc_enable,omitempty"` // Timeout for open connections (default: 10) OpenTimeout int `json:"open_timeout,omitempty"` // Timeout for read response (default: 120) ReadTimeout int `json:"read_timeout,omitempty"` // OSS SDK log directory (default: /var/log/td-agent) OssSdkLogDir string `json:"oss_sdk_log_dir,omitempty"` // The format of OSS object keys (default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}) KeyFormat string `json:"key_format,omitempty"` // Archive format on OSS: gzip, json, text, lzo, lzma2 (default: gzip) StoreAs string `json:"store_as,omitempty"` // Create OSS bucket if it does not exist (default: false) AutoCreateBucket bool `json:"auto_create_bucket,omitempty"` // Overwrite already existing path (default: false) Overwrite bool `json:"overwrite,omitempty"` // Check bucket if exists or not (default: true) CheckBucket bool `json:"check_bucket,omitempty"` // Check object before creation (default: true) CheckObject bool `json:"check_object,omitempty"` // The length of `%{hex_random}` placeholder(4-16) (default: 4) HexRandomLength int `json:"hex_random_length,omitempty"` // `sprintf` format for `%{index}` (default: %d) IndexFormat string `json:"index_format,omitempty"` // Given a threshold to treat events as delay, output warning logs if delayed events were put into OSS WarnForDelay string 
`json:"warn_for_delay,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*OSSOutput) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSOutput.
func (*OSSOutput) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OSSOutput) ToDirective ¶
type ObjectMetadata ¶
func (*ObjectMetadata) ToDirective ¶
func (o *ObjectMetadata) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type OpenSearchOutput ¶ added in v0.7.23
type OpenSearchOutput struct { // You can specify OpenSearch host by this parameter. (default:localhost) Host string `json:"host,omitempty"` // You can specify OpenSearch port by this parameter.(default: 9200) Port int `json:"port,omitempty"` // User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+} User string `json:"user,omitempty"` // Password for HTTP Basic authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // Path for HTTP Basic authentication. Path string `json:"path,omitempty"` // Connection scheme (default: http) Scheme string `json:"scheme,omitempty"` // You can specify multiple OpenSearch hosts with separator ",". If you specify hosts option, host and port options are ignored. Hosts string `json:"hosts,omitempty"` // Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot ('.') as a separator. TargetIndexKey string `json:"target_index_key,omitempty"` // The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to. TimeKeyFormat string `json:"time_key_format,omitempty"` // Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event. TimePrecision string `json:"time_precision,omitempty"` // Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. 
This allows one to log to an alias in OpenSearch and utilize the rollover API.(default: false) IncludeTimestamp bool `json:"include_timestamp,omitempty"` // Enable Logstash log format.(default: false) LogstashFormat bool `json:"logstash_format,omitempty"` // Set the Logstash prefix.(default: logstash) LogstashPrefix string `json:"logstash_prefix,omitempty"` // Set the Logstash prefix separator.(default: -) LogstashPrefixSeparator string `json:"logstash_prefix_separator,omitempty"` // Set the Logstash date format.(default: %Y.%m.%d) LogstashDateformat string `json:"logstash_dateformat,omitempty"` // By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows to use local time if you describe utc_index to false.(default: true) // +kubebuilder:validation:Optional UtcIndex *bool `json:"utc_index,omitempty" plugin:"default:true"` // Suppress type name to avoid warnings in OpenSearch SuppressTypeName *bool `json:"suppress_type_name,omitempty"` // The index name to write events to (default: fluentd) IndexName string `json:"index_name,omitempty"` // Field on your data to identify the data uniquely IdKey string `json:"id_key,omitempty"` // The write_operation can be any of: (index,create,update,upsert)(default: index) WriteOperation string `json:"write_operation,omitempty"` // parent_key ParentKey string `json:"parent_key,omitempty"` // routing_key RoutingKey string `json:"routing_key,omitempty"` // You can specify HTTP request timeout.(default: 5s) RequestTimeout string `json:"request_timeout,omitempty"` // You can tune how the OpenSearch-transport host reloading feature works.(default: true) // +kubebuilder:validation:Optional ReloadConnections *bool `json:"reload_connections,omitempty" plugin:"default:true"` //Indicates that the OpenSearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of 
addresses.(default: false) ReloadOnFailure bool `json:"reload_on_failure,omitempty"` // This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided. RetryTag string `json:"retry_tag,omitempty"` // You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport's pool will be resurrected.(default: 60s) ResurrectAfter string `json:"resurrect_after,omitempty"` // By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you'd like to use a custom time, include an @timestamp with your record. TimeKey string `json:"time_key,omitempty"` //time_key_exclude_timestamp (default: false) TimeKeyExcludeTimestamp bool `json:"time_key_exclude_timestamp,omitempty"` // +kubebuilder:validation:Optional // Skip ssl verification (default: true) SslVerify *bool `json:"ssl_verify,omitempty" plugin:"default:true"` // Client certificate key SSLClientCertKey *secret.Secret `json:"client_key,omitempty"` // Client certificate SSLClientCert *secret.Secret `json:"client_cert,omitempty"` // Client key password SSLClientCertKeyPass *secret.Secret `json:"client_key_pass,omitempty"` // CA certificate SSLCACert *secret.Secret `json:"ca_file,omitempty"` // If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] // Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert. RemoveKeysOnUpdate string `json:"remove_keys_on_update,omitempty"` // This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works. 
RemoveKeysOnUpdateKey string `json:"remove_keys_on_update_key,omitempty"` // https://github.com/fluent/fluent-plugin-opensearch#hash-flattening FlattenHashes bool `json:"flatten_hashes,omitempty"` // Flatten separator FlattenHashesSeparator string `json:"flatten_hashes_separator,omitempty"` // The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated. TemplateName string `json:"template_name,omitempty"` // The path to the file containing the template to install. // +docLink:"Secret,../secret/" TemplateFile *secret.Secret `json:"template_file,omitempty"` // Always update the template, even if it already exists.(default: false) TemplateOverwrite bool `json:"template_overwrite,omitempty"` // Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration. CustomizeTemplate string `json:"customize_template,omitempty"` // Specify this to override the index date pattern for creating a rollover index.(default: now/d) IndexDatePattern *string `json:"index_date_pattern,omitempty"` // index_separator (default: -) IndexSeparator string `json:"index_separator,omitempty"` // Specify the application name for the rollover index to be created.(default: default) ApplicationName *string `json:"application_name,omitempty"` // Specify index templates in form of hash. Can contain multiple templates. Templates string `json:"templates,omitempty"` // You can specify times of retry putting template.(default: 10) MaxRetryPuttingTemplate string `json:"max_retry_putting_template,omitempty"` // Indicates whether to fail when max_retry_putting_template is exceeded. 
If you have multiple output plugins, you could use this property not to fail on fluentd startup.(default: true) // +kubebuilder:validation:Optional FailOnPuttingTemplateRetryExceed *bool `json:"fail_on_putting_template_retry_exceed,omitempty" plugin:"default:true"` // fail_on_detecting_os_version_retry_exceed (default: true) // +kubebuilder:validation:Optional FailOnDetectingOsVersionRetryExceed *bool `json:"fail_on_detecting_os_version_retry_exceed,omitempty" plugin:"default:true"` // max_retry_get_os_version (default: 15) MaxRetryGetOsVersion int `json:"max_retry_get_os_version,omitempty"` // This will add the Fluentd tag in the JSON record.(default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // This will add the Fluentd tag in the JSON record.(default: tag) TagKey string `json:"tag_key,omitempty"` // With logstash_format true, OpenSearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag. TimeParseErrorTag string `json:"time_parse_error_tag,omitempty"` // Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommend to set this true in the presence of OpenSearch shield.(default: false) ReconnectOnError bool `json:"reconnect_on_error,omitempty"` // This param is to set a pipeline id of your OpenSearch to be added into the request, you can configure ingest node. Pipeline string `json:"pipeline,omitempty"` // This is debugging purpose option to enable to obtain transporter layer log. 
(default: false) WithTransporterLog bool `json:"with_transporter_log,omitempty"` // emit_error_for_missing_id (default: false) EmitErrorForMissingID bool `json:"emit_error_for_missing_id,omitempty"` // The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn't work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name SnifferClassName string `json:"sniffer_class_name,omitempty"` // selector_class_name SelectorClassName string `json:"selector_class_name,omitempty"` // When reload_connections true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000. ReloadAfter string `json:"reload_after,omitempty"` // With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control. IncludeIndexInUrl bool `json:"include_index_in_url,omitempty"` // With http_backend typhoeus, opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. 
(default: excon) HttpBackend string `json:"http_backend,omitempty"` // http_backend_excon_nonblock (default: true) // +kubebuilder:validation:Optional HttpBackendExconNonblock *bool `json:"http_backend_excon_nonblock,omitempty" plugin:"default:true"` // When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch. (default: false) ValidateClientVersion bool `json:"validate_client_version,omitempty"` // With default behavior, OpenSearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, OpenSearch client uses Oj as JSON encoder/decoder. (default: false) PreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"` // Default unrecoverable_error_types parameter is set up strictly. Because rejected_execution_exception is caused by exceeding OpenSearch's thread pool capacity. Advanced users can increase its capacity, but normal users should follow default behavior. UnrecoverableErrorTypes string `json:"unrecoverable_error_types,omitempty"` // unrecoverable_record_types UnrecoverableRecordTypes string `json:"unrecoverable_record_types,omitempty"` // emit_error_label_event (default: true) // +kubebuilder:validation:Optional EmitErrorLabelEvent *bool `json:"emit_error_label_event,omitempty" plugin:"default:true"` // verify_os_version_at_startup (default: true) // +kubebuilder:validation:Optional VerifyOsVersionAtStartup *bool `json:"verify_os_version_at_startup,omitempty" plugin:"default:true"` // default_opensearch_version (default: 1) DefaultOpensearchVersion int `json:"default_opensearch_version,omitempty"` // log_os_400_reason (default: false) LogOs400Reason bool `json:"log_os_400_reason,omitempty"` // This parameter adds additional headers to request. Example: {"token":"secret"} (default: {}) CustomHeaders string `json:"custom_headers,omitempty"` // By default, record body is wrapped by 'doc'. 
This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. (default: false) SuppressDocWrap bool `json:"suppress_doc_wrap,omitempty"` // A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won't be called. It is possible also to specify classes at higher level in the hierarchy. IgnoreExceptions string `json:"ignore_exceptions,omitempty"` // Indicates whether to backup chunk when ignore exception occurs. (default: true) // +kubebuilder:validation:Optional ExceptionBackup *bool `json:"exception_backup,omitempty" plugin:"default:true"` // Configure bulk_message request splitting threshold size. // Default value is 20MB. (20 * 1024 * 1024) // If you specify this size as negative number, bulk_message request splitting feature will be disabled. (default: 20MB) BulkMessageRequestThreshold string `json:"bulk_message_request_threshold,omitempty"` // compression_level CompressionLevel string `json:"compression_level,omitempty"` // truncate_caches_interval TruncateCachesInterval string `json:"truncate_caches_interval,omitempty"` // use_legacy_template (default: true) // +kubebuilder:validation:Optional UseLegacyTemplate *bool `json:"use_legacy_template,omitempty" plugin:"default:true"` // catch_transport_exception_on_retry (default: true) // +kubebuilder:validation:Optional CatchTransportExceptionOnRetry *bool `json:"catch_transport_exception_on_retry,omitempty" plugin:"default:true"` // target_index_affinity (default: false) TargetIndexAffinity bool `json:"target_index_affinity,omitempty"` Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. 
SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // Use @type opensearch_data_stream DataStreamEnable *bool `json:"data_stream_enable,omitempty" plugin:"hidden"` // You can specify Opensearch data stream name by this parameter. This parameter is mandatory for opensearch_data_stream. DataStreamName string `json:"data_stream_name,omitempty"` // Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. (default: data_stream_name) DataStreamTemplateName string `json:"data_stream_template_name,omitempty"` }
+kubebuilder:object:generate=true +docName:"OpenSearch" Send your logs to OpenSearch
func (*OpenSearchOutput) DeepCopy ¶ added in v0.7.23
func (in *OpenSearchOutput) DeepCopy() *OpenSearchOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenSearchOutput.
func (*OpenSearchOutput) DeepCopyInto ¶ added in v0.7.23
func (in *OpenSearchOutput) DeepCopyInto(out *OpenSearchOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OpenSearchOutput) ToDirective ¶ added in v0.7.23
func (e *OpenSearchOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type RedisOutputConfig ¶
type RedisOutputConfig struct { // Host Redis endpoint (default: localhost) Host string `json:"host,omitempty"` // Port of the Redis server (default: 6379) Port int `json:"port,omitempty"` // DbNumber database number is optional. (default: 0) DbNumber int `json:"db_number,omitempty"` // Redis Server password Password *secret.Secret `json:"password,omitempty"` // insert_key_prefix (default: "${tag}") InsertKeyPrefix string `json:"insert_key_prefix,omitempty"` // strftime_format Users can set strftime format. (default: "%s") StrftimeFormat string `json:"strftime_format,omitempty"` // allow_duplicate_key Allow insert key duplicate. It will work as update values. (default: false) AllowDuplicateKey bool `json:"allow_duplicate_key,omitempty"` // ttl If 0 or negative value is set, ttl is not set in each key. TTL int `json:"ttl,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*RedisOutputConfig) DeepCopy ¶
func (in *RedisOutputConfig) DeepCopy() *RedisOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisOutputConfig.
func (*RedisOutputConfig) DeepCopyInto ¶
func (in *RedisOutputConfig) DeepCopyInto(out *RedisOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*RedisOutputConfig) ToDirective ¶
func (c *RedisOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type S3AssumeRoleCredentials ¶
type S3AssumeRoleCredentials struct { // The Amazon Resource Name (ARN) of the role to assume RoleArn string `json:"role_arn"` // An identifier for the assumed role session RoleSessionName string `json:"role_session_name"` // An IAM policy in JSON format Policy string `json:"policy,omitempty"` // The duration, in seconds, of the role session (900-3600) DurationSeconds string `json:"duration_seconds,omitempty"` // A unique identifier that is used by third parties when assuming roles in their customers' accounts. ExternalId string `json:"external_id,omitempty"` }
+kubebuilder:object:generate=true +docName:"Assume Role Credentials" assume_role_credentials
func (*S3AssumeRoleCredentials) DeepCopy ¶
func (in *S3AssumeRoleCredentials) DeepCopy() *S3AssumeRoleCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3AssumeRoleCredentials.
func (*S3AssumeRoleCredentials) DeepCopyInto ¶
func (in *S3AssumeRoleCredentials) DeepCopyInto(out *S3AssumeRoleCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type S3InstanceProfileCredentials ¶
type S3InstanceProfileCredentials struct { // IP address (default:169.254.169.254) IpAddress string `json:"ip_address,omitempty"` // Port number (default:80) Port string `json:"port,omitempty"` // Number of seconds to wait for the connection to open HttpOpenTimeout string `json:"http_open_timeout,omitempty"` // Number of seconds to wait for one block to be read HttpReadTimeout string `json:"http_read_timeout,omitempty"` // Number of times to retry when retrieving credentials Retries string `json:"retries,omitempty"` }
+kubebuilder:object:generate=true +docName:"Instance Profile Credentials" instance_profile_credentials
func (*S3InstanceProfileCredentials) DeepCopy ¶
func (in *S3InstanceProfileCredentials) DeepCopy() *S3InstanceProfileCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceProfileCredentials.
func (*S3InstanceProfileCredentials) DeepCopyInto ¶
func (in *S3InstanceProfileCredentials) DeepCopyInto(out *S3InstanceProfileCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type S3OutputConfig ¶
type S3OutputConfig struct { // AWS access key id // +docLink:"Secret,../secret/" AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. // +docLink:"Secret,../secret/" AwsSecretKey *secret.Secret `json:"aws_sec_key,omitempty"` // Check AWS key on start CheckApikeyOnStart string `json:"check_apikey_on_start,omitempty"` // Allows grantee to read the object data and its metadata GrantRead string `json:"grant_read,omitempty"` // Overwrite already existing path Overwrite string `json:"overwrite,omitempty"` // Path prefix of the files on S3 Path string `json:"path,omitempty"` // Allows grantee to write the ACL for the applicable object GrantWriteAcp string `json:"grant_write_acp,omitempty"` // Check bucket if exists or not CheckBucket string `json:"check_bucket,omitempty"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data SseCustomerKey string `json:"sse_customer_key,omitempty" default:"10m"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 SseCustomerKeyMd5 string `json:"sse_customer_key_md5,omitempty"` // AWS SDK uses MD5 for API request/response by default ComputeChecksums string `json:"compute_checksums,omitempty"` // Given a threshold to treat events as delay, output warning logs if delayed events were put into s3 WarnForDelay string `json:"warn_for_delay,omitempty"` // Use aws-sdk-ruby bundled cert UseBundledCert string `json:"use_bundled_cert,omitempty"` // Custom S3 endpoint (like minio) S3Endpoint string `json:"s3_endpoint,omitempty"` // Specifies the AWS KMS key ID to use for object encryption SsekmsKeyId string `json:"ssekms_key_id,omitempty"` // Arbitrary S3 metadata headers to set for the object S3Metadata string `json:"s3_metadata,omitempty"` // If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain ForcePathStyle string `json:"force_path_style,omitempty"` // Create S3 bucket if it does not exists AutoCreateBucket 
string `json:"auto_create_bucket,omitempty"` // `sprintf` format for `%{index}` IndexFormat string `json:"index_format,omitempty"` // Signature version for API Request (s3,v4) SignatureVersion string `json:"signature_version,omitempty"` // If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket EnableTransferAcceleration string `json:"enable_transfer_acceleration,omitempty"` // If false, the certificate of endpoint will not be verified SslVerifyPeer string `json:"ssl_verify_peer,omitempty"` // URI of proxy environment ProxyUri string `json:"proxy_uri,omitempty"` // Allows grantee to read the object ACL GrantReadAcp string `json:"grant_read_acp,omitempty"` // Check object before creation CheckObject string `json:"check_object,omitempty"` // Specifies the algorithm to use to when encrypting the object SseCustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` // The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms) UseServerSideEncryption string `json:"use_server_side_encryption,omitempty"` // S3 region name S3Region string `json:"s3_region,omitempty"` // Permission for the object in S3 Acl string `json:"acl,omitempty"` // Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object GrantFullControl string `json:"grant_full_control,omitempty"` // The length of `%{hex_random}` placeholder(4-16) HexRandomLength string `json:"hex_random_length,omitempty"` // The format of S3 object keys (default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}) S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty" plugin:"default:%{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}"` // S3 bucket name S3Bucket string `json:"s3_bucket"` // Archive format on S3 StoreAs string `json:"store_as,omitempty"` // The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, 
INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR // For a complete list of possible values, see the [Amazon S3 API reference](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass). StorageClass string `json:"storage_class,omitempty"` // The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role AwsIamRetries string `json:"aws_iam_retries,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Assume Role Credentials,#assume-role-credentials" AssumeRoleCredentials *S3AssumeRoleCredentials `json:"assume_role_credentials,omitempty"` // +docLink:"Instance Profile Credentials,#instance-profile-credentials" InstanceProfileCredentials *S3InstanceProfileCredentials `json:"instance_profile_credentials,omitempty"` SharedCredentials *S3SharedCredentials `json:"shared_credentials,omitempty"` // Parquet compressor Compress *Compress `json:"compress,omitempty"` // One-eye format trigger (default:false) OneEyeFormat bool `json:"oneeye_format,omitempty"` // Custom cluster name (default:one-eye) ClusterName string `json:"clustername,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*S3OutputConfig) DeepCopy ¶
func (in *S3OutputConfig) DeepCopy() *S3OutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputConfig.
func (*S3OutputConfig) DeepCopyInto ¶
func (in *S3OutputConfig) DeepCopyInto(out *S3OutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*S3OutputConfig) ToDirective ¶
func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type S3SharedCredentials ¶
type S3SharedCredentials struct { // Profile name ProfileName string `json:"profile_name,omitempty"` // Path to the shared credentials file Path string `json:"path,omitempty"` }
+kubebuilder:object:generate=true +docName:"Shared Credentials" shared_credentials
func (*S3SharedCredentials) DeepCopy ¶
func (in *S3SharedCredentials) DeepCopy() *S3SharedCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SharedCredentials.
func (*S3SharedCredentials) DeepCopyInto ¶
func (in *S3SharedCredentials) DeepCopyInto(out *S3SharedCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type SQSOutputConfig ¶
type SQSOutputConfig struct { // SQS queue url e.g. https://sqs.us-west-2.amazonaws.com/123456789012/myqueue SQSUrl string `json:"sqs_url,omitempty"` // SQS queue name - required if sqs_url is not set QueueName string `json:"queue_name,omitempty"` // AWS access key id AWSKeyId *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key AWSSecKey *secret.Secret `json:"aws_sec_key,omitempty"` // Create SQS queue (default: true) CreateQueue *bool `json:"create_queue,omitempty"` // AWS region (default: ap-northeast-1) Region string `json:"region,omitempty"` // Message group id for FIFO queue MessageGroupId string `json:"message_group_id,omitempty"` // Delivery delay seconds (default: 0) DelaySeconds int `json:"delay_seconds,omitempty"` // Include tag (default: true) IncludeTag *bool `json:"include_tag,omitempty"` // Tags property name in json (default: '__tag') TagPropertyName string `json:"tag_property_name,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*SQSOutputConfig) DeepCopy ¶
func (in *SQSOutputConfig) DeepCopy() *SQSOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSOutputConfig.
func (*SQSOutputConfig) DeepCopyInto ¶
func (in *SQSOutputConfig) DeepCopyInto(out *SQSOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SQSOutputConfig) ToDirective ¶
func (s *SQSOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type SplunkHecOutput ¶
type SplunkHecOutput struct { // The type of data that will be sent to Splunk, either event or metric (default: event) DataType string `json:"data_type,omitempty"` // You can specify SplunkHec host by this parameter. HecHost string `json:"hec_host"` // The port number for the Hec token or the Hec load balancer. (default: 8088) HecPort int `json:"hec_port,omitempty"` // This is the protocol to use for calling the Hec API. Available values are: http, https. (default: https) Protocol string `json:"protocol,omitempty"` // Identifier for the Hec token. // +docLink:"Secret,../secret/" HecToken *secret.Secret `json:"hec_token"` // When data_type is set to "metric", the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use metric_name_key and metric_value_key to define metrics. (Default:true) MetricsFromEvent *bool `json:"metrics_from_event,omitempty"` // Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the metrics_from_event parameter is automatically set to false. (default: true) MetricNameKey string `json:"metric_name_key,omitempty"` // Field name that contains the metric value, this parameter is required when metric_name_key is configured. MetricValueKey string `json:"metric_value_key,omitempty"` // Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters. (default: true). CoerceToUtf8 *bool `json:"coerce_to_utf8,omitempty"` // If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter. (default: ' '). 
NonUtf8ReplacementString string `json:"non_utf8_replacement_string,omitempty"` // Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time. Index string `json:"index,omitempty"` // The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time. IndexKey string `json:"index_key,omitempty"` // The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname) Host string `json:"host,omitempty"` // Key for the host location. Cannot set both host and host_key parameters at the same time. HostKey string `json:"host_key,omitempty"` // The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time. Source string `json:"source,omitempty"` // Field name to contain source. Cannot set both source and source_key parameters at the same time. SourceKey string `json:"source_key,omitempty"` // The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both source and source_key parameters at the same time. SourceType string `json:"sourcetype,omitempty"` // Field name that contains the sourcetype. Cannot set both source and source_key parameters at the same time. SourceTypeKey string `json:"sourcetype_key,omitempty"` // By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. When set to true, all fields defined in index_key, host_key, source_key, sourcetype_key, metric_name_key, and metric_value_key are saved in the original event. 
KeepKeys bool `json:"keep_keys,omitempty"` //If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout. IdleTimeout int `json:"idle_timeout,omitempty"` // The amount of time allowed between reading two chunks from the socket. ReadTimeout int `json:"read_timeout,omitempty"` // The amount of time to wait for a connection to be opened. OpenTimeout int `json:"open_timeout,omitempty"` // The path to a file containing a PEM-format CA certificate for this client. // +docLink:"Secret,../secret/" ClientCert *secret.Secret `json:"client_cert,omitempty"` // The private key for this client.' // +docLink:"Secret,../secret/" ClientKey *secret.Secret `json:"client_key,omitempty"` // The path to a file containing a PEM-format CA certificate. // +docLink:"Secret,../secret/" CAFile *secret.Secret `json:"ca_file,omitempty"` // The path to a directory containing CA certificates in PEM format. // +docLink:"Secret,../secret/" CAPath *secret.Secret `json:"ca_path,omitempty"` // List of SSL ciphers allowed. SSLCiphers string `json:"ssl_ciphers,omitempty"` // Indicates if insecure SSL connection is allowed (default:false) InsecureSSL *bool `json:"insecure_ssl,omitempty"` // In this case, parameters inside <fields> are used as indexed fields and removed from the original input events Fields Fields `json:"fields,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"SplunkHecOutput" SplunkHecOutput sends your logs to Splunk via Hec
func (*SplunkHecOutput) DeepCopy ¶
func (in *SplunkHecOutput) DeepCopy() *SplunkHecOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkHecOutput.
func (*SplunkHecOutput) DeepCopyInto ¶
func (in *SplunkHecOutput) DeepCopyInto(out *SplunkHecOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SplunkHecOutput) ToDirective ¶
func (c *SplunkHecOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type SumologicOutput ¶
type SumologicOutput struct { // The type of data that will be sent to Sumo Logic, either logs or metrics (default: logs) DataType string `json:"data_type,omitempty"` // SumoLogic HTTP Collector URL Endpoint *secret.Secret `json:"endpoint"` // Verify ssl certificate. (default: true) VerifySsl bool `json:"verify_ssl,omitempty"` // The format of metrics you will be sending, either graphite or carbon2 or prometheus (default: graphite) MetricDataFormat string `json:"metric_data_format,omitempty"` // Format to post logs into Sumo. (default: json) LogFormat string `json:"log_format,omitempty"` // Used to specify the key when merging json or sending logs in text format (default: message) LogKey string `json:"log_key,omitempty"` // Set _sourceCategory metadata field within SumoLogic (default: nil) SourceCategory string `json:"source_category,omitempty"` // Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil) SourceName string `json:"source_name"` // Set as source::path_key's value so that the source_name can be extracted from Fluentd's buffer (default: source_name) SourceNameKey string `json:"source_name_key,omitempty"` // Set _sourceHost metadata field within SumoLogic (default: nil) SourceHost string `json:"source_host,omitempty"` // Set timeout seconds to wait until connection is opened. (default: 60) OpenTimeout int `json:"open_timeout,omitempty"` // Add timestamp (or timestamp_key) field to logs before sending to sumologic (default: true) AddTimestamp bool `json:"add_timestamp,omitempty"` // Field name when add_timestamp is on (default: timestamp) TimestampKey string `json:"timestamp_key,omitempty"` // Add the uri of the proxy environment if present. ProxyUri string `json:"proxy_uri,omitempty"` // Option to disable cookies on the HTTP Client. (default: false) DisableCookies bool `json:"disable_cookies,omitempty"` // Delimiter (default: .) 
Delimiter string `json:"delimiter,omitempty"` // Comma-separated key=value list of fields to apply to every log. [more information](https://help.sumologic.com/Manage/Fields#http-source-fields) CustomFields []string `json:"custom_fields,omitempty"` // Name of sumo client which is send as X-Sumo-Client header (default: fluentd-output) SumoClient string `json:"sumo_client,omitempty"` // Compress payload (default: false) Compress *bool `json:"compress,omitempty"` // Encoding method of compression (either gzip or deflate) (default: gzip) CompressEncoding string `json:"compress_encoding,omitempty"` // Dimensions string (eg "cluster=payment, service=credit_card") which is going to be added to every metric record. CustomDimensions string `json:"custom_dimensions,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*SumologicOutput) DeepCopy ¶
func (in *SumologicOutput) DeepCopy() *SumologicOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SumologicOutput.
func (*SumologicOutput) DeepCopyInto ¶
func (in *SumologicOutput) DeepCopyInto(out *SumologicOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SumologicOutput) ToDirective ¶
func (s *SumologicOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type SyslogOutputConfig ¶
type SyslogOutputConfig struct { // Destination host address Host string `json:"host"` // Destination host port (default: "514") Port int `json:"port,omitempty"` // Transport Protocol (default: "tls") Transport string `json:"transport,omitempty"` // skip ssl validation (default: false) Insecure *bool `json:"insecure,omitempty"` // verify_fqdn (default: nil) VerifyFqdn *bool `json:"verify_fqdn,omitempty"` // cert_store to set ca_certificate for ssl context EnableSystemCertStore *bool `json:"enable_system_cert_store,omitempty"` // file path to ca to trust TrustedCaPath *secret.Secret `json:"trusted_ca_path,omitempty"` // file path for the client certificate ClientCertPath *secret.Secret `json:"client_cert_path,omitempty"` // file path for private_key_path PrivateKeyPath *secret.Secret `json:"private_key_path,omitempty"` // PrivateKeyPassphrase for private key (default: "nil") PrivateKeyPassphrase *secret.Secret `json:"private_key_passphrase,omitempty"` // allow_self_signed_cert for mutual tls (default: false) AllowSelfSignedCert *bool `json:"allow_self_signed_cert,omitempty"` // Fqdn (default: "nil") Fqdn string `json:"fqdn,omitempty"` // TLS Version (default: "TLSv1_2") Version string `json:"version,omitempty"` // +docLink:"Format,../format_rfc5424/" Format *FormatRfc5424 `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true
func (*SyslogOutputConfig) DeepCopy ¶
func (in *SyslogOutputConfig) DeepCopy() *SyslogOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogOutputConfig.
func (*SyslogOutputConfig) DeepCopyInto ¶
func (in *SyslogOutputConfig) DeepCopyInto(out *SyslogOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SyslogOutputConfig) ToDirective ¶
func (s *SyslogOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
Source Files ¶
- aws_elasticsearch.go
- azurestore.go
- buffer.go
- cloudwatch.go
- datadog.go
- elasticsearch.go
- file.go
- format.go
- format_rfc5424.go
- forward.go
- gcs.go
- gelf.go
- http.go
- kafka.go
- kinesis_firehose.go
- kinesis_stream.go
- logdna.go
- logz.go
- loki.go
- newrelic.go
- null.go
- opensearch.go
- oss.go
- redis.go
- s3.go
- splunk_hec.go
- sqs.go
- sumologic.go
- syslog.go
- zz_generated.deepcopy.go