Documentation ¶
Constants ¶
const (
	OpLogNamespace  = "log_analysis"
	OpLogComponent  = "log_processor"
	OpLogServiceDim = "service"
)
Labels for the operational log (oplog).
const (
	MaxRetries = 13 // ~7 minutes
)
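MaxRetries bounds retry loops in the processor. A minimal sketch of a bounded, exponential-backoff retry helper (illustrative only; the helper name and backoff schedule are assumptions, not the package's actual retry logic):

import "time"

// withRetries is a hypothetical helper that retries do up to MaxRetries
// times, doubling the sleep between failed attempts.
func withRetries(do func() error) error {
	var err error
	backoff := time.Second // assumed starting backoff
	for i := 0; i < MaxRetries; i++ {
		if err = do(); err == nil {
			return nil // success
		}
		time.Sleep(backoff)
		backoff *= 2
	}
	return err // all attempts failed
}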
Variables ¶
var (
	// Session and clients that can be used by components of the log processor
	// FIXME: these should be removed as globals
	Session      *session.Session
	LambdaClient lambdaiface.LambdaAPI
	S3Client     s3iface.S3API
	SqsClient    sqsiface.SQSAPI
	SnsClient    snsiface.SNSAPI

	Config EnvConfig
)
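These globals follow the aws-sdk-go interface pattern, so each can be satisfied by the standard concrete client. A minimal initialization sketch (the Setup function name is an assumption; the constructors are the standard aws-sdk-go ones):

import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/sns"
	"github.com/aws/aws-sdk-go/service/sqs"
)

// Setup is a hypothetical initializer: each concrete AWS client
// satisfies the corresponding *iface interface used by the globals above.
func Setup() {
	Session = session.Must(session.NewSession())
	LambdaClient = lambda.New(Session)
	S3Client = s3.New(Session)
	SqsClient = sqs.New(Session)
	SnsClient = sns.New(Session)
}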
var (
	OpLogManager             = oplog.NewManager(OpLogNamespace, OpLogComponent)
	OpLogLambdaServiceDim    = zap.String(OpLogServiceDim, "lambda")
	OpLogS3ServiceDim        = zap.String(OpLogServiceDim, "s3")
	OpLogSNSServiceDim       = zap.String(OpLogServiceDim, "sns")
	OpLogProcessorServiceDim = zap.String(OpLogServiceDim, "processor")
	OpLogGlueServiceDim      = zap.String(OpLogServiceDim, "glue")
)
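Each service dimension is a zap.Field, so it can be passed directly to a zap log call alongside other fields. A minimal sketch (the logger, message, and extra field are illustrative):

import "go.uber.org/zap"

// logDownload is a hypothetical call site attaching the S3 service dimension.
func logDownload() {
	logger := zap.NewExample()
	logger.Info("downloaded object",
		OpLogS3ServiceDim,                      // adds service: "s3"
		zap.String("bucket", "example-bucket"), // hypothetical extra field
	)
}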
Functions ¶
func ConfigForDataLakeWriters ¶ added in v1.13.0
ConfigForDataLakeWriters returns a jsoniter.API configured for encoding JSON log events written to the data lake.
WARNING: This is meant ONLY for encoding processed logs written to S3.
For all other uses, use pantherlog.ConfigJSON(). You should probably not use it.
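A minimal usage sketch, assuming the function takes no arguments (consistent with the description above) and a hypothetical event type:

// event is a hypothetical processed-log record.
type event struct {
	Message string `json:"message"`
}

// encodeForS3 marshals an event with the data-lake encoder; use this
// path only for logs destined for the processed-data S3 bucket.
func encodeForS3(e *event) ([]byte, error) {
	return ConfigForDataLakeWriters().Marshal(e)
}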
Types ¶
type DataStream ¶
type DataStream struct {
	Stream      logstream.Stream
	Closer      io.Closer
	Source      *models.SourceIntegration
	S3ObjectKey string
	S3Bucket    string
}
DataStream represents a data stream for an S3 object read by the processor.
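A minimal construction sketch; the constructor name is hypothetical, and the stream, closer, and source values would typically come from an S3 GetObject call:

// openDataStream is a hypothetical convenience constructor.
func openDataStream(stream logstream.Stream, closer io.Closer,
	source *models.SourceIntegration, bucket, key string) *DataStream {
	return &DataStream{
		Stream:      stream, // line stream over the decoded object body
		Closer:      closer, // releases the underlying S3 object body
		Source:      source, // the integration that owns the object
		S3Bucket:    bucket,
		S3ObjectKey: key,
	}
}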
type EnvConfig ¶ added in v1.2.0
type EnvConfig struct {
	AwsLambdaFunctionMemorySize int    `required:"true" split_words:"true"`
	ProcessedDataBucket         string `required:"true" split_words:"true"`
	SqsQueueURL                 string `required:"true" split_words:"true"`
	SqsBatchSize                int64  `required:"true" split_words:"true"`
	SnsTopicARN                 string `required:"true" split_words:"true"`
}
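The required and split_words struct tags match github.com/kelseyhightower/envconfig conventions (an assumption), under which split_words maps SqsQueueURL to SQS_QUEUE_URL, SnsTopicARN to SNS_TOPIC_ARN, and so on. A minimal loading sketch:

import (
	"log"

	"github.com/kelseyhightower/envconfig"
)

// loadConfig is a hypothetical loader that reads each field from its
// environment variable; unset required:"true" fields cause an error.
func loadConfig() EnvConfig {
	var config EnvConfig
	if err := envconfig.Process("", &config); err != nil {
		log.Fatal(err)
	}
	return config
}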