oteleport

package module
v0.2.3 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 6, 2024 License: MIT Imports: 50 Imported by: 0

README

oteleport

oteleport is an OpenTelemetry Signal Receiver Server that stores received signals in S3 and provides a REST API to retrieve them. The project focuses on "teleporting" OpenTelemetry signals, acting as a buffering server that enables custom signal replays. It is designed to offer flexibility in how signals are managed and retrieved, making it easy to handle and replay telemetry data as needed.

Features

  • OpenTelemetry Signal Receiver: Receives OpenTelemetry signals and stores them in S3.
  • REST API: Provides a REST API to retrieve stored signals.

Installation

Pre-built release binaries are provided. Download the latest binary from the Releases page.

Usage

A simple config file example: oteleport.jsonnet

local must_env = std.native('must_env');

{
  access_keys: [
    must_env('OTELEPORT_ACCESS_KEY'),
  ],
  storage: {
    cursor_encryption_key: must_env('OTELEPORT_CURSOR_ENCRYPTION_KEY'),
    location: 's3://' + must_env('OTELEPORT_S3_BUCKET') + '/',
  },
  otlp: {
    grpc: {
      enable: true,
      address: '0.0.0.0:4317',
    },
  },
  api: {
    http: {
      enable: true,
      address: '0.0.0.0:8080',
    },
  },
}

set environment variables for your S3 bucket and encryption key.

$ oteleport --config oteleport.jsonnet

Then send signals to oteleport, for example using otel-cli.

$ otel-cli exec --protocol grpc --endpoint http://localhost:4317/ --service my-service --otlp-headers Oteleport-Access-Key=$OTELEPORT_ACCESS_KEY --name "curl google" curl https://www.google.com

Afterwards, fetch the traces from oteleport.

$ curl -X POST -H 'Content-Type: application/json' -H "Oteleport-Access-Key: $OTELEPORT_ACCESS_KEY" -d "{\"startTimeUnixNano\":$(date -v -5M +%s)000000000, \"limit\": 100}" http://localhost:8080/api/traces/fetch | jq
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  1582  100  1527  100    55  87970   3168 --:--:-- --:--:-- --:--:-- 93058
{
  "resourceSpans": [
    {
      "resource": {
        "attributes": [
          {
            "key": "service.name",
            "value": {
              "stringValue": "my-service"
            }
          }
        ]
      },
      "schemaUrl": "https://opentelemetry.io/schemas/1.17.0",
      "scopeSpans": [
        {
          "schemaUrl": "https://opentelemetry.io/schemas/1.17.0",
          "scope": {
            "name": "github.com/equinix-labs/otel-cli",
            "version": "0.4.5 0d4b8a9c49f60a6fc25ed22863259ff573332060 2024-04-01T20:56:07Z"
          },
          "spans": [
            {
              "attributes": [
                {
                  "key": "process.command",
                  "value": {
                    "stringValue": "curl"
                  }
                },
                {
                  "key": "process.command_args",
                  "value": {
                    "arrayValue": {
                      "values": [
                        {
                          "stringValue": "curl"
                        },
                        {
                          "stringValue": "https://www.google.com"
                        }
                      ]
                    }
                  }
                },
                {
                  "key": "process.owner",
                  "value": {
                    "stringValue": "ikeda-masashi"
                  }
                },
                {
                  "key": "process.pid",
                  "value": {
                    "intValue": "23539"
                  }
                },
                {
                  "key": "process.parent_pid",
                  "value": {
                    "intValue": "23540"
                  }
                }
              ],
              "endTimeUnixNano": "1729674137524531000",
              "kind": 3,
              "name": "curl google",
              "spanId": "EB1F5DC940D26FD8",
              "startTimeUnixNano": "1729674137341884000",
              "status": {},
              "traceId": "D69015A574B485137570DB01CC5B8D7D"
            },
            {
              "attributes": [
                {
                  "key": "process.command",
                  "value": {
                    "stringValue": "curl"
                  }
                },
                {
                  "key": "process.command_args",
                  "value": {
                    "arrayValue": {
                      "values": [
                        {
                          "stringValue": "curl"
                        },
                        {
                          "stringValue": "https://www.google.com"
                        }
                      ]
                    }
                  }
                },
                {
                  "key": "process.owner",
                  "value": {
                    "stringValue": "mashiike"
                  }
                },
                {
                  "key": "process.pid",
                  "value": {
                    "intValue": "00000"
                  }
                },
                {
                  "key": "process.parent_pid",
                  "value": {
                    "intValue": "00001"
                  }
                }
              ],
              "endTimeUnixNano": "1729677243239248000",
              "kind": 3,
              "name": "curl google",
              "spanId": "9F53FAE65642617D",
              "startTimeUnixNano": "1729677242838971000",
              "status": {},
              "traceId": "44E4B5F99D51AEACBF0AB243C01849A5"
            }
          ]
        }
      ]
    }
  ]
}

Usage as AWS Lambda function

oteleport can be used as an AWS Lambda function bootstrap. The Lambda function is triggered by a Lambda Function URL and works as an HTTP/OTLP and REST API endpoint.

See the _examples dir for more details. It includes Terraform code and a lambroll configuration.

Storage Flatten Options

If you use the following config, oteleport converts OpenTelemetry signals to a flat structure and saves them as JSON lines. This option is useful for Amazon Athena.

local must_env = std.native('must_env');

{
  access_keys: [
    must_env('OTELEPORT_ACCESS_KEY'),
  ],
  storage: {
    cursor_encryption_key: must_env('OTELEPORT_CURSOR_ENCRYPTION_KEY'),
    location: 's3://' + must_env('OTELEPORT_S3_BUCKET') + '/',
    flatten: true, // <- add this option
  },
  otlp: {
    grpc: {
      enable: true,
      address: '0.0.0.0:4317',
    },
  },
  api: {
    http: {
      enable: true,
      address: '0.0.0.0:8080',
    },
  },
}
traces table schema
CREATE EXTERNAL TABLE IF NOT EXISTS oteleport_traces (
    traceId STRING,
    spanId STRING,
    parentSpanId STRING,
    name STRING,
    kind INT,
    startTimeUnixNano BIGINT,
    endTimeUnixNano BIGINT,
    traceState STRING,
    resourceAttributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedResourceAttributesCount INT,
    resourceSpanSchemaUrl STRING,
    scopeName STRING,
    scopeVersion STRING,
    scopeAttributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedScopeAttributesCount INT,
    scopeSpanSchemaUrl STRING,
    attributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedAttributesCount INT,
    events ARRAY<STRUCT<name: STRING, timeUnixNano: BIGINT, attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>>>,
    droppedEventsCount INT,
    links ARRAY<STRUCT<traceId: STRING, spanId: STRING, attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>>>,
    droppedLinksCount INT,
    status STRUCT<code: INT, message: STRING>,
    flags INT
)
PARTITIONED BY (
    partition STRING
)
ROW FORMAT SERDE 'org.openx.data.jsonserde.JsonSerDe'
WITH SERDEPROPERTIES (
    'ignore.malformed.json' = 'true'
)
LOCATION 's3://<your s3 bucket name>/traces/'
TBLPROPERTIES (
    'projection.enabled' = 'true',
    'projection.partition.type' = 'date',
    'projection.partition.format' = 'yyyy/MM/dd/HH',
    'projection.partition.range' = '2023/01/01/00,NOW',
    'projection.partition.interval' = '1',
    'projection.partition.interval.unit' = 'HOURS',
    'storage.location.template' = 's3://<your s3 bucket name>/traces/${partition}/'
);
metrics table schema
CREATE EXTERNAL TABLE IF NOT EXISTS oteleport_metrics (
    description STRING,
    name STRING,
    unit STRING,
    startTimeUnixNano BIGINT,
    timeUnixNano BIGINT,
    resourceAttributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedResourceAttributesCount INT,
    resourceMetricSchemaUrl STRING,
    scopeName STRING,
    scopeVersion STRING,
    scopeAttributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedScopeAttributesCount INT,
    scopeMetricSchemaUrl STRING,
    histogram STRUCT<
        dataPoint: STRUCT<
            attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
                stringValue: STRING,
                boolValue: BOOLEAN,
                intValue: BIGINT,
                doubleValue: DOUBLE,
                arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
                kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
            >>>,
            startTimeUnixNano: BIGINT,
            timeUnixNano: BIGINT,
            count: BIGINT,
            sum: DOUBLE,
            bucketCounts: ARRAY<BIGINT>,
            explicitBounds: ARRAY<DOUBLE>,
            exemplars: ARRAY<STRUCT<value: DOUBLE, timestampUnixNano: BIGINT, filteredAttributes: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>>,
            flags: INT,
            min: DOUBLE,
            max: DOUBLE
        >,
        aggregationTemporality: INT
    >,
    exponentialHistogram STRUCT<
        dataPoint: STRUCT<
            attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
                stringValue: STRING,
                boolValue: BOOLEAN,
                intValue: BIGINT,
                doubleValue: DOUBLE,
                arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
                kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
            >>>,
            startTimeUnixNano: BIGINT,
            timeUnixNano: BIGINT,
            count: BIGINT,
            sum: DOUBLE,
            scale: INT,
            zeroCount: BIGINT,
            positive: STRUCT<bucketCounts: ARRAY<BIGINT>, offset: INT>,
            negative: STRUCT<bucketCounts: ARRAY<BIGINT>, offset: INT>,
            flags: INT,
            exemplars: ARRAY<STRUCT<value: DOUBLE, timestampUnixNano: BIGINT, filteredAttributes: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>>,
            min: DOUBLE,
            max: DOUBLE,
            zeroThreshold: DOUBLE
        >,
        aggregationTemporality: INT
    >,
    summary STRUCT<
        dataPoint: STRUCT<
            attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
                stringValue: STRING,
                boolValue: BOOLEAN,
                intValue: BIGINT,
                doubleValue: DOUBLE,
                arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
                kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
            >>>,
            startTimeUnixNano: BIGINT,
            timeUnixNano: BIGINT,
            count: BIGINT,
            sum: DOUBLE,
            quantileValues: ARRAY<STRUCT<quantile: DOUBLE, value: DOUBLE>>
        >
    >,
    gauge STRUCT<dataPoint: STRUCT<
        attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
            stringValue: STRING,
            boolValue: BOOLEAN,
            intValue: BIGINT,
            doubleValue: DOUBLE,
            arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
            kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
        >>>,
        startTimeUnixNano: BIGINT,
        timeUnixNano: BIGINT,
        asDouble: DOUBLE, 
        asInt: BIGINT,
        exemplars: ARRAY<STRUCT<value: DOUBLE, timestampUnixNano: BIGINT, filteredAttributes: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>>,
        flags: INT
    >>,
    sum STRUCT<dataPoint: STRUCT<
        attributes: ARRAY<STRUCT<key: STRING, value: STRUCT<
            stringValue: STRING,
            boolValue: BOOLEAN,
            intValue: BIGINT,
            doubleValue: DOUBLE,
            arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
            kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
        >>>,
        startTimeUnixNano: BIGINT,
        timeUnixNano: BIGINT,
        asDouble: DOUBLE, 
        asInt: BIGINT,
        exemplars: ARRAY<STRUCT<value: DOUBLE, timestampUnixNano: BIGINT, filteredAttributes: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>>,
        flags: INT
    >, aggregationTemporality: INT, isMonotonic: BOOLEAN>,
    metadata ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>
)
PARTITIONED BY (
    partition STRING
)
ROW FORMAT SERDE 'org.openx.data.jsonserde.JsonSerDe'
WITH SERDEPROPERTIES (
    'ignore.malformed.json' = 'true'
)
LOCATION 's3://<your s3 bucket name>/metrics/'
TBLPROPERTIES (
    'projection.enabled' = 'true',
    'projection.partition.type' = 'date',
    'projection.partition.format' = 'yyyy/MM/dd/HH',
    'projection.partition.range' = '2023/01/01/00,NOW',
    'projection.partition.interval' = '1',
    'projection.partition.interval.unit' = 'HOURS',
    'storage.location.template' = 's3://<your s3 bucket name>/metrics/${partition}/'
);
logs table schema
CREATE EXTERNAL TABLE IF NOT EXISTS oteleport_logs (
    resourceAttributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedResourceAttributesCount INT,
    resourceLogSchemaUrl STRING,
    scopeName STRING,
    scopeVersion STRING,
    scopeAttributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedScopeAttributesCount INT,
    scopeLogSchemaUrl STRING,
    timeUnixNano BIGINT,
    severityNumber INT,
    severityText STRING,
    body STRUCT<stringValue: STRING>,
    attributes ARRAY<STRUCT<key: STRING, value: STRUCT<
        stringValue: STRING,
        boolValue: BOOLEAN,
        intValue: BIGINT,
        doubleValue: DOUBLE,
        arrayValue: STRUCT<values: ARRAY<STRUCT<stringValue: STRING>>>,
        kvlistValue: STRUCT<values: ARRAY<STRUCT<key: STRING, value: STRUCT<stringValue: STRING>>>>
    >>>,
    droppedAttributesCount INT,
    flags INT,
    traceId STRING,
    spanId STRING,
    observedTimeUnixNano BIGINT
)
PARTITIONED BY (
    partition STRING
)
ROW FORMAT SERDE 'org.openx.data.jsonserde.JsonSerDe'
WITH SERDEPROPERTIES (
    'ignore.malformed.json' = 'true'
)
LOCATION 's3://<your s3 bucket name>/logs/'
TBLPROPERTIES (
    'projection.enabled' = 'true',
    'projection.partition.type' = 'date',
    'projection.partition.format' = 'yyyy/MM/dd/HH',
    'projection.partition.range' = '2023/01/01/00,NOW',
    'projection.partition.interval' = '1',
    'projection.partition.interval.unit' = 'HOURS',
    'storage.location.template' = 's3://<your s3 bucket name>/logs/${partition}/'
);

example query:

select 
    cast(from_unixtime(traces.startTimeUnixNano/1000000000) as date) as ymd,
    rs.value.stringValue as service_name,
    count(distinct traceId) as trace_count,
    count(distinct spanId) as span_count
from oteleport_traces as traces
cross join unnest(traces.resourceAttributes) with ordinality as t(rs, rs_index)
where traces.partition like '2024/11/%' and rs.key = 'service.name'
group by 1,2 

License

This project is licensed under the MIT License. See the LICENSE file for more details.

Documentation

Index

Constants

This section is empty.

Variables

View Source
// EnvNativeFunction is the Jsonnet native function `env(name, default)`:
// it returns the value of the named environment variable, falling back to
// the supplied default when the variable is unset or empty.
var EnvNativeFunction = &jsonnet.NativeFunction{
	Name:   "env",
	Params: []ast.Identifier{"name", "default"},
	Func: func(args []interface{}) (interface{}, error) {
		if got := len(args); got != 2 {
			return nil, oops.Errorf("env: invalid arguments length expected 2 got %d", got)
		}
		name, ok := args[0].(string)
		if !ok {
			return nil, oops.Errorf("env: invalid 1st arguments, expected string got %T", args[0])
		}
		// Note: an empty value is treated the same as "unset" here.
		if v := os.Getenv(name); v != "" {
			return v, nil
		}
		return args[1], nil
	},
}
View Source
// JsonescapeNativeFunction is the Jsonnet native function `json_escape(str)`:
// it returns the JSON-encoded form of the given string (including the
// surrounding quotes), suitable for embedding in generated JSON.
var JsonescapeNativeFunction = &jsonnet.NativeFunction{
	Name:   "json_escape",
	Params: []ast.Identifier{"str"},
	Func: func(args []interface{}) (interface{}, error) {
		if got := len(args); got != 1 {
			return nil, oops.Errorf("jsonescape: invalid arguments length expected 1 got %d", got)
		}
		s, ok := args[0].(string)
		if !ok {
			return nil, oops.Errorf("jsonescape: invalid arguments, expected string got %T", args[0])
		}
		encoded, err := json.Marshal(s)
		if err != nil {
			return nil, oops.Wrapf(err, "jsonescape")
		}
		return string(encoded), nil
	},
}
View Source
// MastEnvNativeFunction is the Jsonnet native function `must_env(name)`:
// it returns the value of the named environment variable and fails the
// Jsonnet evaluation when the variable is not set.
// NOTE(review): the Go identifier looks like a typo for "MustEnv...", but
// it is exported and referenced elsewhere, so it is kept as-is.
var MastEnvNativeFunction = &jsonnet.NativeFunction{
	Name:   "must_env",
	Params: []ast.Identifier{"name"},
	Func: func(args []interface{}) (interface{}, error) {
		if got := len(args); got != 1 {
			return nil, oops.Errorf("must_env: invalid arguments length expected 1 got %d", got)
		}
		name, ok := args[0].(string)
		if !ok {
			return nil, oops.Errorf("must_env: invalid arguments, expected string got %T", args[0])
		}
		// LookupEnv distinguishes "unset" from "set to empty string";
		// only a truly unset variable is an error here.
		v, found := os.LookupEnv(name)
		if !found {
			return nil, oops.Errorf("must_env: %s not set", name)
		}
		return v, nil
	},
}
View Source
// NativeFunctions lists every Jsonnet native function provided by this
// package (must_env, env, json_escape).
var NativeFunctions = []*jsonnet.NativeFunction{
	MastEnvNativeFunction,
	EnvNativeFunction,
	JsonescapeNativeFunction,
}
View Source
// Version is the release version of oteleport.
var Version = "v0.2.3"

Functions

func ClientCLI added in v0.2.0

func ClientCLI(ctx context.Context, parse ClientCLIParseFunc) (int, error)

func Coalasce

func Coalasce[T any](v ...*T) *T

func MakeVM

func MakeVM(ctx context.Context) (*jsonnet.VM, error)

func Pointer

func Pointer[T any](v T) *T

func RandomString

func RandomString(n int) string

func ServerCLI

func ServerCLI(ctx context.Context, parse ServerCLIParseFunc) (int, error)

Types

type APIConfig

type APIConfig struct {
	Enable *bool         `json:"enable,omitempty"`
	HTTP   APIHTTPConfig `json:"http"`
	GRPC   APIGRPCConfig `json:"grpc"`
}

API configuration

func (*APIConfig) Validate

func (c *APIConfig) Validate() error

type APIGRPCConfig

type APIGRPCConfig struct {
	Enable   *bool        `json:"enable,omitempty"`
	Address  string       `json:"address"`
	Listener net.Listener `json:"-"`
}

type APIHTTPConfig

type APIHTTPConfig struct {
	Enable   *bool        `json:"enable,omitempty"`
	Prefix   string       `json:"prefix"`
	Address  string       `json:"address"`
	Listener net.Listener `json:"-"`
}

API HTTP configuration

func (*APIHTTPConfig) Validate

func (c *APIHTTPConfig) Validate(parent *APIConfig) error

type AccessKeyConfig

type AccessKeyConfig struct {
	KeyID     string `json:"key_id"`
	SecretKey string `json:"secret_key"`
}

func (*AccessKeyConfig) UnmarshalJSON

func (c *AccessKeyConfig) UnmarshalJSON(data []byte) error

type ClientApp added in v0.2.0

type ClientApp struct {
	// contains filtered or unexported fields
}

func NewClientApp added in v0.2.0

func NewClientApp(p *Profile) (*ClientApp, error)

func (*ClientApp) FetchLogsData added in v0.2.0

func (a *ClientApp) FetchLogsData(ctx context.Context, opts *ClientLogsCommandOptions) error

func (*ClientApp) FetchMetricsData added in v0.2.0

func (a *ClientApp) FetchMetricsData(ctx context.Context, opts *ClientMetricsCommandOptions) error

func (*ClientApp) FetchTracesData added in v0.2.0

func (a *ClientApp) FetchTracesData(ctx context.Context, opts *ClientTracesCommandOptions) error

type ClientCLIOptions added in v0.2.0

type ClientCLIOptions struct {
	LogLevel string `help:"log level (debug, info, warn, error)" default:"info" enum:"debug,info,warn,error" env:"OTELPORT_LOG_LEVEL"`
	Color    *bool  `help:"enable colored output" env:"OTELPORT_COLOR"`

	ProfilePath string            `help:"oteleport client profile" default:"" env:"OTELPORT_PROFILE"`
	ExtStr      map[string]string `help:"external string values for Jsonnet" env:"OTELEPORT_EXTSTR"`
	ExtCode     map[string]string `help:"external code values for Jsonnet" env:"OTELEPORT_EXTCODE"`

	Endpoint        string `help:"oteleport server endpoint" default:"http://localhost:8080" env:"OTELPORT_ENDPOINT"`
	AccessKey       string `help:"oteleport server access key" env:"OTELPORT_ACCESS_KEY"`
	AccessKeyHeader string `help:"oteleport server access key header" default:"Oteleport-Access-Key" env:"OTELEPORT_ACCESS_KEY_HEADER"`
	ClientSignalOutputOptions

	Version struct{}                    `cmd:"version" help:"show version"`
	Traces  ClientTracesCommandOptions  `cmd:"traces" help:"traces subcommand"`
	Metrics ClientMetricsCommandOptions `cmd:"metrics" help:"metrics subcommand"`
	Logs    ClientLogsCommandOptions    `cmd:"logs" help:"logs subcommand"`
}

func ParseClientCLI added in v0.2.0

func ParseClientCLI(args []string) (string, *ClientCLIOptions, func(), error)

type ClientCLIParseFunc added in v0.2.0

type ClientCLIParseFunc func([]string) (string, *ClientCLIOptions, func(), error)

type ClientLogsCommandOptions added in v0.2.0

type ClientLogsCommandOptions struct {
	ClientTimeRangeOptions
}

type ClientMetricsCommandOptions added in v0.2.0

type ClientMetricsCommandOptions struct {
	ClientTimeRangeOptions
}

type ClientSignalOutputOptions added in v0.2.0

type ClientSignalOutputOptions struct {
	OtelExporterOTLPEndpoint        string `` /* 168-byte string literal not displayed */
	OtelExporterOTLPTracesEndpoint  string `` /* 153-byte string literal not displayed */
	OtelExporterOTLPMetricsEndpoint string `` /* 156-byte string literal not displayed */
	OtelExporterOTLPLogsEndpoint    string `` /* 147-byte string literal not displayed */

	OtelExporterOTLPProtocol        string `` /* 153-byte string literal not displayed */
	OtelExporterOTLPTracesProtocol  string `` /* 174-byte string literal not displayed */
	OtelExporterOTLPMetricsProtocol string `` /* 177-byte string literal not displayed */
	OtelExporterOTLPLogsProtocol    string `` /* 168-byte string literal not displayed */

	OtelExporterOTLPHeaders        map[string]string `help:"exporter headers" env:"OTEL_EXPORTER_OTLP_HEADERS" group:"OpenTelemetry Exporter Parameters" json:"otlp_headers"`
	OtelExporterOTLPTracesHeaders  map[string]string `` /* 139-byte string literal not displayed */
	OtelExporterOTLPMetricsHeaders map[string]string `` /* 142-byte string literal not displayed */
	OtelExporterOTLPLogsHeaders    map[string]string `` /* 133-byte string literal not displayed */

	OtelExporterOTLPCompression        string `` /* 162-byte string literal not displayed */
	OtelExporterOTLPTracesCompression  string `` /* 183-byte string literal not displayed */
	OtelExporterOTLPMetricsCompression string `` /* 186-byte string literal not displayed */
	OtelExporterOTLPLogsCompression    string `` /* 177-byte string literal not displayed */

	OtelExporterOTLPTimeout        time.Duration `` /* 132-byte string literal not displayed */
	OtelExporterOTLPTracesTimeout  time.Duration `` /* 150-byte string literal not displayed */
	OtelExporterOTLPMetricsTimeout time.Duration `` /* 153-byte string literal not displayed */
	OtelExporterOTLPLogsTimeout    time.Duration `` /* 144-byte string literal not displayed */
}

func (ClientSignalOutputOptions) OTLPClientOptions added in v0.2.0

func (o ClientSignalOutputOptions) OTLPClientOptions() []otlp.ClientOption

type ClientTimeRangeOptions added in v0.2.0

type ClientTimeRangeOptions struct {
	StartTime *time.Time `help:"return Otel Signals newer than this time. RFC3339 format" env:"OTELPORT_START_TIME" format:"2006-01-02T15:04:05Z"`
	EndTime   *time.Time `help:"return Otel Signals older than this time. RFC3339 format" env:"OTELPORT_END_TIME" format:"2006-01-02T15:04:05Z"`
	Since     string     `help:"return Otel Signals newer than a relative duration. like 52, 2m, or 3h (default: 5m)" env:"OTELPORT_SINCE" default:"5m"`
	Until     string     `help:"return Otel Signals older than a relative duration. like 52, 2m, or 3h" env:"OTELPORT_UNTIL"`
}

func (*ClientTimeRangeOptions) TimeRangeUnixNano added in v0.2.0

func (o *ClientTimeRangeOptions) TimeRangeUnixNano() (int64, int64)

type ClientTracesCommandOptions added in v0.2.0

type ClientTracesCommandOptions struct {
	ClientTimeRangeOptions
}

type LoadOptions

type LoadOptions struct {
	ExtVars  map[string]string
	ExtCodes map[string]string
}

type OTLPConfig

type OTLPConfig struct {
	Enable *bool          `json:"enable,omitempty"`
	GRPC   OTLPGRPCConfig `json:"grpc"`
	HTTP   OTLPHTTPConfig `json:"http"`
}

OTLP gRPC and HTTP configuration

func (*OTLPConfig) Validate

func (c *OTLPConfig) Validate() error

type OTLPGRPCConfig

type OTLPGRPCConfig struct {
	Enable   *bool        `json:"enable,omitempty"`
	Address  string       `json:"address"`
	Listener net.Listener `json:"-"`
}

gRPC configuration

func (*OTLPGRPCConfig) Validate

func (c *OTLPGRPCConfig) Validate(parent *OTLPConfig) error

type OTLPHTTPConfig

type OTLPHTTPConfig struct {
	Enable   *bool        `json:"enable,omitempty"`
	Prefix   string       `json:"prefix"`
	Address  string       `json:"address"`
	Listener net.Listener `json:"-"`
}

HTTP configuration

func (*OTLPHTTPConfig) Validate

func (c *OTLPHTTPConfig) Validate(parent *OTLPConfig) error

type Profile added in v0.2.0

type Profile struct {
	*client.Profile
	Output ClientSignalOutputOptions `json:"output"`
}

func (*Profile) Load added in v0.2.0

func (p *Profile) Load(path string, opts *LoadOptions) error

func (*Profile) Validate added in v0.2.0

func (p *Profile) Validate() error

type S3SignalRepository

type S3SignalRepository struct {
	// contains filtered or unexported fields
}

func (*S3SignalRepository) FetchLogsData

func (*S3SignalRepository) FetchTracesData

func (*S3SignalRepository) PushLogsData

func (r *S3SignalRepository) PushLogsData(ctx context.Context, data *logspb.LogsData) error

func (*S3SignalRepository) PushMetricsData

func (r *S3SignalRepository) PushMetricsData(ctx context.Context, data *metricspb.MetricsData) error

func (*S3SignalRepository) PushTracesData

func (r *S3SignalRepository) PushTracesData(ctx context.Context, data *tracepb.TracesData) error

type Server

type Server struct {
	TermHandler func()
	// contains filtered or unexported fields
}

func NewServer

func NewServer(cfg *ServerConfig) (*Server, error)

func (*Server) Run

func (s *Server) Run(ctx context.Context) error

type ServerCLIOptions

type ServerCLIOptions struct {
	ConfigPath string            `name:"config" help:"config file path" default:"oteleport.jsonnet" env:"OTELPORT_CONFIG"`
	ExtStr     map[string]string `help:"external string values for Jsonnet" env:"OTELEPORT_EXTSTR"`
	ExtCode    map[string]string `help:"external code values for Jsonnet" env:"OTELEPORT_EXTCODE"`

	LogLevel string `help:"log level (debug, info, warn, error)" default:"info" enum:"debug,info,warn,error" env:"OTELPORT_LOG_LEVEL"`
	Color    *bool  `help:"enable colored output" env:"OTELPORT_COLOR" negatable:""`

	Serve   struct{} `cmd:"" help:"start oteleport server" default:"1"`
	Version struct{} `cmd:"version" help:"show version"`
}

func ParseServerCLI

func ParseServerCLI(args []string) (string, *ServerCLIOptions, func(), error)

type ServerCLIParseFunc

type ServerCLIParseFunc func([]string) (string, *ServerCLIOptions, func(), error)

type ServerConfig

type ServerConfig struct {
	AccessKeyHeader string             `json:"access_key_header"`
	AccessKeys      []*AccessKeyConfig `json:"access_keys"`
	Storage         StorageConfig      `json:"storage"`
	OTLP            OTLPConfig         `json:"otlp"`
	API             APIConfig          `json:"api"`
}

Overall Configuration structure

func DefaultServerConfig

func DefaultServerConfig() *ServerConfig

func (*ServerConfig) EnableAuth

func (c *ServerConfig) EnableAuth() bool

func (*ServerConfig) Load

func (c *ServerConfig) Load(path string, opts *LoadOptions) error

func (*ServerConfig) Validate

func (c *ServerConfig) Validate() error

Validate function to check the configuration for validity

type SignalRepository

type SignalRepository interface {
	PushTracesData(ctx context.Context, data *tracepb.TracesData) error
	PushMetricsData(ctx context.Context, data *metricspb.MetricsData) error
	PushLogsData(ctx context.Context, data *logspb.LogsData) error
	FetchTracesData(ctx context.Context, input *oteleportpb.FetchTracesDataRequest) (*oteleportpb.FetchTracesDataResponse, error)
	FetchMetricsData(ctx context.Context, input *oteleportpb.FetchMetricsDataRequest) (*oteleportpb.FetchMetricsDataResponse, error)
	FetchLogsData(ctx context.Context, input *oteleportpb.FetchLogsDataRequest) (*oteleportpb.FetchLogsDataResponse, error)
}

func NewSignalRepository

func NewSignalRepository(cfg *StorageConfig) (SignalRepository, error)

type StorageAWSConfig

type StorageAWSConfig struct {
	Region         string                       `json:"region"`
	Endpoint       string                       `json:"endpoint"`
	UseS3PathStyle bool                         `json:"use_s3_path_style"`
	Credentials    *StorageAWSCredentialsConfig `json:"credentials"`
	// contains filtered or unexported fields
}

func (*StorageAWSConfig) Validate

func (c *StorageAWSConfig) Validate() error

type StorageAWSCredentialsConfig

type StorageAWSCredentialsConfig struct {
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key"`
	SessionToken    string `json:"session_token"`
}

type StorageConfig

type StorageConfig struct {
	CursorEncryptionKey []byte `json:"cursor_encryption_key"`
	GZip                *bool  `json:"gzip,omitempty"`
	Flatten             *bool  `json:"flatten,omitempty"`
	Location            string `json:"location"`

	AWS StorageAWSConfig `json:"aws,omitempty"`
	// contains filtered or unexported fields
}

func (*StorageConfig) Validate

func (c *StorageConfig) Validate(parent *ServerConfig) error

Directories

Path Synopsis
cmd
pkg

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL