Documentation
¶
Index ¶
- Constants
- Variables
- type CacheConfig
- type CacheProvider
- type Config
- type DebuginfodClient
- type DebuginfodClientObjectStorageCache
- type Fetcher
- type FilesystemCacheConfig
- type GrpcDebuginfoUploadServiceClient
- type GrpcUploadClient
- type HTTPDebuginfodClient
- type MetadataManager
- type NopDebuginfodClient
- type ObjectStoreMetadata
- func (m *ObjectStoreMetadata) Fetch(ctx context.Context, buildID string) (*debuginfopb.Debuginfo, error)
- func (m *ObjectStoreMetadata) MarkAsDebuginfodSource(ctx context.Context, buildID string) error
- func (m *ObjectStoreMetadata) MarkAsUploaded(ctx context.Context, buildID, uploadID string, ...) error
- func (m *ObjectStoreMetadata) MarkAsUploading(ctx context.Context, buildID, uploadID, hash string, ...) error
- func (m *ObjectStoreMetadata) SetQuality(ctx context.Context, buildID string, quality *debuginfopb.DebuginfoQuality) error
- type SignedUpload
- type Store
- func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error)
- func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error)
- func (s *Store) ShouldInitiateUpload(ctx context.Context, req *debuginfopb.ShouldInitiateUploadRequest) (*debuginfopb.ShouldInitiateUploadResponse, error)
- func (s *Store) Upload(stream debuginfopb.DebuginfoService_UploadServer) error
- type UploadReader
Constants ¶
const ( // ChunkSize 8MB is the size of the chunks in which debuginfo files are // uploaded and downloaded. AWS S3 has a minimum of 5MB for multi-part uploads // and a maximum of 15MB, and a default of 8MB. ChunkSize = 1024 * 1024 * 8 // MaxMsgSize is the maximum message size the server can receive or send. By default, it is 64MB. MaxMsgSize = 1024 * 1024 * 64 )
const ( ReasonDebuginfoInDebuginfod = "Debuginfo exists in debuginfod, therefore no upload is necessary." ReasonFirstTimeSeen = "First time we see this Build ID, and it does not exist in debuginfod, therefore please upload!" ReasonUploadStale = "A previous upload was started but not finished and is now stale, so it can be retried." ReasonUploadInProgress = "A previous upload is still in-progress and not stale yet (only stale uploads can be retried)." ReasonDebuginfoAlreadyExists = "Debuginfo already exists and is not marked as invalid, therefore no new upload is needed." ReasonDebuginfoInvalid = "" /* 128-byte string literal not displayed */ ReasonDebuginfoEqual = "" /* 202-byte string literal not displayed */ ReasonDebuginfoNotEqual = "Debuginfo already exists but is marked as invalid, therefore a new upload will be accepted." ReasonDebuginfodSource = "Debuginfo is available from debuginfod already and not marked as invalid, therefore no new upload is needed." ReasonDebuginfodInvalid = "Debuginfo is available from debuginfod already but is marked as invalid, therefore a new upload is needed." )
Variables ¶
var ( ErrUnknownDebuginfoSource = errors.New("unknown debuginfo source") ErrNotUploadedYet = errors.New("debuginfo not uploaded yet") )
var ( ErrMetadataShouldExist = errors.New("debuginfo metadata should exist") ErrMetadataUnexpectedState = errors.New("debuginfo metadata state is unexpected") ErrMetadataNotFound = errors.New("debuginfo metadata not found") ErrUploadMetadataNotFound = errors.New("debuginfo upload metadata not found") ErrUploadIDMismatch = errors.New("debuginfo upload id mismatch") )
var ErrDebuginfoAlreadyExists = errors.New("debug info already exists")
var ErrDebuginfoNotFound = errors.New("debuginfo not found")
Functions ¶
This section is empty.
Types ¶
type CacheConfig ¶
type CacheConfig struct { Type CacheProvider `yaml:"type"` Config interface{} `yaml:"config"` }
type Config ¶
type Config struct { Bucket *client.BucketConfig `yaml:"bucket"` Cache *CacheConfig `yaml:"cache"` }
type DebuginfodClient ¶ added in v0.15.0
type DebuginfodClient interface { Get(ctx context.Context, buildid string) (io.ReadCloser, error) Exists(ctx context.Context, buildid string) (bool, error) }
func NewDebuginfodClientWithObjectStorageCache ¶ added in v0.15.0
func NewDebuginfodClientWithObjectStorageCache(logger log.Logger, bucket objstore.Bucket, h DebuginfodClient) (DebuginfodClient, error)
NewDebuginfodClientWithObjectStorageCache creates a new DebuginfodClient that caches the debug information in object storage.
type DebuginfodClientObjectStorageCache ¶ added in v0.15.0
type DebuginfodClientObjectStorageCache struct {
// contains filtered or unexported fields
}
func (*DebuginfodClientObjectStorageCache) Exists ¶ added in v0.15.0
func (c *DebuginfodClientObjectStorageCache) Exists(ctx context.Context, buildID string) (bool, error)
Exists reports whether debuginfo for the given buildID exists.
func (*DebuginfodClientObjectStorageCache) Get ¶ added in v0.15.0
func (c *DebuginfodClientObjectStorageCache) Get(ctx context.Context, buildID string) (io.ReadCloser, error)
Get returns debuginfo for the given buildID while caching it in object storage.
type Fetcher ¶ added in v0.15.0
type Fetcher struct {
// contains filtered or unexported fields
}
func NewFetcher ¶ added in v0.15.0
func NewFetcher( debuginfodClient DebuginfodClient, bucket objstore.Bucket, ) *Fetcher
func (*Fetcher) FetchDebuginfo ¶ added in v0.15.0
func (f *Fetcher) FetchDebuginfo(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error)
type FilesystemCacheConfig ¶
type FilesystemCacheConfig struct {
Directory string `yaml:"directory"`
}
type GrpcDebuginfoUploadServiceClient ¶ added in v0.15.0
type GrpcDebuginfoUploadServiceClient interface {
Upload(ctx context.Context, opts ...grpc.CallOption) (debuginfopb.DebuginfoService_UploadClient, error)
}
type GrpcUploadClient ¶ added in v0.15.0
type GrpcUploadClient struct {
GrpcDebuginfoUploadServiceClient
}
func NewGrpcUploadClient ¶ added in v0.15.0
func NewGrpcUploadClient(client GrpcDebuginfoUploadServiceClient) *GrpcUploadClient
func (*GrpcUploadClient) Upload ¶ added in v0.15.0
func (c *GrpcUploadClient) Upload(ctx context.Context, uploadInstructions *debuginfopb.UploadInstructions, r io.Reader) (uint64, error)
type HTTPDebuginfodClient ¶ added in v0.15.0
type HTTPDebuginfodClient struct {
// contains filtered or unexported fields
}
func NewHTTPDebuginfodClient ¶ added in v0.15.0
func NewHTTPDebuginfodClient(logger log.Logger, serverURLs []string, timeoutDuration time.Duration) (*HTTPDebuginfodClient, error)
NewHTTPDebuginfodClient returns a new HTTP debug info client.
func (*HTTPDebuginfodClient) Get ¶ added in v0.15.0
func (c *HTTPDebuginfodClient) Get(ctx context.Context, buildID string) (io.ReadCloser, error)
Get returns the debug information file for the given buildID by downloading it from upstream servers.
type MetadataManager ¶ added in v0.12.0
type MetadataManager interface { MarkAsDebuginfodSource(ctx context.Context, buildID string) error MarkAsUploading(ctx context.Context, buildID, uploadID, hash string, startedAt *timestamppb.Timestamp) error MarkAsUploaded(ctx context.Context, buildID, uploadID string, finishedAt *timestamppb.Timestamp) error Fetch(ctx context.Context, buildID string) (*debuginfopb.Debuginfo, error) }
type NopDebuginfodClient ¶ added in v0.15.0
type NopDebuginfodClient struct{}
func (NopDebuginfodClient) Get ¶ added in v0.15.0
func (NopDebuginfodClient) Get(context.Context, string) (io.ReadCloser, error)
type ObjectStoreMetadata ¶ added in v0.12.0
type ObjectStoreMetadata struct {
// contains filtered or unexported fields
}
func NewObjectStoreMetadata ¶ added in v0.12.0
func NewObjectStoreMetadata(logger log.Logger, bucket objstore.Bucket) *ObjectStoreMetadata
func (*ObjectStoreMetadata) Fetch ¶ added in v0.12.0
func (m *ObjectStoreMetadata) Fetch(ctx context.Context, buildID string) (*debuginfopb.Debuginfo, error)
func (*ObjectStoreMetadata) MarkAsDebuginfodSource ¶ added in v0.15.0
func (m *ObjectStoreMetadata) MarkAsDebuginfodSource(ctx context.Context, buildID string) error
func (*ObjectStoreMetadata) MarkAsUploaded ¶ added in v0.12.0
func (m *ObjectStoreMetadata) MarkAsUploaded(ctx context.Context, buildID, uploadID string, finishedAt *timestamppb.Timestamp) error
func (*ObjectStoreMetadata) MarkAsUploading ¶ added in v0.12.0
func (m *ObjectStoreMetadata) MarkAsUploading(ctx context.Context, buildID, uploadID, hash string, startedAt *timestamppb.Timestamp) error
func (*ObjectStoreMetadata) SetQuality ¶ added in v0.15.0
func (m *ObjectStoreMetadata) SetQuality(ctx context.Context, buildID string, quality *debuginfopb.DebuginfoQuality) error
type SignedUpload ¶ added in v0.15.0
type SignedUpload struct { Enabled bool Client signedupload.Client }
type Store ¶
type Store struct { debuginfopb.UnimplementedDebuginfoServiceServer // contains filtered or unexported fields }
func NewStore ¶
func NewStore( tracer trace.Tracer, logger log.Logger, metadata MetadataManager, bucket objstore.Bucket, debuginfodClient DebuginfodClient, signedUpload SignedUpload, maxUploadDuration time.Duration, maxUploadSize int64, ) (*Store, error)
NewStore returns a new debug info store.
func (*Store) InitiateUpload ¶ added in v0.15.0
func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error)
func (*Store) MarkUploadFinished ¶ added in v0.15.0
func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error)
func (*Store) ShouldInitiateUpload ¶ added in v0.15.0
func (s *Store) ShouldInitiateUpload(ctx context.Context, req *debuginfopb.ShouldInitiateUploadRequest) (*debuginfopb.ShouldInitiateUploadResponse, error)
ShouldInitiateUpload returns whether an upload should be initiated for the given build ID. Checking if an upload should even be initiated allows the parca-agent to avoid extracting debuginfos unnecessarily from a binary.
func (*Store) Upload ¶
func (s *Store) Upload(stream debuginfopb.DebuginfoService_UploadServer) error
type UploadReader ¶
type UploadReader struct {
// contains filtered or unexported fields
}