Documentation ¶
Index ¶
- Constants
- Variables
- func BuildFullObjectStoragePath(sink, dataset, table, jobID string) string
- func BuildObjectStoragePathPattern(dataset, table, jobID string) string
- func BuildStoragePath(dataset, table string) string
- type Backup
- type BackupFilter
- type BackupOptions
- type BackupRepository
- type BackupStatus
- type BackupType
- type BigQueryOptions
- type CloudStorageOptions
- type DefaultSourceMetadataJobRepository
- type EntityAudit
- type ExtractJobID
- type ForeignJobID
- type Group
- type InvalidBackupType
- type Job
- type JobPage
- type JobPatch
- type JobRepository
- type JobStatistics
- type JobStatus
- type MirrorOptions
- type MirrorRevision
- type Operation
- type Region
- type SinkOptions
- type SnapshotOptions
- type SourceMetadata
- type SourceMetadataJob
- type SourceMetadataJobRepository
- type SourceMetadataRepository
- type SourceTrashcan
- type SourceTrashcanRepository
- type StorageClass
- type Strategy
- type TransferJobID
- type UpdateFields
- type User
- type UserGroup
Constants ¶
const AllJobs = -101
AllJobs will fetch all jobs
Variables ¶
var BackupTypes = []BackupType{BigQuery, CloudStorage}
BackupTypes source for a backup
var JobStatutses = []JobStatus{NotScheduled, Scheduled, Error, Pending, FinishedOk, FinishedError, FinishedQuotaError, JobDeleted}
JobStatutses available job statuses
var Regions = []Region{EuropeWest1, EuropeWest3, EuropeWest4}
Regions available regions for a sink
var StorageClasses = []StorageClass{Regional, Nearline, Coldline}
StorageClasses available storage classes for a sink
var Strategies = []Strategy{Snapshot, Mirror}
Strategies available for a backup
Functions ¶
func BuildFullObjectStoragePath ¶
BuildFullObjectStoragePath create a sink's path for a GCS data
func BuildObjectStoragePathPattern ¶
BuildObjectStoragePathPattern create a sink's path for a BigQuery data
func BuildStoragePath ¶
BuildStoragePath create a path for BigQuery dataset/table
Types ¶
type Backup ¶
type Backup struct { ID string `pg:"id,pk"` Status BackupStatus `pg:"status"` Type BackupType `pg:"type"` Strategy Strategy SourceProject string `pg:"project"` LastScheduledTime time.Time `pg:"last_scheduled_timestamp"` LastCleanupTime time.Time `pg:"last_cleanup_timestamp"` SinkOptions SnapshotOptions BackupOptions EntityAudit MirrorOptions // contains filtered or unexported fields }
Backup core entity
func (Backup) GetTrashcanPath ¶
GetTrashcanPath gives a path to an object moved into the trashcan
type BackupFilter ¶
type BackupFilter struct {
Project string
}
BackupFilter possible filtering for backups in Query. TODO: include defer .close() for .conn() in each method
type BackupOptions ¶
type BackupOptions struct { BigQueryOptions CloudStorageOptions }
BackupOptions backup options for specific technology
type BackupRepository ¶
type BackupRepository interface { AddBackup(context.Context, *Backup) (*Backup, error) GetBackup(ctxIn context.Context, backupID string) (*Backup, error) GetBackups(context.Context, BackupFilter) ([]*Backup, error) MarkStatus(ctxIn context.Context, backupId string, status BackupStatus) error MarkDeleted(context.Context, string) error UpdateBackup(ctxIn context.Context, updateFields UpdateFields) error UpdateLastScheduledTime(ctxIn context.Context, backupID string, lastScheduledTime time.Time, status BackupStatus) error UpdateLastCleanupTime(ctxIn context.Context, backupID string, lastCleanupTime time.Time) error GetByBackupStatus(ctxIn context.Context, status BackupStatus) ([]*Backup, error) GetByBackupStrategy(ctxIn context.Context, strategy Strategy) ([]*Backup, error) GetExpired(context.Context, BackupType) ([]*Backup, error) GetExpiredBigQueryMirrorRevisions(ctxIn context.Context, maxRevisionLifetimeInWeeks int) ([]*MirrorRevision, error) GetBigQueryOneShotSnapshots(ctxIn context.Context, status BackupStatus) ([]*Backup, error) GetScheduledBackups(context.Context, BackupType) ([]*Backup, error) }
BackupRepository defines operations for a Backup
func NewBackupRepository ¶
func NewBackupRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (BackupRepository, error)
NewBackupRepository return instance of BackupRepository
type BackupStatus ¶
type BackupStatus string
BackupStatus for backup
const ( // NotStarted for a newly created backup NotStarted BackupStatus = "NotStarted" // Prepared backup had jobs prepared Prepared BackupStatus = "Prepared" // Finished backup was successful (for a recurring backup it will stay in that state unless an error appears) Finished BackupStatus = "Finished" // Paused backup will not schedule new jobs Paused BackupStatus = "Paused" // ToDelete was marked to deletion ToDelete BackupStatus = "ToDelete" // BackupDeleted was deleted BackupDeleted BackupStatus = "BackupDeleted" )
func (BackupStatus) EqualTo ¶
func (bs BackupStatus) EqualTo(status string) bool
EqualTo compares a string with the BackupStatus type
func (BackupStatus) String ¶
func (bs BackupStatus) String() string
type BackupType ¶
type BackupType string
BackupType for a backup
const ( // BigQuery type BigQuery BackupType = "BigQuery" // CloudStorage type CloudStorage BackupType = "CloudStorage" )
func (BackupType) EqualTo ¶
func (s BackupType) EqualTo(backupType string) bool
EqualTo compare string with the BackupType type
func (BackupType) String ¶
func (s BackupType) String() string
type BigQueryOptions ¶
type BigQueryOptions struct { Dataset string `pg:"bigquery_dataset"` Table []string `pg:"bigquery_table"` ExcludedTables []string `pg:"bigquery_excluded_tables"` }
BigQueryOptions for a BigQuery backup
type CloudStorageOptions ¶
type CloudStorageOptions struct { Bucket string `pg:"cloudstorage_bucket"` IncludePath []string `pg:"cloudstorage_include_path"` ExcludePath []string `pg:"cloudstorage_exclude_path"` }
CloudStorageOptions for a GCS backup
type DefaultSourceMetadataJobRepository ¶
type DefaultSourceMetadataJobRepository struct {
// contains filtered or unexported fields
}
DefaultSourceMetadataJobRepository implements instance of SourceMetadataJobRepository
type EntityAudit ¶
type EntityAudit struct { CreatedTimestamp time.Time `pg:"audit_created_timestamp"` UpdatedTimestamp time.Time `pg:"audit_updated_timestamp"` DeletedTimestamp time.Time `pg:"audit_deleted_timestamp"` }
EntityAudit defines changes that happened to a given entity
type ExtractJobID ¶
type ExtractJobID string
ExtractJobID for BigQuery technology
func (ExtractJobID) String ¶
func (j ExtractJobID) String() string
type ForeignJobID ¶
type ForeignJobID struct { BigQueryID ExtractJobID `pg:"bigquery_extract_job_id"` CloudStorageID TransferJobID `pg:"cloudstorage_transfer_job_id"` }
ForeignJobID job id for a specific technology
type InvalidBackupType ¶
type InvalidBackupType struct {
Type BackupType
}
InvalidBackupType for cases when backup type is incorrect
func (*InvalidBackupType) Error ¶
func (i *InvalidBackupType) Error() string
type Job ¶
type Job struct { ID string `pg:"id,pk"` BackupID string `pg:"backup_id"` Type BackupType `pg:"type"` Status JobStatus `pg:"status"` Source string `pg:"source"` ForeignJobID EntityAudit // contains filtered or unexported fields }
Job a backup unit of work
type JobPage ¶
type JobPage struct { // Size how many elements to fetch, value AllJobs will fetch all Size int Number int }
JobPage represent what subset of jobs to fetch
type JobPatch ¶
type JobPatch struct { ID string Status JobStatus ForeignJobID }
JobPatch defines update fields for a Job
type JobRepository ¶
type JobRepository interface { AddJob(context.Context, *Job) error AddJobs(ctxIn context.Context, jobs []*Job) error DeleteJob(context.Context, string) error GetJob(context.Context, string) (*Job, error) MarkDeleted(context.Context, string) error GetByJobTypeAndStatus(context.Context, BackupType, ...JobStatus) ([]*Job, error) GetByStatusAndBefore(context.Context, []JobStatus, int) ([]*Job, error) PatchJobStatus(ctx context.Context, patch JobPatch) error GetJobsForBackupID(ctx context.Context, backupID string, jobPage JobPage) ([]*Job, error) GetMostRecentJobForBackupID(ctxIn context.Context, backupID string, status ...JobStatus) (*Job, error) GetBackupRestoreJobs(ctx context.Context, backupID, jobID string) ([]*Job, error) GetStatisticsForBackupID(ctx context.Context, backupID string) (JobStatistics, error) }
JobRepository defines operation with backup job
func NewJobRepository ¶
func NewJobRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (JobRepository, error)
NewJobRepository create new instance of JobRepository
type JobStatus ¶
type JobStatus string
JobStatus for backup
const ( // NotScheduled job is not scheduled NotScheduled JobStatus = "NotScheduled" // Scheduled is scheduled Scheduled JobStatus = "Scheduled" // Pending BigQuery/GCS job is ongoing Pending JobStatus = "Pending" // Error job finished with error Error JobStatus = "Error" // FinishedOk job finished with success FinishedOk JobStatus = "FinishedOk" // FinishedError job finished with error FinishedError JobStatus = "FinishedError" // FinishedQuotaError job finished with quota error FinishedQuotaError JobStatus = "FinishedQuotaError" // JobDeleted was deleted JobDeleted JobStatus = "JobDeleted" )
type MirrorOptions ¶
type MirrorOptions struct {
LifetimeInDays uint `pg:"mirror_lifetime_in_days,use_zero"`
}
MirrorOptions strategy backup options
type MirrorRevision ¶
type MirrorRevision struct { SourceMetadataID int JobID string BackupID string BigqueryDataset string Source string TargetProject string TargetSink string }
MirrorRevision tracks changes for BigQuery tables
func (MirrorRevision) String ¶
func (b MirrorRevision) String() string
type Operation ¶
type Operation string
Operation for a backup
type Region ¶
type Region string
Region for a GCS sink bucket
type SinkOptions ¶
type SinkOptions struct { TargetProject string Region string `pg:"target_region"` Sink string `pg:"target_sink"` StorageClass string `pg:"target_storage_class"` ArchiveTTM uint `pg:"archive_ttm"` }
SinkOptions for a backup
type SnapshotOptions ¶
type SnapshotOptions struct { LifetimeInDays uint `pg:"snapshot_lifetime_in_days,use_zero"` FrequencyInHours uint `pg:"snapshot_frequency_in_hours,use_zero"` }
SnapshotOptions strategy backup options
type SourceMetadata ¶
type SourceMetadata struct { ID int `pg:"id,pk"` BackupID string `pg:"backup_id"` Source string `pg:"source"` SourceChecksum string `pg:"source_checksum"` LastModifiedTime time.Time `pg:"last_modified_time"` Operation string `pg:"operation"` CreatedTimestamp time.Time `pg:"audit_created_timestamp"` DeletedTimestamp time.Time `pg:"audit_deleted_timestamp"` // contains filtered or unexported fields }
SourceMetadata for a BigQuery mirroring
type SourceMetadataJob ¶
type SourceMetadataJob struct { SourceMetadataID int `pg:"source_metadata_id"` JobId string `pg:"job_id"` // contains filtered or unexported fields }
SourceMetadataJob for a BigQuery mirroring
type SourceMetadataJobRepository ¶
type SourceMetadataJobRepository interface {
Add(ctxIn context.Context, sourceMetadataID int, jobID string) error
}
SourceMetadataJobRepository defines operation for sourceMetadata
func NewSourceMetadataJobRepository ¶
func NewSourceMetadataJobRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (SourceMetadataJobRepository, error)
NewSourceMetadataJobRepository return instance of SourceMetadataJobRepository
type SourceMetadataRepository ¶
type SourceMetadataRepository interface { Add(context.Context, []*SourceMetadata) ([]*SourceMetadata, error) GetLastByBackupID(ctxIn context.Context, backupID string) ([]*SourceMetadata, error) MarkDeleted(context.Context, int) error }
SourceMetadataRepository defines operation for SourceMetadata
func NewSourceMetadataRepository ¶
func NewSourceMetadataRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (SourceMetadataRepository, error)
NewSourceMetadataRepository return instance of SourceMetadataRepository
type SourceTrashcan ¶
type SourceTrashcan struct { BackupID string Source string CreatedTimestamp time.Time `pg:"audit_created_timestamp"` // contains filtered or unexported fields }
SourceTrashcan holds information about objects moved into trashcan
type SourceTrashcanRepository ¶
type SourceTrashcanRepository interface { Add(ctxIn context.Context, backupID string, source string, timestamp time.Time) error Delete(ctxIn context.Context, backupID string, source string) error FilterExistingEntries(ctxIn context.Context, sources []SourceTrashcan) ([]SourceTrashcan, error) GetBefore(ctxIn context.Context, deltaWeeks int) ([]*SourceTrashcan, error) }
SourceTrashcanRepository defines operation for SourceTrashcan
func NewSourceTrashcanRepository ¶
func NewSourceTrashcanRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (SourceTrashcanRepository, error)
NewSourceTrashcanRepository return instance of SourceTrashcanRepository
type StorageClass ¶
type StorageClass string
StorageClass for a GCS sink bucket
const ( // Regional GCS sink bucket storage class Regional StorageClass = "REGIONAL" // Nearline GCS sink bucket storage class Nearline StorageClass = "NEARLINE" // Coldline GCS sink bucket storage class Coldline StorageClass = "COLDLINE" )
func (StorageClass) EqualTo ¶
func (s StorageClass) EqualTo(storageClass string) bool
EqualTo compare string with the StorageClass type
func (StorageClass) String ¶
func (s StorageClass) String() string
type Strategy ¶
type Strategy string
Strategy for a backup
type TransferJobID ¶
type TransferJobID string
TransferJobID for GCS technology
func (TransferJobID) String ¶
func (j TransferJobID) String() string
type UpdateFields ¶
type UpdateFields struct { BackupID string Status BackupStatus IncludePath []string ExcludePath []string Table []string ExcludedTables []string MirrorTTL uint SnapshotTTL uint ArchiveTTM uint }
UpdateFields what is changing