Documentation ¶
Index ¶
- Constants
- Variables
- func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{}
- func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{}
- func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{}
- func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{}
- func ConsolidatePathSeparators(path string) string
- func CreateBlobCredential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) azblob.Credential
- func CreateBlobFSCredential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) azbfs.Credential
- func CreateDirectoryIfNotExist(directory string, tracker FolderCreationTracker) error
- func CreateFileOfSizeWithWriteThroughOption(destinationPath string, fileSize int64, writeThrough bool, ...) (*os.File, error)
- func CreateGCPClient(ctx context.Context) (*gcpUtils.Client, error)
- func CreateParentDirectoryIfNotExist(destinationPath string, tracker FolderCreationTracker) error
- func CreateS3Client(ctx context.Context, credInfo CredentialInfo, option CredentialOpOptions) (*minio.Client, error)
- func CreateS3Credential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) (*credentials.Credentials, error)
- func DeterminePathSeparator(path string) string
- func DocumentationForDependencyOnChangeDetection()
- func EnvVarOAuthTokenInfoExists() bool
- func GenerateFullPath(rootPath, childPath string) string
- func GenerateFullPathWithQuery(rootPath, childPath, extraQuery string) string
- func GetBlocksRoundedUp(size uint64, blockSize uint64) uint16
- func GetClientProvidedKey(options CpkOptions) azblob.ClientProvidedKeyOptions
- func GetJsonStringFromTemplate(template interface{}) string
- func IffError(test bool, trueVal, falseVal error) error
- func IffString(test bool, trueVal, falseVal string) string
- func IffStringNotNil(wanted *string, instead string) string
- func IffUint8(test bool, trueVal, falseVal uint8) byte
- func Iffint16(test bool, trueVal, falseVal int16) int16
- func Iffint32(test bool, trueVal, falseVal int32) int32
- func Iffint64(test bool, trueVal, falseVal int64) int64
- func Iffint8(test bool, trueVal, falseVal int8) int8
- func Iffloat64(test bool, trueVal, falseVal float64) float64
- func Iffuint16(test bool, trueVal, falseVal uint16) uint16
- func Iffuint32(test bool, trueVal, falseVal uint32) uint32
- func Iffuint64(test bool, trueVal, falseVal uint64) uint64
- func IsErrorEnvVarOAuthTokenInfoNotSet(err error) bool
- func IsForceLoggingDisabled() bool
- func IsGCPURL(u url.URL) bool
- func IsS3URL(u url.URL) bool
- func IsShortPath(s string) bool
- func NewAzCopyLogSanitizer() pipeline.LogSanitizer
- func NewDecompressingWriter(destination io.WriteCloser, ct CompressionType) io.WriteCloser
- func NewNullHasher() hash.Hash
- func NewRandomDataGenerator(length int64) *randomDataGenerator
- func NewReadLogFunc(logger ILogger, fullUrl *url.URL) func(int, error, int64, int64, bool)
- func OSOpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
- func OSStat(name string) (os.FileInfo, error)
- func PanicIfErr(err error)
- func RedactSecretQueryParam(rawQuery, queryKeyNeedRedact string) (bool, string)
- func SetBackupMode(enable bool, fromTo FromTo) error
- func ToClientProvidedKeyOptions(cpkInfo CpkInfo, cpkScopeInfo CpkScopeInfo) azblob.ClientProvidedKeyOptions
- func ToExtendedPath(short string) string
- func ToShortPath(long string) string
- type AtomicMorpherInt32
- type AtomicMorpherInt64
- type AtomicMorpherUint32
- type AtomicMorpherUint64
- type AzError
- type BenchMarkMode
- type BlobTags
- type BlobTransferAttributes
- type BlobType
- type BlockBlobTier
- func (BlockBlobTier) Archive() BlockBlobTier
- func (BlockBlobTier) Cool() BlockBlobTier
- func (BlockBlobTier) Hot() BlockBlobTier
- func (bbt BlockBlobTier) MarshalJSON() ([]byte, error)
- func (BlockBlobTier) None() BlockBlobTier
- func (bbt *BlockBlobTier) Parse(s string) error
- func (bbt BlockBlobTier) String() string
- func (bbt BlockBlobTier) ToAccessTierType() azblob.AccessTierType
- func (bbt *BlockBlobTier) UnmarshalJSON(b []byte) error
- type ByteSlice
- type ByteSliceExtension
- type ByteSlicePooler
- type CPUMonitor
- type CacheLimiter
- type CancelPauseResumeResponse
- type ChunkID
- type ChunkReaderSourceFactory
- type ChunkStatusLogger
- type ChunkStatusLoggerCloser
- type ChunkedFileWriter
- type CloseableReaderAt
- type CompressionType
- type CopyJobPartOrderErrorType
- type CopyJobPartOrderRequest
- type CopyJobPartOrderResponse
- type CopyTransfer
- type CountPerSecond
- type CpkInfo
- type CpkOptions
- type CpkScopeInfo
- type CredCache
- type CredCacheInternalIntegration
- type CredCacheOptions
- type CredentialInfo
- type CredentialOpOptions
- type CredentialType
- func (CredentialType) Anonymous() CredentialType
- func (CredentialType) GoogleAppCredentials() CredentialType
- func (CredentialType) OAuthToken() CredentialType
- func (ct *CredentialType) Parse(s string) error
- func (CredentialType) S3AccessKey() CredentialType
- func (CredentialType) S3PublicBucket() CredentialType
- func (CredentialType) SharedKey() CredentialType
- func (ct CredentialType) String() string
- func (CredentialType) Unknown() CredentialType
- type DeleteDestination
- type DeleteSnapshotsOption
- func (DeleteSnapshotsOption) Include() DeleteSnapshotsOption
- func (DeleteSnapshotsOption) None() DeleteSnapshotsOption
- func (DeleteSnapshotsOption) Only() DeleteSnapshotsOption
- func (d *DeleteSnapshotsOption) Parse(s string) error
- func (d DeleteSnapshotsOption) String() string
- func (d DeleteSnapshotsOption) ToDeleteSnapshotsOptionType() azblob.DeleteSnapshotsOptionType
- type EntityType
- type EnvironmentVariable
- func (EnvironmentVariable) AADEndpoint() EnvironmentVariable
- func (EnvironmentVariable) AWSAccessKeyID() EnvironmentVariable
- func (EnvironmentVariable) AWSSecretAccessKey() EnvironmentVariable
- func (EnvironmentVariable) AccountKey() EnvironmentVariable
- func (EnvironmentVariable) AccountName() EnvironmentVariable
- func (EnvironmentVariable) ApplicationID() EnvironmentVariable
- func (EnvironmentVariable) AutoLoginType() EnvironmentVariable
- func (EnvironmentVariable) AutoTuneToCpu() EnvironmentVariable
- func (EnvironmentVariable) AwsSessionToken() EnvironmentVariable
- func (EnvironmentVariable) BufferGB() EnvironmentVariable
- func (EnvironmentVariable) CPKEncryptionKey() EnvironmentVariable
- func (EnvironmentVariable) CPKEncryptionKeySHA256() EnvironmentVariable
- func (EnvironmentVariable) CacheProxyLookup() EnvironmentVariable
- func (EnvironmentVariable) CertificatePassword() EnvironmentVariable
- func (EnvironmentVariable) CertificatePath() EnvironmentVariable
- func (EnvironmentVariable) ClientSecret() EnvironmentVariable
- func (EnvironmentVariable) ConcurrencyValue() EnvironmentVariable
- func (EnvironmentVariable) CredentialType() EnvironmentVariable
- func (EnvironmentVariable) DefaultServiceApiVersion() EnvironmentVariable
- func (EnvironmentVariable) DisableHierarchicalScanning() EnvironmentVariable
- func (EnvironmentVariable) DisableSyslog() EnvironmentVariable
- func (EnvironmentVariable) EnumerationPoolSize() EnvironmentVariable
- func (EnvironmentVariable) GoogleAppCredentials() EnvironmentVariable
- func (EnvironmentVariable) GoogleCloudProject() EnvironmentVariable
- func (EnvironmentVariable) JobPlanLocation() EnvironmentVariable
- func (EnvironmentVariable) LogLocation() EnvironmentVariable
- func (EnvironmentVariable) ManagedIdentityClientID() EnvironmentVariable
- func (EnvironmentVariable) ManagedIdentityObjectID() EnvironmentVariable
- func (EnvironmentVariable) ManagedIdentityResourceString() EnvironmentVariable
- func (EnvironmentVariable) MimeMapping() EnvironmentVariable
- func (EnvironmentVariable) OAuthTokenInfo() EnvironmentVariable
- func (EnvironmentVariable) OptimizeSparsePageBlobTransfers() EnvironmentVariable
- func (EnvironmentVariable) PacePageBlobs() EnvironmentVariable
- func (EnvironmentVariable) ParallelStatFiles() EnvironmentVariable
- func (EnvironmentVariable) ProfileCPU() EnvironmentVariable
- func (EnvironmentVariable) ProfileMemory() EnvironmentVariable
- func (EnvironmentVariable) RequestTryTimeout() EnvironmentVariable
- func (EnvironmentVariable) ShowPerfStates() EnvironmentVariable
- func (EnvironmentVariable) TenantID() EnvironmentVariable
- func (EnvironmentVariable) TransferInitiationPoolSize() EnvironmentVariable
- func (EnvironmentVariable) UserAgentPrefix() EnvironmentVariable
- func (EnvironmentVariable) UserDir() EnvironmentVariable
- type ExclusiveStringMap
- type ExitCode
- type FileURLPartsExtension
- type FolderCreationTracker
- type FolderDeletionFunc
- type FolderDeletionManager
- type FolderPropertyOption
- type FromTo
- func (ft *FromTo) AreBothFolderAware() bool
- func (FromTo) BenchmarkBlob() FromTo
- func (FromTo) BenchmarkBlobFS() FromTo
- func (FromTo) BenchmarkFile() FromTo
- func (FromTo) BlobBlob() FromTo
- func (FromTo) BlobFSLocal() FromTo
- func (FromTo) BlobFSTrash() FromTo
- func (FromTo) BlobFile() FromTo
- func (FromTo) BlobLocal() FromTo
- func (FromTo) BlobPipe() FromTo
- func (FromTo) BlobTrash() FromTo
- func (FromTo) FileBlob() FromTo
- func (FromTo) FileFile() FromTo
- func (FromTo) FileLocal() FromTo
- func (FromTo) FilePipe() FromTo
- func (FromTo) FileTrash() FromTo
- func (ft *FromTo) From() Location
- func (ft *FromTo) FromAndTo(s string) (srcLocation, dstLocation Location, err error)
- func (FromTo) GCPBlob() FromTo
- func (ft *FromTo) IsDownload() bool
- func (ft *FromTo) IsS2S() bool
- func (ft *FromTo) IsUpload() bool
- func (FromTo) LocalBlob() FromTo
- func (FromTo) LocalBlobFS() FromTo
- func (FromTo) LocalFile() FromTo
- func (ft *FromTo) Parse(s string) error
- func (FromTo) PipeBlob() FromTo
- func (FromTo) PipeFile() FromTo
- func (FromTo) S3Blob() FromTo
- func (ft FromTo) String() string
- func (ft *FromTo) To() Location
- func (FromTo) Unknown() FromTo
- type GCPClientFactory
- type GCPCredentialInfo
- type GCPObjectInfoExtension
- func (gie *GCPObjectInfoExtension) CacheControl() string
- func (gie *GCPObjectInfoExtension) ContentDisposition() string
- func (gie *GCPObjectInfoExtension) ContentEncoding() string
- func (gie *GCPObjectInfoExtension) ContentLanguage() string
- func (gie *GCPObjectInfoExtension) ContentMD5() []byte
- func (gie *GCPObjectInfoExtension) ContentType() string
- func (gie *GCPObjectInfoExtension) NewCommonMetadata() Metadata
- type GCPURLParts
- type GenericResourceURLParts
- type GetJobFromToRequest
- type GetJobFromToResponse
- type HTTPResponseExtension
- type HashValidationOption
- func (HashValidationOption) FailIfDifferent() HashValidationOption
- func (HashValidationOption) FailIfDifferentOrMissing() HashValidationOption
- func (HashValidationOption) LogOnly() HashValidationOption
- func (hvo HashValidationOption) MarshalJSON() ([]byte, error)
- func (HashValidationOption) NoCheck() HashValidationOption
- func (hvo *HashValidationOption) Parse(s string) error
- func (hvo HashValidationOption) String() string
- func (hvo *HashValidationOption) UnmarshalJSON(b []byte) error
- type ILogger
- type ILoggerCloser
- type ILoggerResetable
- type IdentityInfo
- type InitMsgJsonTemplate
- type InvalidMetadataHandleOption
- func (InvalidMetadataHandleOption) ExcludeIfInvalid() InvalidMetadataHandleOption
- func (InvalidMetadataHandleOption) FailIfInvalid() InvalidMetadataHandleOption
- func (i InvalidMetadataHandleOption) MarshalJSON() ([]byte, error)
- func (i *InvalidMetadataHandleOption) Parse(s string) error
- func (InvalidMetadataHandleOption) RenameIfInvalid() InvalidMetadataHandleOption
- func (i InvalidMetadataHandleOption) String() string
- func (i *InvalidMetadataHandleOption) UnmarshalJSON(b []byte) error
- type JobID
- type JobIDDetails
- type JobPriority
- type JobStatus
- func (JobStatus) All() JobStatus
- func (j *JobStatus) AtomicLoad() JobStatus
- func (j *JobStatus) AtomicStore(newJobStatus JobStatus)
- func (JobStatus) Cancelled() JobStatus
- func (JobStatus) Cancelling() JobStatus
- func (JobStatus) Completed() JobStatus
- func (JobStatus) CompletedWithErrors() JobStatus
- func (JobStatus) CompletedWithErrorsAndSkipped() JobStatus
- func (JobStatus) CompletedWithSkipped() JobStatus
- func (j *JobStatus) EnhanceJobStatusInfo(skippedTransfers, failedTransfers, successfulTransfers bool) JobStatus
- func (JobStatus) Failed() JobStatus
- func (JobStatus) InProgress() JobStatus
- func (j *JobStatus) IsJobDone() bool
- func (j JobStatus) MarshalJSON() ([]byte, error)
- func (j *JobStatus) Parse(s string) error
- func (JobStatus) Paused() JobStatus
- func (js JobStatus) String() string
- func (j *JobStatus) UnmarshalJSON(b []byte) error
- type JsonOutputTemplate
- type LifecycleMgr
- type ListContainerResponse
- type ListJobSummaryResponse
- type ListJobTransfersRequest
- type ListJobTransfersResponse
- type ListJobsResponse
- type ListOfFiles
- type ListRequest
- type ListSyncJobSummaryResponse
- type Location
- func (Location) AllStandardLocations() []Location
- func (Location) Benchmark() Location
- func (Location) Blob() Location
- func (Location) BlobFS() Location
- func (Location) File() Location
- func (Location) GCP() Location
- func (l Location) IsFolderAware() bool
- func (l Location) IsLocal() bool
- func (l Location) IsRemote() bool
- func (Location) Local() Location
- func (Location) Pipe() Location
- func (Location) S3() Location
- func (l Location) String() string
- func (Location) Unknown() Location
- type LogLevel
- func (LogLevel) Debug() LogLevel
- func (LogLevel) Error() LogLevel
- func (LogLevel) Fatal() LogLevel
- func (LogLevel) Info() LogLevel
- func (LogLevel) None() LogLevel
- func (LogLevel) Panic() LogLevel
- func (ll *LogLevel) Parse(s string) error
- func (ll LogLevel) String() string
- func (ll LogLevel) ToPipelineLogLevel() pipeline.LogLevel
- func (LogLevel) Warning() LogLevel
- type MMF
- type Metadata
- func (m Metadata) ConcatenatedKeys() string
- func (m Metadata) ExcludeInvalidKey() (retainedMetadata Metadata, excludedMetadata Metadata, invalidKeyExists bool)
- func (m Metadata) Marshal() (string, error)
- func (m Metadata) ResolveInvalidKey() (resolvedMetadata Metadata, err error)
- func (m Metadata) ToAzBlobMetadata() azblob.Metadata
- func (m Metadata) ToAzFileMetadata() azfile.Metadata
- type NoCopy
- type OAuthTokenInfo
- func (credInfo *OAuthTokenInfo) GetNewTokenFromCert(ctx context.Context) (*adal.Token, error)
- func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.Token, error)
- func (credInfo *OAuthTokenInfo) GetNewTokenFromSecret(ctx context.Context) (*adal.Token, error)
- func (credInfo *OAuthTokenInfo) GetNewTokenFromTokenStore(ctx context.Context) (*adal.Token, error)
- func (credInfo OAuthTokenInfo) IsEmpty() bool
- func (credInfo *OAuthTokenInfo) Refresh(ctx context.Context) (*adal.Token, error)
- func (credInfo *OAuthTokenInfo) RefreshTokenWithUserCredential(ctx context.Context) (*adal.Token, error)
- type ObjectInfoExtension
- func (oie *ObjectInfoExtension) CacheControl() string
- func (oie *ObjectInfoExtension) ContentDisposition() string
- func (oie *ObjectInfoExtension) ContentEncoding() string
- func (oie *ObjectInfoExtension) ContentLanguage() string
- func (oie *ObjectInfoExtension) ContentMD5() []byte
- func (oie *ObjectInfoExtension) ContentType() string
- func (oie *ObjectInfoExtension) NewCommonMetadata() Metadata
- type OutputBuilder
- type OutputFormat
- type OverwriteOption
- type PageBlobTier
- func (pbt PageBlobTier) MarshalJSON() ([]byte, error)
- func (PageBlobTier) None() PageBlobTier
- func (PageBlobTier) P10() PageBlobTier
- func (PageBlobTier) P15() PageBlobTier
- func (PageBlobTier) P20() PageBlobTier
- func (PageBlobTier) P30() PageBlobTier
- func (PageBlobTier) P4() PageBlobTier
- func (PageBlobTier) P40() PageBlobTier
- func (PageBlobTier) P50() PageBlobTier
- func (PageBlobTier) P6() PageBlobTier
- func (pbt *PageBlobTier) Parse(s string) error
- func (pbt PageBlobTier) String() string
- func (pbt PageBlobTier) ToAccessTierType() azblob.AccessTierType
- func (pbt *PageBlobTier) UnmarshalJSON(b []byte) error
- type PartNumber
- type PerfConstraint
- func (PerfConstraint) CPU() PerfConstraint
- func (PerfConstraint) Disk() PerfConstraint
- func (PerfConstraint) PageBlobService() PerfConstraint
- func (pc *PerfConstraint) Parse(s string) error
- func (PerfConstraint) Service() PerfConstraint
- func (pc PerfConstraint) String() string
- func (PerfConstraint) Unknown() PerfConstraint
- type PerformanceAdvice
- type Predicate
- type PreservePermissionsOption
- type PrologueState
- type PromptDetails
- type PromptType
- type Prompter
- type ProxyLookupFunc
- type ResourceHTTPHeaders
- type ResourceString
- type ResponseOption
- type ResumeJobRequest
- type RetryCounter
- type RpcCmd
- func (RpcCmd) CancelJob() RpcCmd
- func (RpcCmd) CopyJobPartOrder() RpcCmd
- func (RpcCmd) GetJobFromTo() RpcCmd
- func (RpcCmd) GetJobLCMWrapper() RpcCmd
- func (RpcCmd) ListJobSummary() RpcCmd
- func (RpcCmd) ListJobTransfers() RpcCmd
- func (RpcCmd) ListJobs() RpcCmd
- func (RpcCmd) ListSyncJobSummary() RpcCmd
- func (RpcCmd) None() RpcCmd
- func (c *RpcCmd) Parse(s string) error
- func (c RpcCmd) Pattern() string
- func (RpcCmd) PauseJob() RpcCmd
- func (RpcCmd) ResumeJob() RpcCmd
- func (c RpcCmd) String() string
- type S3ClientFactory
- type S3CredentialInfo
- type S3URLParts
- type SPNInfo
- type SingleChunkReader
- type Status
- type TestOAuthInjection
- type TransferDetail
- type TransferDirection
- func (td *TransferDirection) AtomicLoad() TransferDirection
- func (td *TransferDirection) AtomicStore(newTransferDirection TransferDirection)
- func (TransferDirection) Download() TransferDirection
- func (td *TransferDirection) Parse(s string) error
- func (TransferDirection) S2SCopy() TransferDirection
- func (td TransferDirection) String() string
- func (TransferDirection) UnKnown() TransferDirection
- func (TransferDirection) Upload() TransferDirection
- type TransferStatus
- func (TransferStatus) All() TransferStatus
- func (ts *TransferStatus) AtomicLoad() TransferStatus
- func (ts *TransferStatus) AtomicStore(newTransferStatus TransferStatus)
- func (TransferStatus) BlobTierFailure() TransferStatus
- func (TransferStatus) Cancelled() TransferStatus
- func (TransferStatus) Failed() TransferStatus
- func (TransferStatus) FolderCreated() TransferStatus
- func (ts TransferStatus) MarshalJSON() ([]byte, error)
- func (TransferStatus) NotStarted() TransferStatus
- func (ts *TransferStatus) Parse(s string) error
- func (ts TransferStatus) ShouldTransfer() bool
- func (TransferStatus) SkippedBlobHasSnapshots() TransferStatus
- func (TransferStatus) SkippedEntityAlreadyExists() TransferStatus
- func (TransferStatus) Started() TransferStatus
- func (ts TransferStatus) String() string
- func (TransferStatus) Success() TransferStatus
- func (TransferStatus) TierAvailabilityCheckFailure() TransferStatus
- func (ts *TransferStatus) UnmarshalJSON(b []byte) error
- type Transfers
- type URLExtension
- type URLStringExtension
- type UUID
- type UserOAuthTokenManager
- func (uotm *UserOAuthTokenManager) CertLogin(tenantID, activeDirectoryEndpoint, certPath, certPass, applicationID string, ...) (*OAuthTokenInfo, error)
- func (uotm *UserOAuthTokenManager) GetTokenInfo(ctx context.Context) (*OAuthTokenInfo, error)
- func (uotm *UserOAuthTokenManager) HasCachedToken() (bool, error)
- func (uotm *UserOAuthTokenManager) MSILogin(ctx context.Context, identityInfo IdentityInfo, persist bool) (*OAuthTokenInfo, error)
- func (uotm *UserOAuthTokenManager) RemoveCachedToken() error
- func (uotm *UserOAuthTokenManager) SecretLogin(tenantID, activeDirectoryEndpoint, secret, applicationID string, persist bool) (*OAuthTokenInfo, error)
- func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint string, persist bool) (*OAuthTokenInfo, error)
- type Version
- type WaitReason
- func (WaitReason) Body() WaitReason
- func (WaitReason) BodyReReadDueToMem() WaitReason
- func (WaitReason) BodyReReadDueToSpeed() WaitReason
- func (WaitReason) Cancelled() WaitReason
- func (WaitReason) ChunkDone() WaitReason
- func (WaitReason) CreateLocalFile() WaitReason
- func (WaitReason) DiskIO() WaitReason
- func (WaitReason) Epilogue() WaitReason
- func (WaitReason) FilePacer() WaitReason
- func (WaitReason) HeaderResponse() WaitReason
- func (WaitReason) LockDestination() WaitReason
- func (WaitReason) ModifiedTimeRefresh() WaitReason
- func (WaitReason) Nothing() WaitReason
- func (WaitReason) OpenLocalSource() WaitReason
- func (WaitReason) PriorChunk() WaitReason
- func (WaitReason) QueueToWrite() WaitReason
- func (WaitReason) RAMToSchedule() WaitReason
- func (WaitReason) S2SCopyOnWire() WaitReason
- func (WaitReason) Sorting() WaitReason
- func (wr WaitReason) String() string
- func (WaitReason) WorkerGR() WaitReason
- func (WaitReason) XferStart() WaitReason
- type WorkController
Constants ¶
const (
	AZCOPY_PATH_SEPARATOR_STRING = "/"
	AZCOPY_PATH_SEPARATOR_CHAR   = '/'
	OS_PATH_SEPARATOR            = string(os.PathSeparator)
	EXTENDED_PATH_PREFIX         = `\\?\`
	EXTENDED_UNC_PATH_PREFIX     = `\\?\UNC`
	Dev_Null                     = os.DevNull

	// this is the perm that AzCopy has used throughout its preview. So, while we considered relaxing it to 0666
	// we decided that the best option was to leave it as is, and only relax it if user feedback so requires.
	DEFAULT_FILE_PERM = 0644

	// Since we haven't updated the Go SDKs to handle CPK just yet, we need to detect CPK related errors
	// and inform the user that we don't support CPK yet.
	CPK_ERROR_SERVICE_CODE = "BlobUsesCustomerSpecifiedEncryption"
)
const (
	DefaultBlockBlobBlockSize      = 8 * 1024 * 1024
	MaxBlockBlobBlockSize          = 4000 * 1024 * 1024
	MaxAppendBlobBlockSize         = 4 * 1024 * 1024
	DefaultPageBlobChunkSize       = 4 * 1024 * 1024
	DefaultAzureFileChunkSize      = 4 * 1024 * 1024
	MaxNumberOfBlocksPerBlob       = 50000
	BlockSizeThreshold             = 256 * 1024 * 1024
	MinParallelChunkCountThreshold = 4 /* minimum number of chunks in parallel for AzCopy to be performant. */
)
const ApplicationID = "579a7132-0e58-4d80-b1e1-7a1e2d337859"
ApplicationID represents the 1st party ApplicationID for AzCopy. The 3rd party test ApplicationID for AzCopy is "a45c21f4-7066-40b4-97d8-14f4313c3caa".
const AzcopyVersion = "10.12.2"
const BackupModeFlagName = "backup" // original name, backup mode, matches the name used for the same thing in Robocopy
const BenchmarkFinalDisclaimer = `` /* 226-byte string literal not displayed */
const BenchmarkLinuxExtraDisclaimer = `` /* 159-byte string literal not displayed */
const BenchmarkPreviewNotice = "The benchmark feature is currently in Preview status."
const BenchmarkUserAgent = "Benchmark " + UserAgent
const DefaultActiveDirectoryEndpoint = "https://login.microsoftonline.com"
const DefaultTenantID = "common"
const EncryptionAlgorithmAES256 string = "AES256"
The default supported encryption algorithm.
const EnvVarOAuthTokenInfo = "AZCOPY_OAUTH_TOKEN_INFO"
EnvVarOAuthTokenInfo passes oauth token info into AzCopy through environment variable. Note: this is only used for testing, and not encouraged to be used in production environments.
const ErrorCodeEnvVarOAuthTokenInfoNotSet = "environment variable AZCOPY_OAUTH_TOKEN_INFO is not set"
ErrorCodeEnvVarOAuthTokenInfoNotSet defines error code when environment variable AZCOPY_OAUTH_TOKEN_INFO is not set.
const FileCountDefault = 100
const FileCountParam = "file-count"
const GCPImportUserAgent = "GCPImport " + UserAgent
const IMDSAPIVersionArcVM = "2019-11-01"
const IMDSAPIVersionAzureVM = "2018-02-01"
const IncludeAfterFlagName = "include-after"
const IncludeBeforeFlagName = "include-before"
const MSIEndpointArcVM = "http://localhost:40342/metadata/identity/oauth2/token"
const MSIEndpointAzureVM = "http://169.254.169.254/metadata/identity/oauth2/token"
const PreserveOwnerDefault = true
const PreserveOwnerFlagName = "preserve-owner"
const Resource = "https://storage.azure.com"
Resource used in azure storage OAuth authentication
const S3ImportUserAgent = "S3Import " + UserAgent
const SigAzure = azbfs.SigAzure
const SigXAmzForAws = azbfs.SigXAmzForAws
const SizePerFileParam = "size-per-file"
const TokenRefreshSourceTokenStore = "tokenstore"
TokenRefreshSourceTokenStore indicates enabling azcopy oauth integration through tokenstore. Note: This should be only used for internal integrations.
const TryEquals string = "Try=" // TODO: refactor so that this can be used by the retry policies too? So that when you search the logs for Try= you are guaranteed to find both types of retry (i.e. request send retries, and body read retries)
const UserAgent = "AzCopy/" + AzcopyVersion
const WSAECONNREFUSED = 10061
Refer to https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2 for details
Variables ¶
var AzcopyJobPlanFolder string
var BenchmarkLmt = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
var ChunkWriterAlreadyFailed = errors.New("chunk Writer already failed")
var DefaultHashValidationOption = EHashValidationOption.FailIfDifferent()
var DefaultInvalidMetadataHandleOption = EInvalidMetadataHandleOption.ExcludeIfInvalid()
var DefaultTokenExpiryWithinThreshold = time.Minute * 10
var EBenchMarkMode = BenchMarkMode(0)
var EBlobType = BlobType(0)
var EBlockBlobTier = BlockBlobTier(0)
var ECompressionType = CompressionType(0)
var ECredentialType = CredentialType(0)
var EDeleteDestination = DeleteDestination(0)
var EDeleteSnapshotsOption = DeleteSnapshotsOption(0)
var EEntityType = EntityType(0)
var EEnvironmentVariable = EnvironmentVariable{}
var EExitCode = ExitCode(0)
var EFolderPropertiesOption = FolderPropertyOption(0)
var EFromTo = FromTo(0)
var EHashValidationOption = HashValidationOption(0)
var EInvalidMetadataHandleOption = InvalidMetadataHandleOption(0)
var EJobPriority = JobPriority(0)
var EJobStatus = JobStatus(0)
var ELocation = Location(0)
var ELogLevel = LogLevel(pipeline.LogNone)
var EOutputFormat = OutputFormat(0)
var EOverwriteOption = OverwriteOption(0)
var EPageBlobTier = PageBlobTier(0)
var EPerfConstraint = PerfConstraint(0)
var EPreservePermissionsOption = PreservePermissionsOption(0)
var EPromptType = PromptType("")
var EResponseOption = ResponseOption{ResponseType: "", UserFriendlyResponseType: "", ResponseString: ""}
represents one possible response
var ERpcCmd = RpcCmd("")
var ETransferDirection = TransferDirection(0)
var ETransferStatus = TransferStatus(0)
var EWaitReason = WaitReason{0, ""}
var GlobalTestOAuthInjection = TestOAuthInjection{
	DoTokenRefreshInjection: false,
	TokenRefreshDuration:    time.Second * 10,
}
GlobalTestOAuthInjection is the global setting for OAuth testing injection control
var ProxyLookupTimeoutError = errors.New("proxy lookup timed out")
var RootDriveRegex = regexp.MustCompile(`(?i)(^[A-Z]:\/?$)`)
The regex doesn't require a / on the ending, it just requires something similar to the following: C:, C:/, //myShare, //myShare/. Demonstrated at: https://regexr.com/4mf6l
var VisibleEnvironmentVariables = []EnvironmentVariable{
	EEnvironmentVariable.LogLocation(),
	EEnvironmentVariable.JobPlanLocation(),
	EEnvironmentVariable.ConcurrencyValue(),
	EEnvironmentVariable.TransferInitiationPoolSize(),
	EEnvironmentVariable.EnumerationPoolSize(),
	EEnvironmentVariable.DisableHierarchicalScanning(),
	EEnvironmentVariable.ParallelStatFiles(),
	EEnvironmentVariable.BufferGB(),
	EEnvironmentVariable.AWSAccessKeyID(),
	EEnvironmentVariable.AWSSecretAccessKey(),
	EEnvironmentVariable.GoogleAppCredentials(),
	EEnvironmentVariable.ShowPerfStates(),
	EEnvironmentVariable.PacePageBlobs(),
	EEnvironmentVariable.AutoTuneToCpu(),
	EEnvironmentVariable.CacheProxyLookup(),
	EEnvironmentVariable.DefaultServiceApiVersion(),
	EEnvironmentVariable.UserAgentPrefix(),
	EEnvironmentVariable.AWSAccessKeyID(),
	EEnvironmentVariable.AWSSecretAccessKey(),
	EEnvironmentVariable.ClientSecret(),
	EEnvironmentVariable.CertificatePassword(),
	EEnvironmentVariable.AutoLoginType(),
	EEnvironmentVariable.TenantID(),
	EEnvironmentVariable.AADEndpoint(),
	EEnvironmentVariable.ApplicationID(),
	EEnvironmentVariable.CertificatePath(),
	EEnvironmentVariable.ManagedIdentityClientID(),
	EEnvironmentVariable.ManagedIdentityObjectID(),
	EEnvironmentVariable.ManagedIdentityResourceString(),
	EEnvironmentVariable.RequestTryTimeout(),
	EEnvironmentVariable.CPKEncryptionKey(),
	EEnvironmentVariable.CPKEncryptionKeySHA256(),
	EEnvironmentVariable.DisableSyslog(),
	EEnvironmentVariable.MimeMapping(),
}
This array needs to be updated when a new public environment variable is added. Things are here, rather than in command line parameters, for one of two reasons: 1. They are optional and obscure (e.g. performance tuning parameters), or 2. They are authentication secrets, which we do not accept on the command line
Functions ¶
func AtomicMorphInt32 ¶
func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{}
AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt64 ¶
func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{}
AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint32 ¶
func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{}
AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint64 ¶
func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{}
AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func ConsolidatePathSeparators ¶
Replace azcopy path separators (/) with the OS path separator
func CreateBlobCredential ¶
func CreateBlobCredential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) azblob.Credential
CreateBlobCredential creates Blob credential according to credential info.
func CreateBlobFSCredential ¶
func CreateBlobFSCredential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) azbfs.Credential
CreateBlobFSCredential creates BlobFS credential according to credential info.
func CreateDirectoryIfNotExist ¶
func CreateDirectoryIfNotExist(directory string, tracker FolderCreationTracker) error
func CreateGCPClient ¶
GCP credential factory related methods
func CreateParentDirectoryIfNotExist ¶
func CreateParentDirectoryIfNotExist(destinationPath string, tracker FolderCreationTracker) error
func CreateS3Client ¶
func CreateS3Client(ctx context.Context, credInfo CredentialInfo, option CredentialOpOptions) (*minio.Client, error)
S3 credential related factory methods
func CreateS3Credential ¶
func CreateS3Credential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) (*credentials.Credentials, error)
CreateS3Credential creates AWS S3 credential according to credential info.
func DeterminePathSeparator ¶
func DocumentationForDependencyOnChangeDetection ¶
func DocumentationForDependencyOnChangeDetection()
func EnvVarOAuthTokenInfoExists ¶
func EnvVarOAuthTokenInfoExists() bool
EnvVarOAuthTokenInfoExists verifies if the environment variable for OAuthTokenInfo is specified. The method returns true if the environment variable is set. Note: This is useful only for checking whether the env var exists; please use getTokenInfoFromEnvVar directly when getting the token info is necessary.
func GenerateFullPath ¶
it's possible that enumerators didn't form rootPath and childPath correctly for them to be combined plainly, so we must behave defensively and make sure the full path is correct
func GetBlocksRoundedUp ¶
GetBlocksRoundedUp returns the number of blocks for the given size, rounded up
func GetClientProvidedKey ¶
func GetClientProvidedKey(options CpkOptions) azblob.ClientProvidedKeyOptions
func GetJsonStringFromTemplate ¶
func GetJsonStringFromTemplate(template interface{}) string
func IffStringNotNil ¶
used to get properties in a safe, but not overly verbose, manner
func IsErrorEnvVarOAuthTokenInfoNotSet ¶
IsErrorEnvVarOAuthTokenInfoNotSet verifies if an error indicates environment variable AZCOPY_OAUTH_TOKEN_INFO is not set.
func IsForceLoggingDisabled ¶
func IsForceLoggingDisabled() bool
func IsShortPath ¶
func NewAzCopyLogSanitizer ¶
func NewAzCopyLogSanitizer() pipeline.LogSanitizer
func NewDecompressingWriter ¶
func NewDecompressingWriter(destination io.WriteCloser, ct CompressionType) io.WriteCloser
NewDecompressingWriter returns a WriteCloser which decompresses the data that is written to it, before passing the decompressed data on to a final destination. This decompressor is intended to work with compressed data wrapped in either the ZLib headers or the slightly larger Gzip headers. Both of those formats compress a single file (often a .tar archive in the case of Gzip). So there is no need to expand the decompressed info out into multiple files (as we would have to do, if we were to support "zip" compression). See https://stackoverflow.com/a/20765054
func NewNullHasher ¶
func NewRandomDataGenerator ¶
func NewRandomDataGenerator(length int64) *randomDataGenerator
func NewReadLogFunc ¶
func OSOpenFile ¶
NOTE: OSOpenFile is not safe to use on directories on Windows. See the comment on the Windows version of this routine.
func PanicIfErr ¶
func PanicIfErr(err error)
captures the common logic of exiting if there's an expected error
func RedactSecretQueryParam ¶
func SetBackupMode ¶
func ToClientProvidedKeyOptions ¶
func ToClientProvidedKeyOptions(cpkInfo CpkInfo, cpkScopeInfo CpkScopeInfo) azblob.ClientProvidedKeyOptions
func ToExtendedPath ¶
ToExtendedPath converts short paths to an extended path.
func ToShortPath ¶
ToShortPath converts an extended path to a short path.
Types ¶
type AtomicMorpherInt32 ¶
AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function. The AtomicMorpher callback is passed a startValue and based on this value it returns what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt64 ¶
AtomicMorpherInt64 identifies a method passed to and invoked by the AtomicMorphInt64 function. The AtomicMorpher callback is passed a startValue and based on this value it returns what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint32 ¶
AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorphUint32 function. The AtomicMorpher callback is passed a startValue and based on this value it returns what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint64 ¶
AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function. The AtomicMorpher callback is passed a startValue and based on this value it returns what the new value should be and the result that AtomicMorph should return to its caller.
type AzError ¶
type AzError struct {
// contains filtered or unexported fields
}
AzError is to handle AzCopy internal errors in a fine way
var EAzError AzError
func NewAzError ¶
NewAzError composes an AzError with the given code and message
func (AzError) InvalidBlobName ¶
func (AzError) LoginCredMissing ¶
type BenchMarkMode ¶
type BenchMarkMode uint8
BenchMarkMode enumerates values for the AzCopy bench command. Valid values are Upload or Download.
func (BenchMarkMode) Download ¶
func (BenchMarkMode) Download() BenchMarkMode
func (*BenchMarkMode) Parse ¶
func (bm *BenchMarkMode) Parse(s string) error
func (BenchMarkMode) String ¶
func (bm BenchMarkMode) String() string
func (BenchMarkMode) Upload ¶
func (BenchMarkMode) Upload() BenchMarkMode
type BlobTags ¶
BlobTags is a map of key-value pairs
func ToCommonBlobTagsMap ¶
func (BlobTags) ToAzBlobTagsMap ¶
func (bt BlobTags) ToAzBlobTagsMap() azblob.BlobTagsMap
ToAzBlobTagsMap converts BlobTags to azblob's BlobTagsMap
type BlobTransferAttributes ¶
type BlobTransferAttributes struct {
	BlobType                 BlobType              // The type of a blob - BlockBlob, PageBlob, AppendBlob
	ContentType              string                // The content type specified for the blob.
	ContentEncoding          string                // Specifies which content encodings have been applied to the blob.
	ContentLanguage          string                // Specifies the language of the content
	ContentDisposition       string                // Specifies the content disposition
	CacheControl             string                // Specifies the cache control header
	BlockBlobTier            BlockBlobTier         // Specifies the tier to set on the block blobs.
	PageBlobTier             PageBlobTier          // Specifies the tier to set on the page blobs.
	Metadata                 string                // User-defined Name-value pairs associated with the blob
	NoGuessMimeType          bool                  // represents user decision to interpret the content-encoding from source file
	PreserveLastModifiedTime bool                  // when downloading, tell engine to set file's timestamp to timestamp of blob
	PutMd5                   bool                  // when uploading, should we create and PUT Content-MD5 hashes
	MD5ValidationOption      HashValidationOption  // when downloading, how strictly should we validate MD5 hashes?
	BlockSizeInBytes         int64                 // when uploading/downloading/copying, specify the size of each chunk
	DeleteSnapshotsOption    DeleteSnapshotsOption // when deleting, specify what to do with the snapshots
	BlobTagsString           string                // when user explicitly provides blob tags
}
This struct represents the optional attributes for the blob request header
type BlobType ¶
type BlobType uint8
Enumerates the values for blob type.
func FromAzBlobType ¶
func (BlobType) AppendBlob ¶
func (*BlobType) ToAzBlobType ¶
ToAzBlobType returns the equivalent azblob.BlobType for given string.
type BlockBlobTier ¶
type BlockBlobTier uint8
func (BlockBlobTier) Archive ¶
func (BlockBlobTier) Archive() BlockBlobTier
func (BlockBlobTier) Cool ¶
func (BlockBlobTier) Cool() BlockBlobTier
func (BlockBlobTier) Hot ¶
func (BlockBlobTier) Hot() BlockBlobTier
func (BlockBlobTier) MarshalJSON ¶
func (bbt BlockBlobTier) MarshalJSON() ([]byte, error)
func (BlockBlobTier) None ¶
func (BlockBlobTier) None() BlockBlobTier
func (*BlockBlobTier) Parse ¶
func (bbt *BlockBlobTier) Parse(s string) error
func (BlockBlobTier) String ¶
func (bbt BlockBlobTier) String() string
func (BlockBlobTier) ToAccessTierType ¶
func (bbt BlockBlobTier) ToAccessTierType() azblob.AccessTierType
func (*BlockBlobTier) UnmarshalJSON ¶
func (bbt *BlockBlobTier) UnmarshalJSON(b []byte) error
Implementing UnmarshalJSON() method for type BlockBlobTier.
type ByteSlice ¶
type ByteSlice []byte
type ByteSliceExtension ¶
type ByteSliceExtension struct {
ByteSlice
}
func (ByteSliceExtension) RemoveBOM ¶
func (bs ByteSliceExtension) RemoveBOM() []byte
RemoveBOM removes any BOM from the byte slice
type ByteSlicePooler ¶
type ByteSlicePooler interface {
	RentSlice(desiredLength int64) []byte
	ReturnSlice(slice []byte)
	Prune()
}
A pool of byte slices. Like sync.Pool, but strongly-typed to byte slices
func NewMultiSizeSlicePool ¶
func NewMultiSizeSlicePool(maxSliceLength int64) ByteSlicePooler
Create new slice pool capable of pooling slices up to maxSliceLength in size
type CPUMonitor ¶
type CPUMonitor interface {
CPUContentionExists() bool
}
func NewCalibratedCpuUsageMonitor ¶
func NewCalibratedCpuUsageMonitor() CPUMonitor
NewCalibratedCpuUsageMonitor should be called early in the app's life cycle, before we create any significant CPU load, so that its self-calibration will be accurate
func NewNullCpuMonitor ¶
func NewNullCpuMonitor() CPUMonitor
type CacheLimiter ¶
type CacheLimiter interface {
	TryAdd(count int64, useRelaxedLimit bool) (added bool)
	WaitUntilAdd(ctx context.Context, count int64, useRelaxedLimit Predicate) error
	Remove(count int64)
	Limit() int64
}
Used to limit the amounts of things. E.g. amount of in-flight data in RAM, to keep it at an acceptable level. Also used for number of open files (since that's limited on Linux). In the case of RAM usage, for downloads, network is producer and disk is consumer, while for uploads the roles are reversed. In either case, if the producer is faster than the consumer, this CacheLimiter is necessary to prevent unbounded RAM usage.
func NewCacheLimiter ¶
func NewCacheLimiter(limit int64) CacheLimiter
type ChunkID ¶
type ChunkID struct {
	Name string
	// contains filtered or unexported fields
}
Identifies a chunk. Always create with NewChunkID
func (ChunkID) IsPseudoChunk ¶
func (ChunkID) OffsetInFile ¶
func (ChunkID) SetCompletionNotificationSent ¶
func (id ChunkID) SetCompletionNotificationSent()
type ChunkReaderSourceFactory ¶
type ChunkReaderSourceFactory func() (CloseableReaderAt, error)
Factory method for data source for singleChunkReader
type ChunkStatusLogger ¶
type ChunkStatusLogger interface {
	LogChunkStatus(id ChunkID, reason WaitReason)
	IsWaitingOnFinalBodyReads() bool
}
type ChunkStatusLoggerCloser ¶
type ChunkStatusLoggerCloser interface {
	ChunkStatusLogger
	GetCounts(td TransferDirection) []chunkStatusCount
	GetPrimaryPerfConstraint(td TransferDirection, rc RetryCounter) PerfConstraint
	FlushLog() // not close, because we had issues with writes coming in after this
	// TODO: see if that issue still exists
	CloseLogger()
}
func NewChunkStatusLogger ¶
func NewChunkStatusLogger(jobID JobID, cpuMon CPUMonitor, logFileFolder string, enableOutput bool) ChunkStatusLoggerCloser
type ChunkedFileWriter ¶
type ChunkedFileWriter interface {
	// WaitToScheduleChunk blocks until enough RAM is available to handle the given chunk, then it
	// "reserves" that amount of RAM in the CacheLimiter and returns.
	WaitToScheduleChunk(ctx context.Context, id ChunkID, chunkSize int64) error

	// EnqueueChunk hands the given chunkContents over to the ChunkedFileWriter, to be written to disk.
	// Because ChunkedFileWriter writes sequentially, the actual time of writing is not known to the caller.
	// All the caller knows, is that responsibility for writing the chunk has been passed to the ChunkedFileWriter.
	// While any error may be returned immediately, errors are more likely to be returned later, on either a subsequent
	// call to this routine or on the final return to Flush.
	// After the chunk is written to disk, its reserved memory byte allocation is automatically subtracted from the CacheLimiter.
	EnqueueChunk(ctx context.Context, id ChunkID, chunkSize int64, chunkContents io.Reader, retryable bool) error

	// Flush will block until all the chunks have been written to disk. err will be non-nil if and only if any chunk failed to write.
	// Flush must be called exactly once, after all chunks have been enqueued with EnqueueChunk.
	Flush(ctx context.Context) (md5HashOfFileAsWritten []byte, err error)

	// MaxRetryPerDownloadBody returns the maximum number of retries that will be done for the download of a single chunk body
	MaxRetryPerDownloadBody() int
}
Used to write all the chunks to a disk file
func NewChunkedFileWriter ¶
func NewChunkedFileWriter(ctx context.Context, slicePool ByteSlicePooler, cacheLimiter CacheLimiter, chunkLogger ChunkStatusLogger, file io.WriteCloser, numChunks uint32, maxBodyRetries int, md5ValidationOption HashValidationOption, sourceMd5Exists bool) ChunkedFileWriter
type CloseableReaderAt ¶
Simple aggregation of existing io interfaces
type CompressionType ¶
type CompressionType uint8
func GetCompressionType ¶
func GetCompressionType(contentEncoding string) (CompressionType, error)
func (CompressionType) GZip ¶
func (CompressionType) GZip() CompressionType
func (CompressionType) None ¶
func (CompressionType) None() CompressionType
func (CompressionType) String ¶
func (ct CompressionType) String() string
func (CompressionType) Unsupported ¶
func (CompressionType) Unsupported() CompressionType
func (CompressionType) ZLib ¶
func (CompressionType) ZLib() CompressionType
type CopyJobPartOrderErrorType ¶
type CopyJobPartOrderErrorType string
var ECopyJobPartOrderErrorType CopyJobPartOrderErrorType
func (CopyJobPartOrderErrorType) NoTransfersScheduledErr ¶
func (CopyJobPartOrderErrorType) NoTransfersScheduledErr() CopyJobPartOrderErrorType
type CopyJobPartOrderRequest ¶
type CopyJobPartOrderRequest struct {
	Version         Version         // version of azcopy
	JobID           JobID           // Guid - job identifier
	PartNum         PartNumber      // part number of the job
	IsFinalPart     bool            // to determine the final part for a specific job
	ForceWrite      OverwriteOption // to determine if the existing needs to be overwritten or not. If set to true, existing blobs are overwritten
	ForceIfReadOnly bool            // Supplements ForceWrite with an additional setting for Azure Files objects with read-only attribute
	AutoDecompress  bool            // if true, source data with encodings that represent compression are automatically decompressed when downloading
	Priority        JobPriority     // priority of the task
	FromTo          FromTo
	Fpo             FolderPropertyOption // passed in from front-end to ensure that front-end and STE agree on the desired behaviour for the job

	// list of blobTypes to exclude.
	ExcludeBlobType []azblob.BlobType

	SourceRoot      ResourceString
	DestinationRoot ResourceString

	Transfers      Transfers
	LogLevel       LogLevel
	BlobAttributes BlobTransferAttributes
	CommandString  string // commandString holds the user given command which is logged to the Job log file
	CredentialInfo CredentialInfo

	PreserveSMBPermissions         PreservePermissionsOption
	PreserveSMBInfo                bool
	S2SGetPropertiesInBackend      bool
	S2SSourceChangeValidation      bool
	DestLengthValidation           bool
	S2SInvalidMetadataHandleOption InvalidMetadataHandleOption
	S2SPreserveBlobTags            bool
	CpkOptions                     CpkOptions
}
This struct represents the job info (a single part) to be sent to the storage engine
type CopyJobPartOrderResponse ¶
type CopyJobPartOrderResponse struct {
	ErrorMsg   CopyJobPartOrderErrorType
	JobStarted bool
}
type CopyTransfer ¶
type CopyTransfer struct {
	Source           string
	Destination      string
	EntityType       EntityType
	LastModifiedTime time.Time // represents the last modified time of source which ensures that source hasn't changed while transferring
	SourceSize       int64     // size of the source entity in bytes.

	// Properties for service to service copy (some also used in upload or download too)
	ContentType        string
	ContentEncoding    string
	ContentDisposition string
	ContentLanguage    string
	CacheControl       string
	ContentMD5         []byte
	Metadata           Metadata

	// Properties for S2S blob copy
	BlobType      azblob.BlobType
	BlobTier      azblob.AccessTierType
	BlobVersionID string

	// Blob index tags categorize data in your storage account utilizing key-value tag attributes
	BlobTags BlobTags
}
This struct represents a single transfer entry with source and destination details. ** DO NOT construct directly. Use cmd.storedObject.ToNewCopyTransfer **
type CountPerSecond ¶
type CountPerSecond interface {
	// Add atomically adds delta to *addr and returns the new value.
	// To subtract a signed positive constant value c, do Add(^uint64(c-1)).
	Add(delta uint64) uint64 // Pass 0 to get the current count value
	LatestRate() float64
	Reset()
}
CountPerSecond ...
func NewCountPerSecond ¶
func NewCountPerSecond() CountPerSecond
type CpkInfo ¶
type CpkInfo struct {
	// The algorithm used to produce the encryption key hash.
	// Currently, the only accepted value is "AES256".
	// Must be provided if the x-ms-encryption-key header is provided.
	EncryptionAlgorithm *string

	// Optional. Specifies the encryption key to use to encrypt the data provided in the request.
	// If not specified, encryption is performed with the root account encryption key.
	EncryptionKey *string

	// The SHA-256 hash of the provided encryption key.
	// Must be provided if the x-ms-encryption-key header is provided.
	EncryptionKeySha256 *string
}
func GetCpkInfo ¶
type CpkOptions ¶
type CpkOptions struct {
	// Optional flag to encrypt user data with a user provided key.
	// The key is provided in the REST request itself.
	// The provided key (EncryptionKey and EncryptionKeySHA256) and its hash will be fetched from environment variables.
	// EncryptionAlgorithm is set to "AES256" by default.
	CpkInfo bool

	// Key is present in Azure Key Vault and Azure Key Vault is linked with the storage account.
	// The provided key name will be fetched from Azure Key Vault and will be used to encrypt the data.
	CpkScopeInfo string

	// Flag to check if the source is encrypted by a user provided key or not.
	// True only if the user wishes to download a source encrypted by a user provided key.
	IsSourceEncrypted bool
}
type CpkScopeInfo ¶
type CpkScopeInfo struct {
EncryptionScope *string
}
CpkScopeInfo specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
func GetCpkScopeInfo ¶
func GetCpkScopeInfo(cpkScopeInfo string) CpkScopeInfo
func (CpkScopeInfo) Marshal ¶
func (csi CpkScopeInfo) Marshal() (string, error)
type CredCache ¶
type CredCache struct {
// contains filtered or unexported fields
}
CredCache manages credential caches. On Linux it uses the keyring; the session keyring is chosen, and the session keyring should have been created at the user's first login (i.e. by PAM), so it is inherited by processes created from the login session. When the user logs out, the session keyring is recycled.
func NewCredCache ¶
func NewCredCache(options CredCacheOptions) *CredCache
NewCredCache creates a cred cache.
func (*CredCache) HasCachedToken ¶
HasCachedToken returns whether there is a cached token for the currently executing user.
func (*CredCache) LoadToken ¶
func (c *CredCache) LoadToken() (*OAuthTokenInfo, error)
LoadToken gets the cached oauth token.
func (*CredCache) RemoveCachedToken ¶
RemoveCachedToken deletes the cached token.
func (*CredCache) SaveToken ¶
func (c *CredCache) SaveToken(token OAuthTokenInfo) error
SaveToken saves an oauth token.
type CredCacheInternalIntegration ¶
type CredCacheInternalIntegration struct {
// contains filtered or unexported fields
}
CredCacheInternalIntegration manages credential caches with Gnome keyring. Note: This should be only used for internal integration.
func NewCredCacheInternalIntegration ¶
func NewCredCacheInternalIntegration(options CredCacheOptions) *CredCacheInternalIntegration
NewCredCacheInternalIntegration creates a cred cache.
func (*CredCacheInternalIntegration) HasCachedToken ¶
func (c *CredCacheInternalIntegration) HasCachedToken() (bool, error)
HasCachedToken returns whether there is a cached token for the currently executing user.
func (*CredCacheInternalIntegration) LoadToken ¶
func (c *CredCacheInternalIntegration) LoadToken() (*OAuthTokenInfo, error)
LoadToken gets the cached oauth token.
func (*CredCacheInternalIntegration) RemoveCachedToken ¶
func (c *CredCacheInternalIntegration) RemoveCachedToken() error
RemoveCachedToken deletes the cached token.
func (*CredCacheInternalIntegration) SaveToken ¶
func (c *CredCacheInternalIntegration) SaveToken(token OAuthTokenInfo) error
SaveToken saves an oauth token.
type CredCacheOptions ¶
type CredCacheOptions struct {
	// Used by credCache in Windows.
	DPAPIFilePath string

	// Used by credCacheSegmented in Windows, and keyring in Linux.
	KeyName string

	// Used by keychain in Mac OS, and gnome keyring in Linux.
	ServiceName string
	AccountName string
}
CredCacheOptions contains options that can be used by different kinds of cred caches on different platforms.
type CredentialInfo ¶
type CredentialInfo struct {
	CredentialType    CredentialType
	OAuthTokenInfo    OAuthTokenInfo
	S3CredentialInfo  S3CredentialInfo
	GCPCredentialInfo GCPCredentialInfo
}
CredentialInfo contains essential credential info which needs to be passed between modules, and is used when creating the Azure storage client Credential.
type CredentialOpOptions ¶
type CredentialOpOptions struct {
	LogInfo  func(string)
	LogError func(string)
	Panic    func(error)
	CallerID string

	// Used to cancel operations, if fatal error happened during operation.
	Cancel context.CancelFunc
}
CredentialOpOptions contains credential operations' parameters.
type CredentialType ¶
type CredentialType uint8
CredentialType defines the different types of credentials
func (CredentialType) Anonymous ¶
func (CredentialType) Anonymous() CredentialType
func (CredentialType) GoogleAppCredentials ¶
func (CredentialType) GoogleAppCredentials() CredentialType
func (CredentialType) OAuthToken ¶
func (CredentialType) OAuthToken() CredentialType
func (*CredentialType) Parse ¶
func (ct *CredentialType) Parse(s string) error
func (CredentialType) S3AccessKey ¶
func (CredentialType) S3AccessKey() CredentialType
func (CredentialType) S3PublicBucket ¶
func (CredentialType) S3PublicBucket() CredentialType
func (CredentialType) SharedKey ¶
func (CredentialType) SharedKey() CredentialType
func (CredentialType) String ¶
func (ct CredentialType) String() string
func (CredentialType) Unknown ¶
func (CredentialType) Unknown() CredentialType
type DeleteDestination ¶
type DeleteDestination uint32
func (DeleteDestination) False ¶
func (DeleteDestination) False() DeleteDestination
func (*DeleteDestination) Parse ¶
func (dd *DeleteDestination) Parse(s string) error
func (DeleteDestination) Prompt ¶
func (DeleteDestination) Prompt() DeleteDestination
func (DeleteDestination) String ¶
func (dd DeleteDestination) String() string
func (DeleteDestination) True ¶
func (DeleteDestination) True() DeleteDestination
type DeleteSnapshotsOption ¶
type DeleteSnapshotsOption uint8
func (DeleteSnapshotsOption) Include ¶
func (DeleteSnapshotsOption) Include() DeleteSnapshotsOption
func (DeleteSnapshotsOption) None ¶
func (DeleteSnapshotsOption) None() DeleteSnapshotsOption
func (DeleteSnapshotsOption) Only ¶
func (DeleteSnapshotsOption) Only() DeleteSnapshotsOption
func (*DeleteSnapshotsOption) Parse ¶
func (d *DeleteSnapshotsOption) Parse(s string) error
func (DeleteSnapshotsOption) String ¶
func (d DeleteSnapshotsOption) String() string
func (DeleteSnapshotsOption) ToDeleteSnapshotsOptionType ¶
func (d DeleteSnapshotsOption) ToDeleteSnapshotsOptionType() azblob.DeleteSnapshotsOptionType
type EntityType ¶
type EntityType uint8
func (EntityType) File ¶
func (EntityType) File() EntityType
func (EntityType) Folder ¶
func (EntityType) Folder() EntityType
func (EntityType) String ¶
func (e EntityType) String() string
type EnvironmentVariable ¶
func (EnvironmentVariable) AADEndpoint ¶
func (EnvironmentVariable) AADEndpoint() EnvironmentVariable
func (EnvironmentVariable) AWSAccessKeyID ¶
func (EnvironmentVariable) AWSAccessKeyID() EnvironmentVariable
func (EnvironmentVariable) AWSSecretAccessKey ¶
func (EnvironmentVariable) AWSSecretAccessKey() EnvironmentVariable
func (EnvironmentVariable) AccountKey ¶
func (EnvironmentVariable) AccountKey() EnvironmentVariable
func (EnvironmentVariable) AccountName ¶
func (EnvironmentVariable) AccountName() EnvironmentVariable
func (EnvironmentVariable) ApplicationID ¶
func (EnvironmentVariable) ApplicationID() EnvironmentVariable
func (EnvironmentVariable) AutoLoginType ¶
func (EnvironmentVariable) AutoLoginType() EnvironmentVariable
func (EnvironmentVariable) AutoTuneToCpu ¶
func (EnvironmentVariable) AutoTuneToCpu() EnvironmentVariable
added in so that CPU usage detection can be disabled if advanced users feel it is causing tuning to be too conservative (i.e. not enough concurrency, due to detected CPU usage)
func (EnvironmentVariable) AwsSessionToken ¶
func (EnvironmentVariable) AwsSessionToken() EnvironmentVariable
AwsSessionToken is temporarily internally reserved, and not exposed to users.
func (EnvironmentVariable) BufferGB ¶
func (EnvironmentVariable) BufferGB() EnvironmentVariable
func (EnvironmentVariable) CPKEncryptionKey ¶
func (EnvironmentVariable) CPKEncryptionKey() EnvironmentVariable
func (EnvironmentVariable) CPKEncryptionKeySHA256 ¶
func (EnvironmentVariable) CPKEncryptionKeySHA256() EnvironmentVariable
func (EnvironmentVariable) CacheProxyLookup ¶
func (EnvironmentVariable) CacheProxyLookup() EnvironmentVariable
func (EnvironmentVariable) CertificatePassword ¶
func (EnvironmentVariable) CertificatePassword() EnvironmentVariable
func (EnvironmentVariable) CertificatePath ¶
func (EnvironmentVariable) CertificatePath() EnvironmentVariable
func (EnvironmentVariable) ClientSecret ¶
func (EnvironmentVariable) ClientSecret() EnvironmentVariable
func (EnvironmentVariable) ConcurrencyValue ¶
func (EnvironmentVariable) ConcurrencyValue() EnvironmentVariable
func (EnvironmentVariable) CredentialType ¶
func (EnvironmentVariable) CredentialType() EnvironmentVariable
CredentialType is only used for internal integration.
func (EnvironmentVariable) DefaultServiceApiVersion ¶
func (EnvironmentVariable) DefaultServiceApiVersion() EnvironmentVariable
func (EnvironmentVariable) DisableHierarchicalScanning ¶
func (EnvironmentVariable) DisableHierarchicalScanning() EnvironmentVariable
func (EnvironmentVariable) DisableSyslog ¶
func (EnvironmentVariable) DisableSyslog() EnvironmentVariable
func (EnvironmentVariable) EnumerationPoolSize ¶
func (EnvironmentVariable) EnumerationPoolSize() EnvironmentVariable
func (EnvironmentVariable) GoogleAppCredentials ¶
func (EnvironmentVariable) GoogleAppCredentials() EnvironmentVariable
func (EnvironmentVariable) GoogleCloudProject ¶
func (EnvironmentVariable) GoogleCloudProject() EnvironmentVariable
func (EnvironmentVariable) JobPlanLocation ¶
func (EnvironmentVariable) JobPlanLocation() EnvironmentVariable
func (EnvironmentVariable) LogLocation ¶
func (EnvironmentVariable) LogLocation() EnvironmentVariable
func (EnvironmentVariable) ManagedIdentityClientID ¶
func (EnvironmentVariable) ManagedIdentityClientID() EnvironmentVariable
For MSI login
func (EnvironmentVariable) ManagedIdentityObjectID ¶
func (EnvironmentVariable) ManagedIdentityObjectID() EnvironmentVariable
func (EnvironmentVariable) ManagedIdentityResourceString ¶
func (EnvironmentVariable) ManagedIdentityResourceString() EnvironmentVariable
func (EnvironmentVariable) MimeMapping ¶
func (EnvironmentVariable) MimeMapping() EnvironmentVariable
func (EnvironmentVariable) OAuthTokenInfo ¶
func (EnvironmentVariable) OAuthTokenInfo() EnvironmentVariable
OAuthTokenInfo is only used for internal integration.
func (EnvironmentVariable) OptimizeSparsePageBlobTransfers ¶
func (EnvironmentVariable) OptimizeSparsePageBlobTransfers() EnvironmentVariable
func (EnvironmentVariable) PacePageBlobs ¶
func (EnvironmentVariable) PacePageBlobs() EnvironmentVariable
func (EnvironmentVariable) ParallelStatFiles ¶
func (EnvironmentVariable) ParallelStatFiles() EnvironmentVariable
func (EnvironmentVariable) ProfileCPU ¶
func (EnvironmentVariable) ProfileCPU() EnvironmentVariable
func (EnvironmentVariable) ProfileMemory ¶
func (EnvironmentVariable) ProfileMemory() EnvironmentVariable
func (EnvironmentVariable) RequestTryTimeout ¶
func (EnvironmentVariable) RequestTryTimeout() EnvironmentVariable
func (EnvironmentVariable) ShowPerfStates ¶
func (EnvironmentVariable) ShowPerfStates() EnvironmentVariable
func (EnvironmentVariable) TenantID ¶
func (EnvironmentVariable) TenantID() EnvironmentVariable
func (EnvironmentVariable) TransferInitiationPoolSize ¶
func (EnvironmentVariable) TransferInitiationPoolSize() EnvironmentVariable
func (EnvironmentVariable) UserAgentPrefix ¶
func (EnvironmentVariable) UserAgentPrefix() EnvironmentVariable
func (EnvironmentVariable) UserDir ¶
func (EnvironmentVariable) UserDir() EnvironmentVariable
type ExclusiveStringMap ¶
type ExclusiveStringMap struct {
// contains filtered or unexported fields
}
func NewExclusiveStringMap ¶
func NewExclusiveStringMap(fromTo FromTo, goos string) *ExclusiveStringMap
func (*ExclusiveStringMap) Add ¶
func (e *ExclusiveStringMap) Add(key string) error
Add succeeds if and only if key is not currently in the map
func (*ExclusiveStringMap) Remove ¶
func (e *ExclusiveStringMap) Remove(key string)
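A minimal in-package sketch of how Add's error return can be used to detect name collisions; the helper name is hypothetical, and the fromTo and goos arguments are whatever the caller already has.

// detectCollisions is an illustrative helper: it returns the names that Add
// rejected because an equivalent key was already present in the map.
func detectCollisions(fromTo FromTo, goos string, names []string) []string {
	m := NewExclusiveStringMap(fromTo, goos)
	var collisions []string
	for _, n := range names {
		if err := m.Add(n); err != nil {
			// Add succeeds if and only if the key is not currently in the map.
			collisions = append(collisions, n)
		}
	}
	return collisions
}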
type ExitCode ¶
type ExitCode uint32
type FileURLPartsExtension ¶
type FileURLPartsExtension struct {
azfile.FileURLParts
}
func (FileURLPartsExtension) GetServiceURL ¶
func (parts FileURLPartsExtension) GetServiceURL() url.URL
func (FileURLPartsExtension) GetShareURL ¶
func (parts FileURLPartsExtension) GetShareURL() url.URL
type FolderCreationTracker ¶
type FolderCreationTracker interface {
	RecordCreation(folder string)
	ShouldSetProperties(folder string, overwrite OverwriteOption, prompter Prompter) bool
	StopTracking(folder string)
}
FolderCreationTracker is used to ensure that, in an overwrite=false situation, we only set folder properties on folders which were created by the current job. (This is consistent with the fact that, when overwrite == false, we only set file properties on files created by the current job.)
type FolderDeletionFunc ¶
FolderDeletionFunc should delete the folder IF IT IS EMPTY, and return true. If the folder is not empty, it must return false. FolderDeletionManager is allowed to call this on a folder that is not yet empty; in that case, it may call the func again later. Errors are not returned because deletion may happen after a delay, so it's up to the func to do its own logging.
type FolderDeletionManager ¶
type FolderDeletionManager interface {
	// RecordChildExists takes a child name and counts it against the child's immediate parent.
	// Should be called for both types of child: folders and files.
	// Only counts it against the immediate parent (that's all that's necessary, because we recurse in tryDeletion).
	RecordChildExists(childFileOrFolder *url.URL)

	// RecordChildDeleted records that a file, previously passed to RecordChildExists, has now been deleted.
	// Only call for files, not folders.
	RecordChildDeleted(childFile *url.URL)

	// RequestDeletion registers a function that will be called to delete the given folder, when that
	// folder has no more known children. May be called before, after or during the time that
	// the folder's children are being passed to RecordChildExists and RecordChildDeleted.
	//
	// Warning: only pass in deletionFuncs that will do nothing and return FALSE if the
	// folder is not yet empty. If they return false, they may be called again later.
	RequestDeletion(folder *url.URL, deletionFunc FolderDeletionFunc)
}
FolderDeletionManager handles the fact that (in most locations) we can't delete folders that still contain files. So it allows us to request deletion of a folder, and have that deletion be attempted after the last file is removed. Note that the apparent last file may not actually be the last (e.g. there are other files, still to be deleted, in future job parts), in which case any failed deletion will be retried when a new "candidate last child" is removed. Takes URLs rather than strings because that ensures correct (un)escaping, and makes it clear that we don't support Windows & MacOS local paths (which have case insensitivity that we don't support here).
func NewFolderDeletionManager ¶
func NewFolderDeletionManager(ctx context.Context, fpo FolderPropertyOption, logger ILogger) FolderDeletionManager
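A sketch of the intended call pattern, assuming it runs inside this package; the helper name is hypothetical, and the context, FolderPropertyOption, logger, URLs and deletion func are supplied by the caller.

// deleteWhenEmpty is an illustrative helper showing the call order: register the
// known children, request deletion of the folder, then report child deletions as
// they actually happen. The manager attempts the folder deletion once no known
// children remain.
func deleteWhenEmpty(ctx context.Context, fpo FolderPropertyOption, logger ILogger,
	folder *url.URL, children []*url.URL, deletionFunc FolderDeletionFunc) {

	fdm := NewFolderDeletionManager(ctx, fpo, logger)
	for _, c := range children {
		fdm.RecordChildExists(c) // count every child (file or folder) against its parent
	}
	fdm.RequestDeletion(folder, deletionFunc) // may be called before, during or after the loop above
	for _, c := range children {
		fdm.RecordChildDeleted(c) // files only, as each one is actually deleted
	}
}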
type FolderPropertyOption ¶
type FolderPropertyOption uint8
FolderPropertyOption controls which folders get their properties recorded in the Plan file
func (FolderPropertyOption) AllFolders ¶
func (FolderPropertyOption) AllFolders() FolderPropertyOption
func (FolderPropertyOption) AllFoldersExceptRoot ¶
func (FolderPropertyOption) AllFoldersExceptRoot() FolderPropertyOption
func (FolderPropertyOption) NoFolders ¶
func (FolderPropertyOption) NoFolders() FolderPropertyOption
func (FolderPropertyOption) Unspecified ¶
func (FolderPropertyOption) Unspecified() FolderPropertyOption
Unspecified means no FPO has been selected. The zero-like value is deliberately "unspecified" so that we detect any code paths that do not nominate an FPO.
type FromTo ¶
type FromTo uint16
FromTo defines the different source/destination location combinations. FromTo is 16 bits wide: the first 8 bits represent the from location and the other 8 bits represent the to location.
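As a rough illustration of that layout, the hypothetical helpers below pack and unpack a FromTo value, taking "first 8 bits" as the high byte; the real package may construct FromTo values differently, so treat this purely as a sketch of the documented bit layout.

// makeFromTo and splitFromTo are hypothetical helpers, not package API.
func makeFromTo(from, to Location) FromTo {
	return FromTo(uint16(from)<<8 | uint16(to)) // high byte = from, low byte = to (assumed)
}

func splitFromTo(ft FromTo) (from, to Location) {
	return Location(ft >> 8), Location(ft & 0xff)
}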
func (*FromTo) AreBothFolderAware ¶
func (FromTo) BenchmarkBlob ¶
TODO: do we really want these? Starts to look like a bit of a combinatorial explosion.
func (FromTo) BenchmarkBlobFS ¶
func (FromTo) BenchmarkFile ¶
func (FromTo) BlobFSLocal ¶
func (FromTo) BlobFSTrash ¶
func (*FromTo) IsDownload ¶
func (FromTo) LocalBlobFS ¶
type GCPClientFactory ¶
type GCPClientFactory struct {
// contains filtered or unexported fields
}
func NewGCPClientFactory ¶
func NewGCPClientFactory() GCPClientFactory
func (*GCPClientFactory) GetGCPClient ¶
func (f *GCPClientFactory) GetGCPClient(ctx context.Context, credInfo CredentialInfo, option CredentialOpOptions) (*gcpUtils.Client, error)
type GCPCredentialInfo ¶
type GCPCredentialInfo struct { }
type GCPObjectInfoExtension ¶
type GCPObjectInfoExtension struct {
ObjectInfo gcpUtils.ObjectAttrs
}
func (*GCPObjectInfoExtension) CacheControl ¶
func (gie *GCPObjectInfoExtension) CacheControl() string
func (*GCPObjectInfoExtension) ContentDisposition ¶
func (gie *GCPObjectInfoExtension) ContentDisposition() string
func (*GCPObjectInfoExtension) ContentEncoding ¶
func (gie *GCPObjectInfoExtension) ContentEncoding() string
func (*GCPObjectInfoExtension) ContentLanguage ¶
func (gie *GCPObjectInfoExtension) ContentLanguage() string
func (*GCPObjectInfoExtension) ContentMD5 ¶
func (gie *GCPObjectInfoExtension) ContentMD5() []byte
func (*GCPObjectInfoExtension) ContentType ¶
func (gie *GCPObjectInfoExtension) ContentType() string
func (*GCPObjectInfoExtension) NewCommonMetadata ¶
func (gie *GCPObjectInfoExtension) NewCommonMetadata() Metadata
NewCommonMetadata returns a map of user-defined key/value pairs
type GCPURLParts ¶
type GCPURLParts struct {
	Scheme         string
	Host           string
	BucketName     string
	ObjectKey      string
	UnparsedParams string
}
GCPURLParts structure is used to parse and hold the different components of GCP Object/Service/Bucket URL
func NewGCPURLParts ¶
func NewGCPURLParts(u url.URL) (GCPURLParts, error)
NewGCPURLParts processes the given URL and returns a valid GCPURLParts structure that contains all the necessary components.
func (*GCPURLParts) IsBucketSyntactically ¶
func (gUrl *GCPURLParts) IsBucketSyntactically() bool
func (*GCPURLParts) IsDirectorySyntactically ¶
func (gUrl *GCPURLParts) IsDirectorySyntactically() bool
IsDirectorySyntactically returns whether the given GCPURLParts points to a directory, based on the path.
func (*GCPURLParts) IsObjectSyntactically ¶
func (gUrl *GCPURLParts) IsObjectSyntactically() bool
func (*GCPURLParts) IsServiceSyntactically ¶
func (gUrl *GCPURLParts) IsServiceSyntactically() bool
func (*GCPURLParts) String ¶
func (gUrl *GCPURLParts) String() string
func (*GCPURLParts) URL ¶
func (gUrl *GCPURLParts) URL() url.URL
URL returns a valid net/url.URL object initialised from the components of the GCP URL.
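Taken together, a sketch of parsing and inspecting a GCP URL with these helpers (in-package sketch; the helper name and URL string are illustrative):

func describeGCPURL(raw string) (string, error) {
	u, err := url.Parse(raw) // a GCP bucket or object URL
	if err != nil {
		return "", err
	}
	parts, err := NewGCPURLParts(*u)
	if err != nil {
		return "", err
	}
	if parts.IsBucketSyntactically() {
		return "bucket: " + parts.BucketName, nil
	}
	return "object: " + parts.BucketName + "/" + parts.ObjectKey, nil
}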
type GenericResourceURLParts ¶
type GenericResourceURLParts struct {
// contains filtered or unexported fields
}
GenericResourceURLParts is intended as a generic solution to code duplication when using *URLParts. TODO: Use this to reduce code duplication in the cca.Source and jobPartOrder.Source setups. Currently this just contains generic functions for what we *need*. This isn't an overarching, perfect implementation; the above suggestion would be preferable to continuing to expand this (due to 4x code duplication for every function). It's just a bridge over a LARGE gap for now.
func NewGenericResourceURLParts ¶
func NewGenericResourceURLParts(resourceURL url.URL, location Location) GenericResourceURLParts
func (GenericResourceURLParts) GetContainerName ¶
func (g GenericResourceURLParts) GetContainerName() string
func (GenericResourceURLParts) GetObjectName ¶
func (g GenericResourceURLParts) GetObjectName() string
func (*GenericResourceURLParts) SetObjectName ¶
func (g *GenericResourceURLParts) SetObjectName(objectName string)
func (GenericResourceURLParts) String ¶
func (g GenericResourceURLParts) String() string
func (GenericResourceURLParts) URL ¶
func (g GenericResourceURLParts) URL() url.URL
type GetJobFromToRequest ¶
type GetJobFromToRequest struct {
JobID JobID
}
GetJobFromToRequest represents a request to get a job's FromTo info from the job part plan header.
type GetJobFromToResponse ¶
GetJobFromToResponse represents the response to a request for a job's FromTo info.
type HTTPResponseExtension ¶
func (HTTPResponseExtension) IsSuccessStatusCode ¶
func (r HTTPResponseExtension) IsSuccessStatusCode(successStatusCodes ...int) bool
IsSuccessStatusCode checks if response's status code is contained in specified success status codes.
type HashValidationOption ¶
type HashValidationOption uint8
func (HashValidationOption) FailIfDifferent ¶
func (HashValidationOption) FailIfDifferent() HashValidationOption
FailIfDifferent means fail if the hashes differ, but do NOT fail if the saved hash is missing entirely. This balances convenience (for cases where no hash is saved) against strictness (validating strictly when one is present).
func (HashValidationOption) FailIfDifferentOrMissing ¶
func (HashValidationOption) FailIfDifferentOrMissing() HashValidationOption
FailIfDifferentOrMissing is the strictest option, and useful for testing or validation in cases when we _know_ there should be a hash
func (HashValidationOption) LogOnly ¶
func (HashValidationOption) LogOnly() HashValidationOption
LogOnly means only log if missing or different, don't fail the transfer
func (HashValidationOption) MarshalJSON ¶
func (hvo HashValidationOption) MarshalJSON() ([]byte, error)
func (HashValidationOption) NoCheck ¶
func (HashValidationOption) NoCheck() HashValidationOption
Do not check hashes at download time at all
func (*HashValidationOption) Parse ¶
func (hvo *HashValidationOption) Parse(s string) error
func (HashValidationOption) String ¶
func (hvo HashValidationOption) String() string
func (*HashValidationOption) UnmarshalJSON ¶
func (hvo *HashValidationOption) UnmarshalJSON(b []byte) error
type ILoggerCloser ¶
type ILoggerCloser interface {
	ILogger
	CloseLog()
}
func NewAppLogger ¶
func NewAppLogger(minimumLevelToLog pipeline.LogLevel, logFileFolder string) ILoggerCloser
type ILoggerResetable ¶
type ILoggerResetable interface {
	OpenLog()
	MinimumLogLevel() pipeline.LogLevel
	ChangeLogLevel(pipeline.LogLevel)
	ILoggerCloser
}
var AzcopyCurrentJobLogger ILoggerResetable
func NewJobLogger ¶
func NewJobLogger(jobID JobID, minimumLevelToLog LogLevel, logFileFolder string, logFileNameSuffix string) ILoggerResetable
type IdentityInfo ¶
type IdentityInfo struct {
	ClientID string `json:"_identity_client_id"`
	ObjectID string `json:"_identity_object_id"`
	MSIResID string `json:"_identity_msi_res_id"`
}
IdentityInfo contains info for MSI.
func (*IdentityInfo) Validate ¶
func (identityInfo *IdentityInfo) Validate() error
Validate validates the identity info; at most one of clientID, objectID, or MSI resource ID may be set.
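A small in-package sketch of typical validation; the helper name and the ClientID value are illustrative.

func validateIdentity() error {
	id := IdentityInfo{ClientID: "00000000-0000-0000-0000-000000000000"} // illustrative value
	// Setting ObjectID or MSIResID as well would cause Validate to return an error,
	// since at most one identifier may be provided.
	return id.Validate()
}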
type InitMsgJsonTemplate ¶
type InvalidMetadataHandleOption ¶
type InvalidMetadataHandleOption uint8
func (InvalidMetadataHandleOption) ExcludeIfInvalid ¶
func (InvalidMetadataHandleOption) ExcludeIfInvalid() InvalidMetadataHandleOption
ExcludeIfInvalid indicates that whenever an invalid metadata key is found, that specific metadata is excluded and a WARNING is logged.
func (InvalidMetadataHandleOption) FailIfInvalid ¶
func (InvalidMetadataHandleOption) FailIfInvalid() InvalidMetadataHandleOption
FailIfInvalid indicates that whenever an invalid metadata key is found, the transfer fails immediately.
func (InvalidMetadataHandleOption) MarshalJSON ¶
func (i InvalidMetadataHandleOption) MarshalJSON() ([]byte, error)
func (*InvalidMetadataHandleOption) Parse ¶
func (i *InvalidMetadataHandleOption) Parse(s string) error
func (InvalidMetadataHandleOption) RenameIfInvalid ¶
func (InvalidMetadataHandleOption) RenameIfInvalid() InvalidMetadataHandleOption
RenameIfInvalid indicates that whenever an invalid metadata key is found, the key is renamed and the metadata is saved under the renamed key.
func (InvalidMetadataHandleOption) String ¶
func (i InvalidMetadataHandleOption) String() string
func (*InvalidMetadataHandleOption) UnmarshalJSON ¶
func (i *InvalidMetadataHandleOption) UnmarshalJSON(b []byte) error
type JobID ¶
type JobID UUID
func ParseJobID ¶
func (JobID) MarshalJSON ¶
Implementing MarshalJSON() method for type JobID
func (*JobID) UnmarshalJSON ¶
Implementing UnmarshalJSON() method for type JobID
type JobIDDetails ¶
type JobPriority ¶
type JobPriority uint8
JobPriority defines the transfer priorities supported by the Storage Transfer Engine's channels. The default priority is Normal.
func (JobPriority) Low ¶
func (JobPriority) Low() JobPriority
func (JobPriority) Normal ¶
func (JobPriority) Normal() JobPriority
func (JobPriority) String ¶
func (jp JobPriority) String() string
type JobStatus ¶
type JobStatus uint32 // Must be 32-bit for atomic operations
JobStatus indicates the status of a Job; the default is InProgress.
func (*JobStatus) AtomicLoad ¶
func (*JobStatus) AtomicStore ¶
func (JobStatus) Cancelling ¶
func (JobStatus) CompletedWithErrors ¶
func (JobStatus) CompletedWithErrorsAndSkipped ¶
func (JobStatus) CompletedWithSkipped ¶
func (*JobStatus) EnhanceJobStatusInfo ¶
func (JobStatus) InProgress ¶
func (JobStatus) MarshalJSON ¶
Implementing MarshalJSON() method for type JobStatus
func (*JobStatus) UnmarshalJSON ¶
Implementing UnmarshalJSON() method for type JobStatus
type JsonOutputTemplate ¶
type JsonOutputTemplate struct {
	TimeStamp      time.Time
	MessageType    string
	MessageContent string // a simple string for INFO and ERROR, a serialized JSON for INIT, PROGRESS, EXIT
	PromptDetails  PromptDetails
}
JsonOutputTemplate defines the general output template when the format is set to JSON.
type LifecycleMgr ¶
type LifecycleMgr interface {
	Init(OutputBuilder)                                           // let the user know the job has started and initial information like log location
	Progress(OutputBuilder)                                       // print on the same line over and over again, not allowed to float up
	Exit(OutputBuilder, ExitCode)                                 // indicates successful execution exit after printing, allow user to specify exit code
	Info(string)                                                  // simple print, allowed to float up
	Dryrun(OutputBuilder)                                         // print files for dry run mode
	Error(string)                                                 // indicates fatal error, exit after printing, exit code is always Failed (1)
	Prompt(message string, details PromptDetails) ResponseOption // ask the user a question (after erasing the progress), then return the response
	SurrenderControl()                                            // give up control, this should never return
	InitiateProgressReporting(WorkController)                     // start writing progress with another routine
	AllowReinitiateProgressReporting()                            // allow re-initiation of progress reporting for followup job
	GetEnvironmentVariable(EnvironmentVariable) string            // get the environment variable or its default value
	ClearEnvironmentVariable(EnvironmentVariable)                 // clears the environment variable
	SetOutputFormat(OutputFormat)                                 // change the output format of the entire application
	EnableInputWatcher()                                          // depending on the command, we may allow user to give input through Stdin
	EnableCancelFromStdIn()                                       // allow user to send in `cancel` to stop the job
	AddUserAgentPrefix(string) string                             // append the global user agent prefix, if applicable
	E2EAwaitContinue()                                            // used by E2E tests
	E2EAwaitAllowOpenFiles()                                      // used by E2E tests
	E2EEnableAwaitAllowOpenFiles(enable bool)                     // used by E2E tests
	RegisterCloseFunc(func())
	SetForceLogging()
	IsForceLoggingDisabled() bool
}
LifecycleMgr is a public interface so that consumers outside of this package can refer to the lifecycle manager, but cannot instantiate one.
func GetLifecycleMgr ¶
func GetLifecycleMgr() LifecycleMgr
type ListContainerResponse ¶
type ListContainerResponse struct {
Blobs []string
}
ListContainerResponse represents the list of blobs within the container.
type ListJobSummaryResponse ¶
type ListJobSummaryResponse struct {
	ErrorMsg  string
	Timestamp time.Time `json:"-"`
	JobID     JobID

	// TODO: added for debugging purpose. remove later
	ActiveConnections int64 `json:",string"`

	// CompleteJobOrdered determines whether the Job has been completely ordered or not
	CompleteJobOrdered bool
	JobStatus          JobStatus

	// TotalTransfers = FileTransfers + FolderPropertyTransfers. It also = TransfersCompleted + TransfersFailed + TransfersSkipped.
	// FileTransfers and FolderPropertyTransfers just break the total down into the two types.
	// The name FolderPropertyTransfers is used to emphasise that it is only counting transferring the properties and existence of
	// folders. A "folder property transfer" does not include any files that may be in the folder. Those are counted as
	// FileTransfers.
	TotalTransfers          uint32 `json:",string"`
	FileTransfers           uint32 `json:",string"`
	FolderPropertyTransfers uint32 `json:",string"`

	TransfersCompleted uint32 `json:",string"`
	TransfersFailed    uint32 `json:",string"`
	TransfersSkipped   uint32 `json:",string"`

	// BytesOverWire includes bytes sent in retries (i.e. has double counting, if there are retries) and in failed transfers
	BytesOverWire uint64 `json:",string"`

	// TotalBytesTransferred does not include failed transfers or bytes sent in retries (i.e. no double counting).
	// Includes successful transfers and transfers in progress.
	TotalBytesTransferred uint64 `json:",string"`

	// TotalBytesEnumerated is the sum of the total transfer enumerated so far.
	TotalBytesEnumerated uint64 `json:",string"`

	// TotalBytesExpected is the sum of total bytes expected in the job (i.e. based on our current expectation of which files will be successful)
	TotalBytesExpected uint64 `json:",string"`

	PercentComplete float32 `json:",string"`

	// Stats measured from the network pipeline.
	// Values are all-time values, for the duration of the job.
	// Will be zero if read outside the process running the job (e.g. with 'jobs show' command)
	AverageIOPS            int     `json:",string"`
	AverageE2EMilliseconds int     `json:",string"`
	ServerBusyPercentage   float32 `json:",string"`
	NetworkErrorPercentage float32 `json:",string"`

	FailedTransfers   []TransferDetail
	SkippedTransfers  []TransferDetail
	PerfConstraint    PerfConstraint
	PerfStrings       []string `json:"-"`
	PerformanceAdvice []PerformanceAdvice
	IsCleanupJob      bool
}
ListJobSummaryResponse represents the job progress summary response for the list command, when the job progress summary is requested for a given JobID.
type ListJobTransfersRequest ¶
type ListJobTransfersRequest struct { JobID JobID OfStatus TransferStatus }
type ListJobTransfersResponse ¶
type ListJobTransfersResponse struct { ErrorMsg string JobID JobID Details []TransferDetail }
ListJobTransfersResponse represents the list of transfer details and the number of transfers.
type ListJobsResponse ¶
type ListJobsResponse struct { ErrorMessage string JobIDDetails []JobIDDetails }
ListJobsResponse represents the list of jobs, with their JobID details.
type ListOfFiles ¶
type ListOfFiles struct {
Files []string
}
ListOfFiles is used to parse the contents of the file passed with the list-of-files flag.
type ListRequest ¶
type ListRequest struct {
	JobID    JobID
	OfStatus string // TODO: OfStatus with string type sounds not good, change it to enum
	Output   OutputFormat
}
ListRequest represents the raw list command input from the user when requesting the list of transfers with a given status for a given JobID.
type ListSyncJobSummaryResponse ¶
type ListSyncJobSummaryResponse struct { ListJobSummaryResponse DeleteTotalTransfers uint32 `json:",string"` DeleteTransfersCompleted uint32 `json:",string"` }
wraps the standard ListJobSummaryResponse with sync-specific stats
type Location ¶
type Location uint8
Location indicates the type of Location
func (Location) AllStandardLocations ¶
AllStandardLocations returns all locations that are "normal" for testing purposes. Excludes the likes of Unknown, Benchmark and Pipe
func (Location) IsFolderAware ¶
IsFolderAware returns true if the location has real folders (e.g. there's such a thing as an empty folder, and folders may have properties). Folders are only virtual, and so not real, in Blob Storage.
type MMF ¶
type MMF struct {
// contains filtered or unexported fields
}
type Metadata ¶
Metadata used in AzCopy.
func FromAzBlobMetadataToCommonMetadata ¶
FromAzBlobMetadataToCommonMetadata converts azblob's metadata to common metadata.
func FromAzFileMetadataToCommonMetadata ¶
FromAzFileMetadataToCommonMetadata converts azfile's metadata to common metadata.
func UnMarshalToCommonMetadata ¶
UnMarshalToCommonMetadata unmarshals string to common metadata.
func (Metadata) ConcatenatedKeys ¶
func (Metadata) ExcludeInvalidKey ¶
func (Metadata) ResolveInvalidKey ¶
ResolveInvalidKey resolves an invalid metadata key with the following steps:
1. Replace all invalid chars (i.e. ASCII chars except [0-9A-Za-z_]) with '_'.
2. Add 'rename_' as a prefix to the new valid key; this key will be used to save the original metadata's value.
3. Add 'rename_key_' as a prefix to the new valid key; this key will be used to save the original metadata's invalid key.
Example: given invalid metadata for Azure '123-invalid':'content', it will be resolved as two new k:v pairs, 'rename_123_invalid':'content' and 'rename_key_123_invalid':'123-invalid', so the user can try to recover the metadata on the Azure side.
Note: To keep the first version simple, whenever a collision is found during key resolving, an error is returned. This can be further improved once user feedback is received.
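The following is an illustrative restatement of that renaming scheme for a single key; it is not the package implementation, and the helper name and use of regexp are assumptions made for the example.

// resolveOneKey is a hypothetical helper mirroring the documented scheme.
func resolveOneKey(invalidKey, value string, out map[string]string) {
	validKey := regexp.MustCompile(`[^0-9A-Za-z_]`).ReplaceAllString(invalidKey, "_")
	out["rename_"+validKey] = value          // new valid key holding the original value
	out["rename_key_"+validKey] = invalidKey // records the original, invalid key
}

// Example: resolveOneKey("123-invalid", "content", m) yields
//   m["rename_123_invalid"] == "content"
//   m["rename_key_123_invalid"] == "123-invalid"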
func (Metadata) ToAzBlobMetadata ¶
ToAzBlobMetadata converts metadata to azblob's metadata.
func (Metadata) ToAzFileMetadata ¶
ToAzFileMetadata converts metadata to azfile's metadata.
type NoCopy ¶
type NoCopy struct {
// contains filtered or unexported fields
}
The NoCopy struct is used as a field inside another struct that should not be copied by value. After embedding this field, the outer struct's methods can call the Check method, which panics if it detects that the outer struct has been copied by value.
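A sketch of the intended embedding pattern, assuming Check takes no arguments as described above; the outer struct is illustrative.

type jobState struct {
	noCopy NoCopy // embed as a field; jobState must not be copied by value
	count  int
}

func (j *jobState) Increment() {
	j.noCopy.Check() // panics if *j has been copied by value
	j.count++
}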
type OAuthTokenInfo ¶
type OAuthTokenInfo struct {
	adal.Token
	Tenant                  string `json:"_tenant"`
	ActiveDirectoryEndpoint string `json:"_ad_endpoint"`
	TokenRefreshSource      string `json:"_token_refresh_source"`
	ApplicationID           string `json:"_application_id"`
	Identity                bool   `json:"_identity"`
	IdentityInfo            IdentityInfo
	ServicePrincipalName    bool `json:"_spn"`
	SPNInfo                 SPNInfo
	// Note: ClientID should be only used for internal integrations through env var with refresh token.
	// It indicates the Application ID assigned to your app when you registered it with Azure AD.
	// In this case AzCopy refresh token on behalf of caller.
	// For more details, please refer to
	// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-protocols-oauth-code#refreshing-the-access-tokens
	ClientID string `json:"_client_id"`
}
OAuthTokenInfo contains the info necessary for refreshing OAuth credentials.
func (*OAuthTokenInfo) GetNewTokenFromCert ¶
GetNewTokenFromCert refreshes a token manually from a certificate.
func (*OAuthTokenInfo) GetNewTokenFromMSI ¶
GetNewTokenFromMSI gets a token from the Azure Instance Metadata Service identity endpoint. It first checks if the VM is registered with Azure Arc; failing that, it checks if it is an Azure VM. For details, please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview Note: Currently the msiTokenHTTPClient timeout is configured for 30 secs. It should be reduced to 5 secs, as the IMDS endpoint is local to the machine. Without this change, if some router is configured not to return "ICMP unreachable", it will take 30 secs to time out and fall back to Arc. For now, this has been mitigated by checking Arc first, and then Azure.
func (*OAuthTokenInfo) GetNewTokenFromSecret ¶
GetNewTokenFromSecret is a refresh shell for secretLoginNoUOTM
func (*OAuthTokenInfo) GetNewTokenFromTokenStore ¶
GetNewTokenFromTokenStore gets a token from the token store (Credential Manager on Windows, keyring on Linux and keychain on macOS). Note: This approach should only be used in internal integrations.
func (OAuthTokenInfo) IsEmpty ¶
func (credInfo OAuthTokenInfo) IsEmpty() bool
IsEmpty returns whether the current OAuthTokenInfo is empty and doesn't contain any useful info.
func (*OAuthTokenInfo) RefreshTokenWithUserCredential ¶
func (credInfo *OAuthTokenInfo) RefreshTokenWithUserCredential(ctx context.Context) (*adal.Token, error)
RefreshTokenWithUserCredential gets new token with user credential through refresh.
type ObjectInfoExtension ¶
type ObjectInfoExtension struct {
ObjectInfo minio.ObjectInfo
}
func (*ObjectInfoExtension) CacheControl ¶
func (oie *ObjectInfoExtension) CacheControl() string
CacheControl returns the value for header Cache-Control.
func (*ObjectInfoExtension) ContentDisposition ¶
func (oie *ObjectInfoExtension) ContentDisposition() string
ContentDisposition returns the value for header Content-Disposition.
func (*ObjectInfoExtension) ContentEncoding ¶
func (oie *ObjectInfoExtension) ContentEncoding() string
ContentEncoding returns the value for header Content-Encoding.
func (*ObjectInfoExtension) ContentLanguage ¶
func (oie *ObjectInfoExtension) ContentLanguage() string
ContentLanguage returns the value for header Content-Language.
func (*ObjectInfoExtension) ContentMD5 ¶
func (oie *ObjectInfoExtension) ContentMD5() []byte
ContentMD5 returns the value for header Content-MD5.
func (*ObjectInfoExtension) ContentType ¶
func (oie *ObjectInfoExtension) ContentType() string
func (*ObjectInfoExtension) NewCommonMetadata ¶
func (oie *ObjectInfoExtension) NewCommonMetadata() Metadata
NewCommonMetadata returns user-defined key/value pairs.
type OutputBuilder ¶
type OutputBuilder func(OutputFormat) string
OutputBuilder is used for output types that are not simple strings, such as progress and init. A given format (text, json) is passed in, and the appropriate string is returned.
func GetStandardInitOutputBuilder ¶
func GetStandardInitOutputBuilder(jobID string, logFileLocation string, isCleanupJob bool, cleanupMessage string) OutputBuilder
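A sketch of a custom OutputBuilder (in-package sketch; the helper name, JSON shape and message text are illustrative):

func summaryBuilder(filesCopied int) OutputBuilder {
	return func(format OutputFormat) string {
		switch format {
		case format.Json():
			return fmt.Sprintf(`{"filesCopied": %d}`, filesCopied)
		default:
			return fmt.Sprintf("Copied %d files", filesCopied)
		}
	}
}

// The builder is then handed to the lifecycle manager, e.g.:
//   GetLifecycleMgr().Progress(summaryBuilder(42))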
type OutputFormat ¶
type OutputFormat uint32
func (OutputFormat) Json ¶
func (OutputFormat) Json() OutputFormat
func (OutputFormat) None ¶
func (OutputFormat) None() OutputFormat
func (*OutputFormat) Parse ¶
func (of *OutputFormat) Parse(s string) error
func (OutputFormat) String ¶
func (of OutputFormat) String() string
func (OutputFormat) Text ¶
func (OutputFormat) Text() OutputFormat
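Enums of this kind expose String and Parse; a small in-package sketch of round-tripping one value (assuming Parse accepts the names that String produces):

func roundTripOutputFormat() (OutputFormat, error) {
	name := OutputFormat(0).Json().String() // textual name of the Json value
	var of OutputFormat
	err := of.Parse(name) // parse the name back into the enum
	return of, err
}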
type OverwriteOption ¶
type OverwriteOption uint8
func (OverwriteOption) False ¶
func (OverwriteOption) False() OverwriteOption
func (OverwriteOption) IfSourceNewer ¶
func (OverwriteOption) IfSourceNewer() OverwriteOption
func (*OverwriteOption) Parse ¶
func (o *OverwriteOption) Parse(s string) error
func (OverwriteOption) Prompt ¶
func (OverwriteOption) Prompt() OverwriteOption
func (OverwriteOption) String ¶
func (o OverwriteOption) String() string
func (OverwriteOption) True ¶
func (OverwriteOption) True() OverwriteOption
type PageBlobTier ¶
type PageBlobTier uint8
func (PageBlobTier) MarshalJSON ¶
func (pbt PageBlobTier) MarshalJSON() ([]byte, error)
func (PageBlobTier) None ¶
func (PageBlobTier) None() PageBlobTier
func (PageBlobTier) P10 ¶
func (PageBlobTier) P10() PageBlobTier
func (PageBlobTier) P15 ¶
func (PageBlobTier) P15() PageBlobTier
func (PageBlobTier) P20 ¶
func (PageBlobTier) P20() PageBlobTier
func (PageBlobTier) P30 ¶
func (PageBlobTier) P30() PageBlobTier
func (PageBlobTier) P4 ¶
func (PageBlobTier) P4() PageBlobTier
func (PageBlobTier) P40 ¶
func (PageBlobTier) P40() PageBlobTier
func (PageBlobTier) P50 ¶
func (PageBlobTier) P50() PageBlobTier
func (PageBlobTier) P6 ¶
func (PageBlobTier) P6() PageBlobTier
func (*PageBlobTier) Parse ¶
func (pbt *PageBlobTier) Parse(s string) error
func (PageBlobTier) String ¶
func (pbt PageBlobTier) String() string
func (PageBlobTier) ToAccessTierType ¶
func (pbt PageBlobTier) ToAccessTierType() azblob.AccessTierType
func (*PageBlobTier) UnmarshalJSON ¶
func (pbt *PageBlobTier) UnmarshalJSON(b []byte) error
Implementing UnmarshalJSON() method for type PageBlobTier.
type PartNumber ¶
type PartNumber uint32
type PerfConstraint ¶
type PerfConstraint int32
func (PerfConstraint) CPU ¶
func (PerfConstraint) CPU() PerfConstraint
func (PerfConstraint) Disk ¶
func (PerfConstraint) Disk() PerfConstraint
func (PerfConstraint) PageBlobService ¶
func (PerfConstraint) PageBlobService() PerfConstraint
func (*PerfConstraint) Parse ¶
func (pc *PerfConstraint) Parse(s string) error
func (PerfConstraint) Service ¶
func (PerfConstraint) Service() PerfConstraint
func (PerfConstraint) String ¶
func (pc PerfConstraint) String() string
func (PerfConstraint) Unknown ¶
func (PerfConstraint) Unknown() PerfConstraint
type PerformanceAdvice ¶
type PerformanceAdvice struct {
	// Code representing the type of the advice
	Code string `json:"Code"` // reminder that PerformanceAdvice may be serialized in JSON output

	// Human-friendly title (directly corresponds to Code, but more readable)
	Title string

	// Reason why this advice has been given
	Reason string

	// Is this the primary advice (used to distinguish most important advice in cases where multiple advice objects are returned)
	PriorityAdvice bool
}
type PreservePermissionsOption ¶
type PreservePermissionsOption uint8
func NewPreservePermissionsOption ¶
func NewPreservePermissionsOption(preserve, includeOwnership bool, fromTo FromTo) PreservePermissionsOption
func (PreservePermissionsOption) ACLsOnly ¶
func (PreservePermissionsOption) ACLsOnly() PreservePermissionsOption
func (PreservePermissionsOption) IsTruthy ¶
func (p PreservePermissionsOption) IsTruthy() bool
func (PreservePermissionsOption) None ¶
func (PreservePermissionsOption) None() PreservePermissionsOption
func (PreservePermissionsOption) OwnershipAndACLs ¶
func (PreservePermissionsOption) OwnershipAndACLs() PreservePermissionsOption
type PrologueState ¶
type PrologueState struct {
	// LeadingBytes are the early bytes of the file, to be used for mime-type detection
	// (or nil if the file is empty or the bytes could not be read).
	LeadingBytes []byte
}
PrologueState contains info necessary for different sending operations' prologue.
func (PrologueState) GetInferredContentType ¶
func (ps PrologueState) GetInferredContentType(jptm cutdownJptm) string
type PromptDetails ¶
type PromptDetails struct {
	PromptType      PromptType
	ResponseOptions []ResponseOption // used for prompt messages where we expect a response
	PromptTarget    string           // used when the prompt message targets a specific resource, to ease partner team integration
}
type PromptType ¶
type PromptType string
func (PromptType) Cancel ¶
func (PromptType) Cancel() PromptType
func (PromptType) DeleteDestination ¶
func (PromptType) DeleteDestination() PromptType
func (PromptType) Overwrite ¶
func (PromptType) Overwrite() PromptType
type Prompter ¶
type Prompter interface {
ShouldOverwrite(objectPath string, objectType EntityType) bool
}
type ProxyLookupFunc ¶
type ProxyLookupFunc func(req *http.Request) (*url.URL, error) // signature of normal Transport.Proxy lookup
var GlobalProxyLookup ProxyLookupFunc
type ResourceHTTPHeaders ¶
type ResourceHTTPHeaders struct {
	ContentType        string
	ContentMD5         []byte
	ContentEncoding    string
	ContentLanguage    string
	ContentDisposition string
	CacheControl       string
}
Common resource's HTTP headers stands for properties used in AzCopy.
func (ResourceHTTPHeaders) ToAzBlobHTTPHeaders ¶
func (h ResourceHTTPHeaders) ToAzBlobHTTPHeaders() azblob.BlobHTTPHeaders
ToAzBlobHTTPHeaders converts ResourceHTTPHeaders to azblob's BlobHTTPHeaders.
func (ResourceHTTPHeaders) ToAzFileHTTPHeaders ¶
func (h ResourceHTTPHeaders) ToAzFileHTTPHeaders() azfile.FileHTTPHeaders
ToAzFileHTTPHeaders converts ResourceHTTPHeaders to azfile's FileHTTPHeaders.
func (ResourceHTTPHeaders) ToBlobFSHTTPHeaders ¶
func (h ResourceHTTPHeaders) ToBlobFSHTTPHeaders() azbfs.BlobFSHTTPHeaders
ToBlobFSHTTPHeaders converts ResourceHTTPHeaders to BlobFS Headers.
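A small in-package sketch of filling the common headers once and converting them for a specific SDK; the helper name and header values are illustrative.

func toBlobHeaders(contentType string) azblob.BlobHTTPHeaders {
	h := ResourceHTTPHeaders{
		ContentType:  contentType,
		CacheControl: "no-cache", // illustrative value
	}
	return h.ToAzBlobHTTPHeaders() // ToAzFileHTTPHeaders / ToBlobFSHTTPHeaders work the same way
}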
type ResourceString ¶
type ResourceString struct {
	Value string
	// SAS should NOT be persisted in the plan files (both for security reasons, and because, at the time of any resume,
	// it may be stale anyway. Resume requests fresh SAS on the command line.)
	SAS        string
	ExtraQuery string
}
ResourceString represents a source or dest string, that can have three parts: the main part, a sas, and extra query parameters that are not part of the sas.
func (ResourceString) Clone ¶
func (r ResourceString) Clone() ResourceString
func (ResourceString) CloneWithConsolidatedSeparators ¶
func (r ResourceString) CloneWithConsolidatedSeparators() ResourceString
func (ResourceString) CloneWithValue ¶
func (r ResourceString) CloneWithValue(newValue string) ResourceString
func (ResourceString) ValueLocal ¶
func (r ResourceString) ValueLocal() string
ValueLocal is to be used when the value is assumed to be a local path. Using this signals "Yes, I really am ignoring the SAS and ExtraQuery on purpose", and it will panic in the case of programmer error, i.e. calling this method when those fields have values.
type ResponseOption ¶
type ResponseOption struct {
	ResponseType             string // helps us clarify the user's intent and support partner team's localization
	UserFriendlyResponseType string // text to print in interactive mode
	ResponseString           string // short (abbreviation) string that gets sent back by the user to indicate that this response is chosen
}
func (ResponseOption) Default ¶
func (ResponseOption) Default() ResponseOption
func (ResponseOption) No ¶
func (ResponseOption) No() ResponseOption
func (ResponseOption) NoForAll ¶
func (ResponseOption) NoForAll() ResponseOption
func (*ResponseOption) Parse ¶
func (o *ResponseOption) Parse(s string) error
func (ResponseOption) Yes ¶
func (ResponseOption) Yes() ResponseOption
NOTE: these enums are shared with StgExp, so the text is spelled out explicitly (for easy json marshalling)
func (ResponseOption) YesForAll ¶
func (ResponseOption) YesForAll() ResponseOption
type ResumeJobRequest ¶
type RetryCounter ¶
type RetryCounter interface {
GetTotalRetries() int64
}
type RpcCmd ¶
type RpcCmd string
RpcCmd identifies the RPC command being invoked (e.g. CopyJobPartOrder, ListJobSummary).
func (RpcCmd) CopyJobPartOrder ¶
func (RpcCmd) GetJobFromTo ¶
func (RpcCmd) GetJobLCMWrapper ¶
func (RpcCmd) ListJobSummary ¶
func (RpcCmd) ListJobTransfers ¶
func (RpcCmd) ListSyncJobSummary ¶
type S3ClientFactory ¶
type S3ClientFactory struct {
// contains filtered or unexported fields
}
func NewS3ClientFactory ¶
func NewS3ClientFactory() S3ClientFactory
NewS3ClientFactory creates a new S3 client factory.
func (*S3ClientFactory) GetS3Client ¶
func (f *S3ClientFactory) GetS3Client(ctx context.Context, credInfo CredentialInfo, option CredentialOpOptions) (*minio.Client, error)
GetS3Client gets an S3 client from the pool, or creates a new S3 client if none has been created for the specific credInfo.
type S3CredentialInfo ¶
S3CredentialInfo contains the essential credential info needed to build an S3 client.
type S3URLParts ¶
type S3URLParts struct {
	Scheme         string // Ex: "https://", "s3://"
	Host           string // Ex: "s3.amazonaws.com", "s3-eu-west-1.amazonaws.com", "bucket.s3-eu-west-1.amazonaws.com"
	Endpoint       string // Ex: "s3.amazonaws.com", "s3-eu-west-1.amazonaws.com"
	BucketName     string // Ex: "MyBucket"
	ObjectKey      string // Ex: "hello.txt", "foo/bar"
	Version        string
	Region         string // Ex: endpoint region, e.g. "eu-west-1"
	UnparsedParams string
	// contains filtered or unexported fields
}
S3URLParts represents the components that make up an AWS S3 Service/Bucket/Object URL. You parse an existing URL into its parts by calling NewS3URLParts().
According to http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro and https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region, S3URLParts supports virtual-hosted-style and path-style URLs:
Virtual-hosted-style (the bucket name is part of the domain name in the URL):
a. http://bucket.s3.amazonaws.com
b. http://bucket.s3-aws-region.amazonaws.com
Path-style (the bucket name is not part of the domain, unless you use a Region-specific endpoint):
a. http://s3.amazonaws.com/bucket (US East (N. Virginia) Region endpoint)
b. http://s3-aws-region.amazonaws.com/bucket (Region-specific endpoint)
Dual-stack endpoints (IPv6 & IPv4) are also supported (https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description), i.e. endpoints of the form http://bucketname.s3.dualstack.aws-region.amazonaws.com or http://s3.dualstack.aws-region.amazonaws.com/bucketname
func NewS3URLParts ¶
func NewS3URLParts(u url.URL) (S3URLParts, error)
NewS3URLParts parses a URL initializing S3URLParts' fields. This method overwrites all fields in the S3URLParts object.
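A sketch of parsing an S3 URL (in-package sketch; the helper name and URL string are illustrative):

func describeS3URL(raw string) (S3URLParts, error) {
	u, err := url.Parse(raw) // e.g. "https://s3.amazonaws.com/mybucket/foo/bar.txt"
	if err != nil {
		return S3URLParts{}, err
	}
	parts, err := NewS3URLParts(*u)
	if err != nil {
		return S3URLParts{}, err
	}
	// BucketName and ObjectKey are populated for both path-style and
	// virtual-hosted-style URLs, as described above.
	return parts, nil
}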
func (*S3URLParts) IsBucketSyntactically ¶
func (p *S3URLParts) IsBucketSyntactically() bool
func (*S3URLParts) IsDirectorySyntactically ¶
func (p *S3URLParts) IsDirectorySyntactically() bool
IsDirectorySyntactically validates whether the S3URLParts indicates a directory. Note: a directory in S3 is a virtual abstraction, and it is also an object.
func (*S3URLParts) IsObjectSyntactically ¶
func (p *S3URLParts) IsObjectSyntactically() bool
func (*S3URLParts) IsServiceSyntactically ¶
func (p *S3URLParts) IsServiceSyntactically() bool
func (*S3URLParts) String ¶
func (p *S3URLParts) String() string
func (*S3URLParts) URL ¶
func (p *S3URLParts) URL() url.URL
URL returns a URL object whose fields are initialized from the S3URLParts fields.
type SPNInfo ¶
type SPNInfo struct {
	// Secret is used for two purposes: the certificate secret, and a client secret.
	// The secret is persisted to the JSON file because AAD does not issue a refresh token.
	// Thus, the original secret is needed to refresh.
	Secret   string `json:"_spn_secret"`
	CertPath string `json:"_spn_cert_path"`
}
SPNInfo contains info for authenticating with Service Principal Names
type SingleChunkReader ¶
type SingleChunkReader interface {
	// ReadSeeker is used to read the contents of the chunk, and because the sending pipeline seeks at various times
	io.ReadSeeker

	// Closer is needed to clean up resources
	io.Closer

	// BlockingPrefetch tries to read the full contents of the chunk into RAM.
	BlockingPrefetch(fileReader io.ReaderAt, isRetry bool) error

	// GetPrologueState is used to grab enough of the initial bytes to do MIME-type detection. Expected to be called only
	// on the first chunk in each file (since there's no point in calling it on others).
	// There is deliberately no error return value from the Prologue.
	// If it failed, the Prologue itself must call jptm.FailActiveSend.
	GetPrologueState() PrologueState

	// Length is the number of bytes in the chunk
	Length() int64

	// HasPrefetchedEntirelyZeros gives an indication of whether this chunk is entirely zeros. If it returns true
	// then the chunk content has been prefetched AND it was all zeroes. For some remote destinations that support "sparse file"
	// semantics, it is safe and correct to skip the upload of those chunks where this returns true.
	// In the rare edge case where this returns false due to the prefetch having failed (rather than the contents being non-zero),
	// we'll just treat it as a non-zero chunk. That's simpler (to code, to review and to test) than having this code force a prefetch.
	HasPrefetchedEntirelyZeros() bool

	// WriteBufferTo writes the entire contents of the prefetched buffer to h.
	// Panics if the internal buffer has not been prefetched (or if it's been discarded after a complete Read).
	WriteBufferTo(h hash.Hash)
}
Reader of ONE chunk of a file. May be used to re-read multiple times (e.g. if we must retry the sending of the chunk). An instance of this type cannot be used by multiple threads (since its Read/Seek are inherently stateful). The reader can throw away the data after each successful read, and then re-read it from disk if there is a need to retry the transmission of the chunk. That saves us the RAM cost of having to keep every transmitted chunk in RAM until acknowledged by the service. We just re-read if the service says we need to retry. Although there's a time (performance) cost in the re-read, that's fine in a retry situation because the retry indicates we were going too fast for the service anyway.
func NewSingleChunkReader ¶
func NewSingleChunkReader(ctx context.Context, sourceFactory ChunkReaderSourceFactory, chunkId ChunkID, length int64, chunkLogger ChunkStatusLogger, generalLogger ILogger, slicePool ByteSlicePooler, cacheLimiter CacheLimiter) SingleChunkReader
type TestOAuthInjection ¶
TestOAuthInjection controls variables for OAuth testing injections
type TransferDetail ¶
type TransferDetail struct {
	Src                string
	Dst                string
	IsFolderProperties bool
	TransferStatus     TransferStatus
	TransferSize       uint64
	ErrorCode          int32  `json:",string"`
	ErrorMessage       string `json:",string"`
}
TransferDetail represents the details of a single transfer.
type TransferDirection ¶
type TransferDirection int32
func (*TransferDirection) AtomicLoad ¶
func (td *TransferDirection) AtomicLoad() TransferDirection
func (*TransferDirection) AtomicStore ¶
func (td *TransferDirection) AtomicStore(newTransferDirection TransferDirection)
func (TransferDirection) Download ¶
func (TransferDirection) Download() TransferDirection
func (*TransferDirection) Parse ¶
func (td *TransferDirection) Parse(s string) error
func (TransferDirection) S2SCopy ¶
func (TransferDirection) S2SCopy() TransferDirection
func (TransferDirection) String ¶
func (td TransferDirection) String() string
func (TransferDirection) UnKnown ¶
func (TransferDirection) UnKnown() TransferDirection
func (TransferDirection) Upload ¶
func (TransferDirection) Upload() TransferDirection
type TransferStatus ¶
type TransferStatus int32 // Must be 32-bit for atomic operations; negative #s represent a specific failure code
func (TransferStatus) All ¶
func (TransferStatus) All() TransferStatus
All matches a transfer in any of the three possible states (InProgress, Completed or Failed).
func (*TransferStatus) AtomicLoad ¶
func (ts *TransferStatus) AtomicLoad() TransferStatus
func (*TransferStatus) AtomicStore ¶
func (ts *TransferStatus) AtomicStore(newTransferStatus TransferStatus)
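Because a TransferStatus may be read and written from multiple goroutines, the atomic accessors should be used; a minimal in-package sketch (the wrapping struct is illustrative):

type transferState struct {
	status TransferStatus
}

func (t *transferState) markSuccess() {
	t.status.AtomicStore(t.status.Success()) // safe concurrent write
}

func (t *transferState) succeeded() bool {
	return t.status.AtomicLoad() == t.status.Success() // safe concurrent read
}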
func (TransferStatus) BlobTierFailure ¶
func (TransferStatus) BlobTierFailure() TransferStatus
Transfer failed due to a failure while setting the blob tier.
func (TransferStatus) Cancelled ¶
func (TransferStatus) Cancelled() TransferStatus
func (TransferStatus) Failed ¶
func (TransferStatus) Failed() TransferStatus
Transfer failed due to some error.
func (TransferStatus) FolderCreated ¶
func (TransferStatus) FolderCreated() TransferStatus
Folder was created, but properties have not been persisted yet. Equivalent to Started, but never intended to be set on anything BUT folders.
func (TransferStatus) MarshalJSON ¶
func (ts TransferStatus) MarshalJSON() ([]byte, error)
Implementing MarshalJSON() method for type TransferStatus.
func (TransferStatus) NotStarted ¶
func (TransferStatus) NotStarted() TransferStatus
NotStarted means the transfer is ready, but has not started transferring yet.
func (*TransferStatus) Parse ¶
func (ts *TransferStatus) Parse(s string) error
func (TransferStatus) ShouldTransfer ¶
func (ts TransferStatus) ShouldTransfer() bool
func (TransferStatus) SkippedBlobHasSnapshots ¶
func (TransferStatus) SkippedBlobHasSnapshots() TransferStatus
func (TransferStatus) SkippedEntityAlreadyExists ¶
func (TransferStatus) SkippedEntityAlreadyExists() TransferStatus
func (TransferStatus) Started ¶
func (TransferStatus) Started() TransferStatus
TODO: confirm whether this is actually needed.
Outdated: Transfer started & at least 1 chunk has successfully been transferred. Used to resume a transfer that had started, to avoid re-transferring all chunks, thereby improving performance.
Update (Jul 2020): This represents the state of the transfer as soon as the file is scheduled.
func (TransferStatus) String ¶
func (ts TransferStatus) String() string
func (TransferStatus) Success ¶
func (TransferStatus) Success() TransferStatus
Transfer successfully completed
func (TransferStatus) TierAvailabilityCheckFailure ¶
func (TransferStatus) TierAvailabilityCheckFailure() TransferStatus
func (*TransferStatus) UnmarshalJSON ¶
func (ts *TransferStatus) UnmarshalJSON(b []byte) error
Implementing UnmarshalJSON() method for type TransferStatus.
type Transfers ¶
type Transfers struct {
	List                []CopyTransfer
	TotalSizeInBytes    uint64
	FileTransferCount   uint32
	FolderTransferCount uint32
}
Transfers describes each file/folder being transferred in a given JobPartOrder, and other auxiliary details of this order.
type URLExtension ¶
func (URLExtension) RedactSecretQueryParamForLogging ¶
func (u URLExtension) RedactSecretQueryParamForLogging() string
func (URLExtension) URLWithPlusDecodedInPath ¶
func (u URLExtension) URLWithPlusDecodedInPath() url.URL
URLWithPlusDecodedInPath returns a URL with '+' in the path decoded as ' ' (space). This is useful in cases where, e.g., the S3 management console encodes ' ' (space) as '+', which is not supported by Azure resources.
type URLStringExtension ¶
type URLStringExtension string
func (URLStringExtension) RedactSecretQueryParamForLogging ¶
func (s URLStringExtension) RedactSecretQueryParamForLogging() string
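A small in-package sketch of using the string form before logging a URL that may carry a secret query parameter (e.g. a SAS); the helper name is illustrative.

func redactedForLog(rawURL string) string {
	// The secret query parameter comes back redacted, so the result is safe to write to logs.
	return URLStringExtension(rawURL).RedactSecretQueryParamForLogging()
}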
type UUID ¶
A UUID representation compliant with the specification in RFC 4122.
func ParseUUID ¶
ParseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f" or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
func (UUID) MarshalJSON ¶
Implementing MarshalJSON() method for type UUID
func (*UUID) UnmarshalJSON ¶
Implementing UnmarshalJSON() method for type UUID
type UserOAuthTokenManager ¶
type UserOAuthTokenManager struct {
// contains filtered or unexported fields
}
UserOAuthTokenManager for token management.
func NewUserOAuthTokenManagerInstance ¶
func NewUserOAuthTokenManagerInstance(credCacheOptions CredCacheOptions) *UserOAuthTokenManager
NewUserOAuthTokenManagerInstance creates a token manager instance.
func (*UserOAuthTokenManager) CertLogin ¶
func (uotm *UserOAuthTokenManager) CertLogin(tenantID, activeDirectoryEndpoint, certPath, certPass, applicationID string, persist bool) (*OAuthTokenInfo, error)
CertLogin non-interactively logs in using a specified certificate, certificate password, and Active Directory endpoint.
func (*UserOAuthTokenManager) GetTokenInfo ¶
func (uotm *UserOAuthTokenManager) GetTokenInfo(ctx context.Context) (*OAuthTokenInfo, error)
GetTokenInfo gets token info, following these rules:
- If a token is passed via environment variable (note: this is only for testing purposes), use the token from the environment variable.
- Otherwise, try to get the token from the cache.
This method either successfully returns a token, or returns an error.
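A sketch of the typical flow (in-package sketch; the helper name is illustrative and the CredCacheOptions value is assumed to be configured elsewhere):

func currentToken(ctx context.Context, credCacheOptions CredCacheOptions) (*OAuthTokenInfo, error) {
	uotm := NewUserOAuthTokenManagerInstance(credCacheOptions)
	if ok, err := uotm.HasCachedToken(); err == nil && !ok {
		// no cached token: a login step (e.g. UserLogin or MSILogin) would be needed here
	}
	return uotm.GetTokenInfo(ctx) // env-var token (testing only) first, then the cache
}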
func (*UserOAuthTokenManager) HasCachedToken ¶
func (uotm *UserOAuthTokenManager) HasCachedToken() (bool, error)
HasCachedToken returns whether there is a cached token in the token manager.
func (*UserOAuthTokenManager) MSILogin ¶
func (uotm *UserOAuthTokenManager) MSILogin(ctx context.Context, identityInfo IdentityInfo, persist bool) (*OAuthTokenInfo, error)
MSILogin tries to get a token from MSI; persist indicates whether to cache the token on local disk.
func (*UserOAuthTokenManager) RemoveCachedToken ¶
func (uotm *UserOAuthTokenManager) RemoveCachedToken() error
RemoveCachedToken deletes all cached tokens.
func (*UserOAuthTokenManager) SecretLogin ¶
func (uotm *UserOAuthTokenManager) SecretLogin(tenantID, activeDirectoryEndpoint, secret, applicationID string, persist bool) (*OAuthTokenInfo, error)
SecretLogin is a UOTM shell for secretLoginNoUOTM.
func (*UserOAuthTokenManager) UserLogin ¶
func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint string, persist bool) (*OAuthTokenInfo, error)
UserLogin interactively logs in with the specified tenantID and activeDirectoryEndpoint; persist indicates whether to cache the token on local disk.
type WaitReason ¶
type WaitReason struct { Name string // contains filtered or unexported fields }
WaitReason identifies the one thing that a given chunk is waiting on, at a given moment. Basically = state, phrased in terms of "the thing I'm waiting for"
func (WaitReason) Body ¶
func (WaitReason) Body() WaitReason
func (WaitReason) BodyReReadDueToMem ¶
func (WaitReason) BodyReReadDueToMem() WaitReason
func (WaitReason) BodyReReadDueToSpeed ¶
func (WaitReason) BodyReReadDueToSpeed() WaitReason
func (WaitReason) Cancelled ¶
func (WaitReason) Cancelled() WaitReason
NOTE: when adding new statuses please renumber to make Cancelled numerically the last, to avoid the need to also change numWaitReasons()
func (WaitReason) ChunkDone ¶
func (WaitReason) ChunkDone() WaitReason
func (WaitReason) CreateLocalFile ¶
func (WaitReason) CreateLocalFile() WaitReason
func (WaitReason) DiskIO ¶
func (WaitReason) DiskIO() WaitReason
func (WaitReason) Epilogue ¶
func (WaitReason) Epilogue() WaitReason
func (WaitReason) FilePacer ¶
func (WaitReason) FilePacer() WaitReason
func (WaitReason) HeaderResponse ¶
func (WaitReason) HeaderResponse() WaitReason
func (WaitReason) LockDestination ¶
func (WaitReason) LockDestination() WaitReason
func (WaitReason) ModifiedTimeRefresh ¶
func (WaitReason) ModifiedTimeRefresh() WaitReason
func (WaitReason) Nothing ¶
func (WaitReason) Nothing() WaitReason
Head (below) has index between GB and Body, just so the ordering is numerically ascending during a typical chunk lifetime for both upload and download. We use just the first letters of these when displaying perf states as we run (if enabled), so try to keep the first letters unique (except for Done and Cancelled, which are not displayed, and so may duplicate the first letter of something else).
func (WaitReason) OpenLocalSource ¶
func (WaitReason) OpenLocalSource() WaitReason
func (WaitReason) PriorChunk ¶
func (WaitReason) PriorChunk() WaitReason
func (WaitReason) QueueToWrite ¶
func (WaitReason) QueueToWrite() WaitReason
func (WaitReason) RAMToSchedule ¶
func (WaitReason) RAMToSchedule() WaitReason
func (WaitReason) S2SCopyOnWire ¶
func (WaitReason) S2SCopyOnWire() WaitReason
func (WaitReason) Sorting ¶
func (WaitReason) Sorting() WaitReason
func (WaitReason) String ¶
func (wr WaitReason) String() string
func (WaitReason) WorkerGR ¶
func (WaitReason) WorkerGR() WaitReason
func (WaitReason) XferStart ¶
func (WaitReason) XferStart() WaitReason
extra ones for start of uploads (prior to chunk scheduling)
type WorkController ¶
type WorkController interface {
	Cancel(mgr LifecycleMgr)                                        // handle to cancel the work
	ReportProgressOrExit(mgr LifecycleMgr) (totalKnownCount uint32) // print the progress status, optionally exit the application if work is done
}
For the lifecycleMgr to babysit a job, it must be given a controller to get information about the job.
Source Files ¶
- CountPerSecond.go
- LongPathHandler.go
- ProxyLookupCache.go
- atomicmorph.go
- azError.go
- cacheLimiter.go
- chunkStatusLogger.go
- chunkedFileWriter.go
- cpuMonitor.go
- credCacheGnomeKeyringShim_linux.go
- credCacheInternal_linux.go
- credCacheModel.go
- credCache_linux.go
- credentialFactory.go
- decompressingWriter.go
- emptyChunkReader.go
- environment.go
- exclusiveStringMap.go
- extensions.go
- fe-ste-models.go
- folderCreationTracker_interface.go
- folderDeletionManager.go
- gcpModels.go
- gcpURLParts.go
- genericResourceURLParts.go
- iff.go
- lifecyleMgr.go
- logSanitizer.go
- logger.go
- mmf_linux.go
- multiSizeSlicePool.go
- nocopy.go
- nullHasher.go
- oauthTokenManager.go
- osOpen_fallback.go
- output.go
- prologueState.go
- randomDataGenerator.go
- rpc-models.go
- s3Models.go
- s3URLParts.go
- singleChunkReader.go
- util.go
- uuid.go
- version.go
- writeThoughFile.go
- writeThoughFile_linux.go