Documentation ¶
Index ¶
- Constants
- Variables
- func Execute(logPathFolder, jobPlanFolder string, maxFileAndSocketHandles int, ...)
- func GetAccountRoot(resource common.ResourceString, location common.Location) (string, error)
- func GetContainerName(path string, location common.Location) (string, error)
- func GetCredTypeFromEnvVar() common.CredentialType
- func GetCredentialInfoForLocation(ctx context.Context, location common.Location, resource, resourceSAS string, ...) (credInfo common.CredentialInfo, isPublic bool, err error)
- func GetOAuthTokenManagerInstance() (*common.UserOAuthTokenManager, error)
- func GetResourceRoot(resource string, location common.Location) (resourceBase string, err error)
- func GetUserOAuthTokenManagerInstance() *common.UserOAuthTokenManager
- func HandleListJobsCommand(jobStatus common.JobStatus) error
- func HandlePauseCommand(jobIdString string)
- func HandleShowCommand(listRequest common.ListRequest) error
- func InferArgumentLocation(arg string) common.Location
- func InitPipeline(ctx context.Context, location common.Location, ...) (p pipeline.Pipeline, err error)
- func IsDestinationCaseInsensitive(fromTo common.FromTo) bool
- func NewFolderPropertyOption(fromTo common.FromTo, recursive, stripTopDir bool, filters []ObjectFilter, ...) (common.FolderPropertyOption, string)
- func ParseSizeString(s string, name string) (int64, error)
- func PrintExistingJobIds(listJobResponse common.ListJobsResponse) error
- func PrintJobProgressSummary(summary common.ListJobSummaryResponse)
- func PrintJobTransfers(listTransfersResponse common.ListJobTransfersResponse)
- func SplitResourceString(raw string, loc common.Location) (common.ResourceString, error)
- func UnfurlSymlinks(symlinkPath string) (result string, err error)
- func ValidateFromTo(src, dst string, userSpecifiedFromTo string) (common.FromTo, error)
- func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath.WalkFunc, ...) (err error)
- func WarnStdoutAndScanningLog(toLog string)
- func WrapFolder(fullpath string, stat os.FileInfo) (os.FileInfo, error)
- type AccountTraverser
- type BucketToContainerNameResolver
- type CookedCopyCmdArgs
- func (cca *CookedCopyCmdArgs) Cancel(lcm common.LifecycleMgr)
- func (cca *CookedCopyCmdArgs) InitModularFilters() []ObjectFilter
- func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool, asSubdir bool, object StoredObject) (relativePath string)
- func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (totalKnownCount uint32)
- type CopyEnumerator
- type ErrorFileInfo
- type FilterSet
- type GCPBucketNameToAzureResourcesResolver
- type IncludeAfterDateFilter
- func (f *IncludeAfterDateFilter) AppliesOnlyToFiles() bool
- func (f *IncludeAfterDateFilter) DoesPass(storedObject StoredObject) bool
- func (f *IncludeAfterDateFilter) DoesSupportThisOS() (msg string, supported bool)
- func (_ IncludeAfterDateFilter) FormatAsUTC(t time.Time) string
- func (_ IncludeAfterDateFilter) ParseISO8601(s string, chooseEarliest bool) (time.Time, error)
- type IncludeBeforeDateFilter
- func (f *IncludeBeforeDateFilter) AppliesOnlyToFiles() bool
- func (f *IncludeBeforeDateFilter) DoesPass(storedObject StoredObject) bool
- func (f *IncludeBeforeDateFilter) DoesSupportThisOS() (msg string, supported bool)
- func (_ IncludeBeforeDateFilter) FormatAsUTC(t time.Time) string
- func (_ IncludeBeforeDateFilter) ParseISO8601(s string, chooseEarliest bool) (time.Time, error)
- type IncludeFilter
- type ListReq
- type ListResponse
- type LocationLevel
- type ObjectFilter
- type ResourceTraverser
- type S3BucketNameToAzureResourcesResolver
- type StoredObject
- type Version
Constants ¶
const MAX_SYMLINKS_TO_FOLLOW = 40
const (
NumOfFilesPerDispatchJobPart = 10000
)
const PreservePermissionsFlag = "preserve-permissions"
Variables ¶
var AzcopyAppPathFolder string
var EnumerationParallelStatFiles = false
var EnumerationParallelism = 1
var ErrorHashAsyncCalculation = errors.New("hash is calculating asynchronously")
ErrorHashAsyncCalculation is not a strict "the hash is unobtainable", but a "the hash is not currently present". In effect, when it is returned, it indicates we have placed the target onto a queue to be handled later. It can be treated like a promise, and the item can cease processing in the immediate term. This option is only used locally on sync-downloads when the user has specified that azcopy should create a new hash.
var ErrorHashNoLongerValid = errors.New("attached hash no longer valid")
var ErrorHashNotCompatible = errors.New("hash types do not match")
var ErrorNoHashPresent = errors.New("no hash present on file")
ErrorNoHashPresent, ErrorHashNoLongerValid, and ErrorHashNotCompatible indicate a hash is not present, not obtainable, and/or not usable. For the sake of best-effort, when these errors are emitted, the sync hash policy determines how the affected file is handled.
var FinalPartCreatedMessage = "Final job part has been created"
var IPv4Regex = regexp.MustCompile(`\d+\.\d+\.\d+\.\d+`) // simple regex
var NothingScheduledError = errors.New("no transfers were scheduled because no files matched the specified criteria")
var NothingToRemoveError = errors.New("nothing found to remove")
var Rpc = func(cmd common.RpcCmd, request interface{}, response interface{}) { err := inprocSend(cmd, request, response) common.PanicIfErr(err) }
Global singleton for sending RPC requests from the frontend to the STE
Functions ¶
func GetAccountRoot ¶
func GetContainerName ¶
func GetCredTypeFromEnvVar ¶
func GetCredTypeFromEnvVar() common.CredentialType
GetCredTypeFromEnvVar tries to get credential type from environment variable defined by envVarCredentialType.
func GetCredentialInfoForLocation ¶ added in v10.12.2
func GetOAuthTokenManagerInstance ¶
func GetOAuthTokenManagerInstance() (*common.UserOAuthTokenManager, error)
GetOAuthTokenManagerInstance returns the OAuth token manager, obtained by auto-login, for the current instance of AzCopy.
func GetResourceRoot ¶
GetResourceRoot should eliminate wildcards and error out in invalid scenarios. This is intended for the jobPartOrder.SourceRoot.
func GetUserOAuthTokenManagerInstance ¶
func GetUserOAuthTokenManagerInstance() *common.UserOAuthTokenManager
GetUserOAuthTokenManagerInstance gets or creates OAuthTokenManager for current user. Note: Currently, only support to have TokenManager for one user mapping to one tenantID.
func HandleListJobsCommand ¶
HandleListJobsCommand sends the ListJobs request to the transfer engine and prints the jobs in the history of AzCopy.
func HandlePauseCommand ¶
func HandlePauseCommand(jobIdString string)
HandlePauseCommand handles the pause command and dispatches the pause job order to the storage engine.
func HandleShowCommand ¶
func HandleShowCommand(listRequest common.ListRequest) error
HandleShowCommand handles the list command and dispatches the list order to the transfer engine.
func InferArgumentLocation ¶ added in v10.12.2
func InitPipeline ¶ added in v10.12.2
func IsDestinationCaseInsensitive ¶ added in v10.12.0
func NewFolderPropertyOption ¶ added in v10.18.0
func NewFolderPropertyOption(fromTo common.FromTo, recursive, stripTopDir bool, filters []ObjectFilter, preserveSmbInfo, preservePermissions, preservePosixProperties, isDstNull, includeDirectoryStubs bool) (common.FolderPropertyOption, string)
we assume that preserveSmbPermissions and preserveSmbInfo have already been validated, such that they are only true if both resource types support them
func PrintExistingJobIds ¶
func PrintExistingJobIds(listJobResponse common.ListJobsResponse) error
PrintExistingJobIds prints the response of listOrder command when listOrder command requested the list of existing jobs
func PrintJobProgressSummary ¶
func PrintJobProgressSummary(summary common.ListJobSummaryResponse)
PrintJobProgressSummary prints the response of listOrder command when listOrder command requested the progress summary of an existing job
func PrintJobTransfers ¶
func PrintJobTransfers(listTransfersResponse common.ListJobTransfersResponse)
PrintJobTransfers prints the response of the listOrder command when the listOrder command requested the list of specific transfers of an existing job.
func SplitResourceString ¶
func UnfurlSymlinks ¶
func ValidateFromTo ¶ added in v10.12.2
func WalkWithSymlinks ¶
func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath.WalkFunc, symlinkHandling common.SymlinkHandlingType, errorChannel chan ErrorFileInfo) (err error)
WalkWithSymlinks is a symlinks-aware, parallelized, version of filePath.Walk. Separate this from the traverser for two purposes: 1) Cleaner code 2) Easier to test individually than to test the entire traverser.
func WarnStdoutAndScanningLog ¶ added in v10.11.0
func WarnStdoutAndScanningLog(toLog string)
Types ¶
type AccountTraverser ¶ added in v10.12.2
type AccountTraverser interface { ResourceTraverser // contains filtered or unexported methods }
type CookedCopyCmdArgs ¶ added in v10.12.2
type CookedCopyCmdArgs struct { // from arguments Source common.ResourceString Destination common.ResourceString FromTo common.FromTo // new include/exclude only apply to file names // implemented for remove (and sync) only // includePathPatterns are handled like a list-of-files. Do not panic. This is not a bug that it is not present here. IncludePatterns []string ExcludePatterns []string ExcludePathPatterns []string IncludeFileAttributes []string ExcludeFileAttributes []string IncludeBefore *time.Time IncludeAfter *time.Time // list of version ids ListOfVersionIDs chan string // filters from flags ListOfFilesChannel chan string // Channels are nullable. Recursive bool StripTopDir bool SymlinkHandling common.SymlinkHandlingType ForceWrite common.OverwriteOption // says whether we should try to overwrite ForceIfReadOnly bool // says whether we should _force_ any overwrites (triggered by forceWrite) to work on Azure Files objects that are set to read-only IsSourceDir bool CheckLength bool // To specify whether user wants to preserve the blob index tags during service to service transfer. S2sPreserveBlobTags bool // whether to include blobs that have metadata 'hdi_isfolder = true' IncludeDirectoryStubs bool CpkOptions common.CpkOptions // contains filtered or unexported fields }
represents the processed copy command input from the user
func (*CookedCopyCmdArgs) Cancel ¶ added in v10.12.2
func (cca *CookedCopyCmdArgs) Cancel(lcm common.LifecycleMgr)
func (*CookedCopyCmdArgs) InitModularFilters ¶ added in v10.12.2
func (cca *CookedCopyCmdArgs) InitModularFilters() []ObjectFilter
Initialize the modular filters outside of copy to increase readability.
func (*CookedCopyCmdArgs) MakeEscapedRelativePath ¶ added in v10.12.2
func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool, asSubdir bool, object StoredObject) (relativePath string)
func (*CookedCopyCmdArgs) ReportProgressOrExit ¶ added in v10.12.2
func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (totalKnownCount uint32)
type CopyEnumerator ¶ added in v10.12.2
type CopyEnumerator struct { Traverser ResourceTraverser // general filters apply to the objects returned by the traverser Filters []ObjectFilter // receive objects from the traverser and dispatch them for transferring ObjectDispatcher objectProcessor // a finalizer that is always called if the enumeration finishes properly Finalize func() error }
func NewCopyEnumerator ¶ added in v10.12.2
func NewCopyEnumerator(traverser ResourceTraverser, filters []ObjectFilter, objectDispatcher objectProcessor, finalizer func() error) *CopyEnumerator
type ErrorFileInfo ¶ added in v10.16.0
ErrorFileInfo holds information about files and folders that failed enumeration.
type FilterSet ¶ added in v10.12.2
type FilterSet []ObjectFilter
func (FilterSet) GetEnumerationPreFilter ¶ added in v10.12.2
GetEnumerationPreFilter returns a prefix that is common to all the include filters, or "" if no such prefix can be found. (The implementation may return "" even in cases where such a prefix does exist, but in at least the simplest cases, it should return a non-empty prefix.) The result can be used to optimize enumeration, since anything without this prefix will fail the FilterSet
type GCPBucketNameToAzureResourcesResolver ¶
type GCPBucketNameToAzureResourcesResolver struct {
// contains filtered or unexported fields
}
func NewGCPBucketNameToAzureResourcesResolver ¶
func NewGCPBucketNameToAzureResourcesResolver(gcpBucketNames []string) *GCPBucketNameToAzureResourcesResolver
func (*GCPBucketNameToAzureResourcesResolver) ResolveName ¶
func (resolver *GCPBucketNameToAzureResourcesResolver) ResolveName(bucketName string) (string, error)
type IncludeAfterDateFilter ¶ added in v10.12.2
IncludeAfterDateFilter includes files with Last Modified Times >= the specified threshold. Used for copy, but doesn't make conceptual sense for sync.
func (*IncludeAfterDateFilter) AppliesOnlyToFiles ¶ added in v10.12.2
func (f *IncludeAfterDateFilter) AppliesOnlyToFiles() bool
func (*IncludeAfterDateFilter) DoesPass ¶ added in v10.12.2
func (f *IncludeAfterDateFilter) DoesPass(storedObject StoredObject) bool
func (*IncludeAfterDateFilter) DoesSupportThisOS ¶ added in v10.12.2
func (f *IncludeAfterDateFilter) DoesSupportThisOS() (msg string, supported bool)
func (IncludeAfterDateFilter) FormatAsUTC ¶ added in v10.12.2
func (_ IncludeAfterDateFilter) FormatAsUTC(t time.Time) string
func (IncludeAfterDateFilter) ParseISO8601 ¶ added in v10.12.2
type IncludeBeforeDateFilter ¶ added in v10.12.2
IncludeBeforeDateFilter includes files with Last Modified Times <= the specified threshold. Used for copy, but doesn't make conceptual sense for sync.
func (*IncludeBeforeDateFilter) AppliesOnlyToFiles ¶ added in v10.12.2
func (f *IncludeBeforeDateFilter) AppliesOnlyToFiles() bool
func (*IncludeBeforeDateFilter) DoesPass ¶ added in v10.12.2
func (f *IncludeBeforeDateFilter) DoesPass(storedObject StoredObject) bool
func (*IncludeBeforeDateFilter) DoesSupportThisOS ¶ added in v10.12.2
func (f *IncludeBeforeDateFilter) DoesSupportThisOS() (msg string, supported bool)
func (IncludeBeforeDateFilter) FormatAsUTC ¶ added in v10.12.2
func (_ IncludeBeforeDateFilter) FormatAsUTC(t time.Time) string
func (IncludeBeforeDateFilter) ParseISO8601 ¶ added in v10.12.2
type IncludeFilter ¶ added in v10.12.2
type IncludeFilter struct {
// contains filtered or unexported fields
}
design explanation: include filters are different from the exclude ones. Exclude filters work together in the "AND" manner, meaning that if a StoredObject is rejected by any of the exclude filters, then it is rejected by all of them; as a result, the exclude filters can each be in their own struct and still work correctly. On the other hand, include filters work in the "OR" manner, meaning that if a StoredObject is accepted by any of the include filters, then it is accepted by all of them; consequently, all the include patterns must be stored together.
func (*IncludeFilter) AppliesOnlyToFiles ¶ added in v10.12.2
func (f *IncludeFilter) AppliesOnlyToFiles() bool
func (*IncludeFilter) DoesPass ¶ added in v10.12.2
func (f *IncludeFilter) DoesPass(storedObject StoredObject) bool
func (*IncludeFilter) DoesSupportThisOS ¶ added in v10.12.2
func (f *IncludeFilter) DoesSupportThisOS() (msg string, supported bool)
type ListResponse ¶
type ListResponse struct {
ErrorMsg string
}
type LocationLevel ¶
type LocationLevel uint8
----- LOCATION LEVEL HANDLING -----
var ELocationLevel LocationLevel = 0
func DetermineLocationLevel ¶ added in v10.12.2
func DetermineLocationLevel(location string, locationType common.Location, source bool) (LocationLevel, error)
Uses syntax to assume the "level" of a location. This is typically used to distinguish whether a location refers to a service, a container, or an object.
func (LocationLevel) Container ¶
func (LocationLevel) Container() LocationLevel
func (LocationLevel) Object ¶
func (LocationLevel) Object() LocationLevel
func (LocationLevel) Service ¶
func (LocationLevel) Service() LocationLevel
type ObjectFilter ¶ added in v10.12.2
type ObjectFilter interface { DoesSupportThisOS() (msg string, supported bool) DoesPass(storedObject StoredObject) bool AppliesOnlyToFiles() bool }
given a StoredObject, verify if it satisfies the defined conditions; if yes, return true
type ResourceTraverser ¶ added in v10.12.2
type ResourceTraverser interface { Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) error IsDirectory(isSource bool) (bool, error) }
capable of traversing a structured resource like a container or local directory; passes each StoredObject to the given objectProcessor if it passes all the filters
func InitResourceTraverser ¶ added in v10.12.2
func InitResourceTraverser(resource common.ResourceString, location common.Location, ctx *context.Context, credential *common.CredentialInfo, symlinkHandling common.SymlinkHandlingType, listOfFilesChannel chan string, recursive, getProperties, includeDirectoryStubs bool, permanentDeleteOption common.PermanentDeleteOption, incrementEnumerationCounter enumerationCounterFunc, listOfVersionIds chan string, s2sPreserveBlobTags bool, syncHashType common.SyncHashType, preservePermissions common.PreservePermissionsOption, logLevel pipeline.LogLevel, cpkOptions common.CpkOptions, errorChannel chan ErrorFileInfo, stripTopDir bool, trailingDot common.TrailingDotOption, p pipeline.Pipeline, destination *common.Location) (ResourceTraverser, error)
source, location, recursive, and incrementEnumerationCounter are always required. ctx, pipeline are only required for remote resources. symlinkHandling is only required for local resources (defaults to false) errorOnDirWOutRecursive is used by copy. If errorChannel is non-nil, all errors encountered during enumeration will be conveyed through this channel. To avoid slowdowns, use a buffered channel of enough capacity.
type S3BucketNameToAzureResourcesResolver ¶
type S3BucketNameToAzureResourcesResolver struct {
// contains filtered or unexported fields
}
S3BucketNameToAzureResourcesResolver resolves an S3 bucket name to an Azure Blob container/ADLS Gen2 filesystem/File share. For Azure, container/filesystem/share naming follows: 1. Lower case letters, numbers and hyphens. 2. 3-63 characters in length. 3. The name should not contain two consecutive hyphens. 4. The name should not start or end with a hyphen. For S3, bucket naming follows: 1. The bucket name can be between 3 and 63 characters long, and can contain only lower-case characters, numbers, periods, and dashes. 2. Each label in the bucket name must start with a lowercase letter or number. 3. The bucket name cannot contain underscores, end with a dash or period, have consecutive periods, or use dashes adjacent to periods. 4. The bucket name cannot be formatted as an IP address (198.51.100.24). Two common cases that need to be solved are: 1. A bucket name with periods. In this case, AzCopy tries to replace each period with a hyphen, e.g. bucket.with.period -> bucket-with-period. 2. A bucket name with consecutive hyphens. In this case, AzCopy tries to replace the consecutive hyphens with -[numberOfHyphens]-, e.g. bucket----hyphens -> bucket-4-hyphens. The resolver checks if there are naming collisions with other existing bucket names, and tries to add a suffix when there is any collision, e.g. given buckets named bucket-name and bucket.name, AzCopy will resolve bucket.name -> bucket-name -> bucket-name-2. All the resolving should be logged and warned to the user.
func NewS3BucketNameToAzureResourcesResolver ¶
func NewS3BucketNameToAzureResourcesResolver(s3BucketNames []string) *S3BucketNameToAzureResourcesResolver
NewS3BucketNameToAzureResourcesResolver creates S3BucketNameToAzureResourcesResolver. Users can provide bucket names upfront and on-demand via ResolveName. Previously resolved names will be returned outright by ResolveName.
func (*S3BucketNameToAzureResourcesResolver) ResolveName ¶
func (s3Resolver *S3BucketNameToAzureResourcesResolver) ResolveName(bucketName string) (string, error)
ResolveName returns resolved name for given bucket name.
type StoredObject ¶ added in v10.12.2
type StoredObject struct { // container source, only included by account traversers. ContainerName string // destination container name. Included in the processor after resolving container names. DstContainerName string // metadata, included in S2S transfers Metadata common.Metadata // contains filtered or unexported fields }
represents a local or remote resource object (ex: local file, blob, etc.); we can add more properties if needed, as this is easily extensible ** DO NOT instantiate directly, always use newStoredObject ** (to make sure it's fully populated and any preprocessor method runs)
func (*StoredObject) ToNewCopyTransfer ¶ added in v10.12.2
func (s *StoredObject) ToNewCopyTransfer(steWillAutoDecompress bool, Source string, Destination string, preserveBlobTier bool, folderPropertiesOption common.FolderPropertyOption, symlinkHandlingType common.SymlinkHandlingType) (transfer common.CopyTransfer, shouldSendToSte bool)
type Version ¶
type Version struct {
// contains filtered or unexported fields
}
func NewVersion ¶
To keep the code simple, we assume we only use a simple subset of semantic versions. Namely, the version is either a normal stable version, or a pre-release version with '-preview' attached. Examples: 10.1.0, 11.2.0-preview
Source Files ¶
- benchmark.go
- cancel.go
- copy.go
- copyEnumeratorHelper.go
- copyEnumeratorInit.go
- copyUtil.go
- credentialUtil.go
- doc.go
- env.go
- gcpNameResolver.go
- helpMessages.go
- jobs.go
- jobsClean.go
- jobsList.go
- jobsRemove.go
- jobsResume.go
- jobsShow.go
- list.go
- load.go
- loadCLFS.go
- login.go
- loginStatus.go
- logout.go
- make.go
- pathUtils.go
- pause.go
- remove.go
- removeEnumerator.go
- removeProcessor.go
- root.go
- rpc.go
- s3NameResolver.go
- setProperties.go
- setPropertiesEnumerator.go
- setPropertiesProcessor.go
- sync.go
- syncComparator.go
- syncEnumerator.go
- syncIndexer.go
- syncProcessor.go
- validators.go
- versionChecker.go
- zc_attr_filter_notwin.go
- zc_enumerator.go
- zc_filter.go
- zc_newobjectadapters.go
- zc_pipeline_init.go
- zc_processor.go
- zc_traverser_benchmark.go
- zc_traverser_blob.go
- zc_traverser_blob_account.go
- zc_traverser_blob_versions.go
- zc_traverser_file.go
- zc_traverser_file_account.go
- zc_traverser_gcp.go
- zc_traverser_gcp_service.go
- zc_traverser_list.go
- zc_traverser_local.go
- zc_traverser_local_other.go
- zc_traverser_s3.go
- zc_traverser_s3_service.go