file

package
v1.0.0 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Oct 17, 2023 License: MIT Imports: 30 Imported by: 8

Documentation

Overview

Example (File_AppendAndFlushDataWithValidation)

make sure you create the filesystem before running this example

package main

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared"
	"hash/crc64"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main appends 8KB of data to a file with server-side CRC64 validation,
// flushes it, and verifies the committed content length.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Build a client for the target file using shared-key auth.
	fileURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, nil)
	handleError(err)

	// Prepare an 8KB payload and compute its CRC64 locally for comparison.
	const contentSize = 1024 * 8 // 8KB
	content := make([]byte, contentSize)
	rsc := streaming.NopCloser(bytes.NewReader(content))
	contentCRC64 := crc64.Checksum(content, shared.CRC64Table)

	// Ask the service to compute and return a CRC64 for the appended range.
	appendOpts := &file.AppendDataOptions{
		TransactionalValidation: file.TransferValidationTypeComputeCRC64(),
	}
	putResp, err := fClient.AppendData(context.Background(), 0, rsc, appendOpts)
	handleError(err)
	fmt.Println(putResp.ContentCRC64)
	fmt.Println(binary.LittleEndian.Uint64(putResp.ContentCRC64), contentCRC64)

	// Flush to commit the appended bytes to the file.
	_, err = fClient.FlushData(context.Background(), int64(contentSize), nil)
	handleError(err)

	// Confirm the committed length matches what we appended.
	gResp, err := fClient.GetProperties(context.Background(), nil)
	handleError(err)
	fmt.Println(*gResp.ContentLength, int64(contentSize))
}
Output:

Example (File_Client_SetMetadata)

make sure you create the filesystem and file before running this example

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main sets custom metadata on a file, reads it back via GetProperties,
// and prints every key/value pair.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Build a client for the target file using shared-key auth.
	fileURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	fileClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, nil)
	handleError(err)

	// Attach custom metadata to the file.
	_, err = fileClient.SetMetadata(context.TODO(), map[string]*string{"author": to.Ptr("Tamer")}, nil)
	handleError(err)

	// Query the file's properties and metadata.
	props, err := fileClient.GetProperties(context.TODO(), nil)
	handleError(err)

	// Show the file's metadata.
	if props.Metadata == nil {
		log.Fatal("No metadata returned")
	}
	for key, value := range props.Metadata {
		fmt.Print(key + "=" + *value + "\n")
	}
}
Output:

Example (File_CreateAndDelete)

make sure you create the filesystem before running this example

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main creates a file and then deletes it.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Build a client for the target file using shared-key auth.
	fileURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fileClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, nil)
	handleError(err)

	// Create the file, then remove it again.
	ctx := context.Background()
	_, err = fileClient.Create(ctx, nil)
	handleError(err)
	_, err = fileClient.Delete(ctx, nil)
	handleError(err)
}
Output:

Example (File_CreateFileWithExpiryRelativeToNow)

make sure you create the filesystem before running this example

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
	"strconv"
	"time"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main creates a file that expires 8 seconds after creation, prints its
// expiry time, then waits past the expiry and probes it again.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Build a client for the target file using shared-key auth.
	fileURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, nil)
	handleError(err)

	// Expire the file 8 seconds from now; the value is expressed in milliseconds.
	createOpts := &file.CreateOptions{
		Expiry: file.CreateExpiryValues{
			ExpiryType: file.CreateExpiryTypeRelativeToNow,
			ExpiresOn:  strconv.FormatInt((8 * time.Second).Milliseconds(), 10),
		},
	}

	_, err = fClient.Create(context.Background(), createOpts)
	handleError(err)

	resp, err := fClient.GetProperties(context.Background(), nil)
	handleError(err)
	fmt.Println(*resp.ExpiresOn)

	// Wait past the expiry window and probe the file again.
	time.Sleep(time.Second * 10)
	_, err = fClient.GetProperties(context.Background(), nil)
	// we expect datalakeerror.PathNotFound
	// NOTE(review): handleError will terminate on that expected error —
	// presumably intentional for this example.
	handleError(err)
}
Output:

Example (File_CreateFileWithNeverExpire)

make sure you create the filesystem before running this example

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main creates a file configured to never expire and prints its expiry
// property, which should be absent for a never-expiring file.
func main() {
	accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Create a file client
	u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil)
	handleError(err)

	createFileOpts := &file.CreateOptions{
		Expiry: file.CreateExpiryValues{
			ExpiryType: file.CreateExpiryTypeNeverExpire,
		},
	}

	_, err = fClient.Create(context.Background(), createFileOpts)
	handleError(err)

	resp, err := fClient.GetProperties(context.Background(), nil)
	handleError(err)
	// ExpiresOn should be unset since we never expire; guard the dereference
	// so a nil pointer does not panic (the original dereferenced it blindly).
	if resp.ExpiresOn != nil {
		fmt.Println(*resp.ExpiresOn)
	}
}
Output:

Example (File_HTTPHeaders)

This example shows how to create a file with HTTP headers, how to read them, and how to update the file's HTTP headers. Make sure you create the filesystem and file before running this example.

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main sets HTTP headers on an existing file, then reads the properties
// back and prints the Content-Type and Content-Disposition values.
func main() {
	accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Create a file client
	u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fileClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil)
	handleError(err)

	// Set HTTP headers on the file (not a directory, despite the original comment).
	_, err = fileClient.SetHTTPHeaders(context.TODO(), file.HTTPHeaders{
		ContentType:        to.Ptr("text/html; charset=utf-8"),
		ContentDisposition: to.Ptr("attachment"),
	}, nil)
	handleError(err)

	get, err := fileClient.GetProperties(context.TODO(), nil)
	handleError(err)

	// Dereference the *string fields: printing the pointers themselves would
	// emit memory addresses rather than the header values.
	fmt.Println(*get.ContentType)
	fmt.Println(*get.ContentDisposition)
}
Output:

Example (File_Rename)

make sure you create the filesystem before running this example

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main creates a file and renames it; the original client is stale afterwards.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Build a client for the target file using shared-key auth.
	fileURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fileClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, nil)
	handleError(err)

	// Create the file, then rename it.
	ctx := context.Background()
	_, err = fileClient.Create(ctx, nil)
	handleError(err)
	_, err = fileClient.Rename(ctx, "renameFile", nil)
	handleError(err)
}
Output:

Example (File_SetACL)

Set an ACL on a file. Make sure you create the filesystem and file before running this example.

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"log"
	"os"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// main applies a POSIX-style ACL to an existing file.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// ACL to apply: rwx for user and other, r-x for group.
	acl := "user::rwx,group::r-x,other::rwx"

	// Build a client for the target file using shared-key auth.
	fileURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fileClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, nil)
	handleError(err)

	// Apply the ACL.
	_, err = fileClient.SetAccessControl(context.Background(), &file.SetAccessControlOptions{
		ACL: &acl,
	})
	handleError(err)
}
Output:

Example (File_SetAccessControlIfUnmodifiedSinceTrue)

make sure you create the filesystem before running this example

package main

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory"
	"log"
	"os"
	"time"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

func getRelativeTimeFromAnchor(anchorTime *time.Time, amount time.Duration) time.Time {
	return anchorTime.Add(amount * time.Second)
}

// main creates a path and sets owner/group/ACL on it, conditioned on the
// path being unmodified since shortly after creation.
func main() {
	accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
	accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Owner, group, and ACL values to apply.
	owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc"
	group := owner
	acl := "user::rwx,group::r-x,other::rwx"

	// Build a directory-package client for the target path using shared-key auth.
	pathURL := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	dirClient, err := directory.NewClientWithSharedKeyCredential(pathURL, cred, nil)
	handleError(err)
	createResp, err := dirClient.Create(context.Background(), nil)
	handleError(err)

	// Condition the update on the path being unmodified up to 10 seconds
	// after the creation response's Date header.
	cutoff := getRelativeTimeFromAnchor(createResp.Date, 10)
	opts := &directory.SetAccessControlOptions{
		Owner: &owner,
		Group: &group,
		ACL:   &acl,
		AccessConditions: &directory.AccessConditions{
			ModifiedAccessConditions: &directory.ModifiedAccessConditions{
				IfUnmodifiedSince: &cutoff,
			},
		},
	}

	_, err = dirClient.SetAccessControl(context.Background(), opts)
	handleError(err)
}
Output:

Example (File_UploadBufferAndDownloadStream)

make sure you create the filesystem before running this example

package main

import (
	"bytes"
	"context"
	"crypto/md5"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"io"
	"log"
	"os"
	"strings"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// random64BString is a 64-byte seed pattern used to fill example payloads.
const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov"

// generateData returns a ReadSeekCloser over sizeInBytes bytes of
// deterministic data (repetitions of random64BString) plus the raw bytes.
func generateData(sizeInBytes int) (io.ReadSeekCloser, []byte) {
	data := make([]byte, sizeInBytes)
	unit := len(random64BString)
	if sizeInBytes <= unit {
		copy(data, random64BString)
	} else {
		// Ceiling division: enough repetitions to cover sizeInBytes.
		repeats := (sizeInBytes + unit - 1) / unit
		copy(data, strings.Repeat(random64BString, repeats))
	}
	return streaming.NopCloser(bytes.NewReader(data)), data
}

// main uploads a 10KB buffer in parallel chunks, downloads it back, and
// compares MD5 hashes to verify the round trip.
func main() {
	accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Create a file client
	u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil)
	handleError(err)
	var fileSize int64 = 10 * 1024

	_, content := generateData(int(fileSize))
	md5Value := md5.Sum(content)
	contentMD5 := md5Value[:]

	// Upload in 2KB chunks, 5 in parallel.
	err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{
		Concurrency: 5,
		ChunkSize:   2 * 1024,
	})
	handleError(err)
	gResp2, err := fClient.GetProperties(context.Background(), nil)
	handleError(err)
	fmt.Println(*gResp2.ContentLength, fileSize)

	dResp, err := fClient.DownloadStream(context.Background(), nil)
	handleError(err)
	// Close the streaming body when done so the connection is released
	// (the original example never closed it).
	defer func() {
		handleError(dResp.Body.Close())
	}()

	data, err := io.ReadAll(dResp.Body)
	handleError(err)

	downloadedMD5Value := md5.Sum(data)
	downloadedContentMD5 := downloadedMD5Value[:]

	// compare the hashes
	fmt.Println(downloadedContentMD5, contentMD5)
}
Output:

Example (File_UploadFileAndDownloadStream)

make sure you create the filesystem before running this example

package main

import (
	"bytes"
	"context"
	"crypto/md5"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
	"io"
	"log"
	"os"
	"strings"
)

// handleError terminates the example, logging the message, if err is non-nil.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err.Error())
}

// random64BString is a 64-byte seed pattern used to fill example payloads.
const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov"

// generateData returns a ReadSeekCloser over sizeInBytes bytes of
// deterministic data (repetitions of random64BString) plus the raw bytes.
func generateData(sizeInBytes int) (io.ReadSeekCloser, []byte) {
	data := make([]byte, sizeInBytes)
	unit := len(random64BString)
	if sizeInBytes <= unit {
		copy(data, random64BString)
	} else {
		// Ceiling division: enough repetitions to cover sizeInBytes.
		repeats := (sizeInBytes + unit - 1) / unit
		copy(data, strings.Repeat(random64BString, repeats))
	}
	return streaming.NopCloser(bytes.NewReader(data)), data
}

// main writes a 10KB local file, uploads it in parallel chunks, downloads
// it back, and compares MD5 hashes to verify the round trip.
func main() {
	accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

	// Create a file client
	u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
	credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)
	fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil)
	handleError(err)
	var fileSize int64 = 10 * 1024

	_, err = fClient.Create(context.Background(), nil)
	handleError(err)

	// create local file
	_, content := generateData(int(fileSize))
	err = os.WriteFile("testFile", content, 0644)
	handleError(err)

	defer func() {
		err = os.Remove("testFile")
		handleError(err)
	}()

	fh, err := os.Open("testFile")
	handleError(err)

	defer func(fh *os.File) {
		err := fh.Close()
		handleError(err)
	}(fh)

	// get md5 hash to compare against after download
	hash := md5.New()
	_, err = io.Copy(hash, fh)
	handleError(err)
	contentMD5 := hash.Sum(nil)

	// upload the file
	// NOTE(review): fh is positioned at EOF after hashing; UploadFile is
	// assumed to read by offset rather than from the current position —
	// confirm against the SDK implementation.
	err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{
		Concurrency: 5,
		ChunkSize:   2 * 1024,
	})
	handleError(err)

	gResp2, err := fClient.GetProperties(context.Background(), nil)
	handleError(err)
	fmt.Println(*gResp2.ContentLength, fileSize)

	dResp, err := fClient.DownloadStream(context.Background(), nil)
	handleError(err)
	// Close the streaming body when done so the connection is released
	// (the original example never closed it).
	defer func() {
		handleError(dResp.Body.Close())
	}()

	data, err := io.ReadAll(dResp.Body)
	handleError(err)

	downloadedMD5Value := md5.Sum(data)
	downloadedContentMD5 := downloadedMD5Value[:]

	// compare the hashes
	fmt.Println(downloadedContentMD5, contentMD5)
}
Output:

Index

Examples

Constants

View Source
const (
	CountToEnd = 0

	// MaxAppendBytes indicates the maximum number of bytes that can be updated in a call to Client.AppendData.
	MaxAppendBytes = 100 * 1024 * 1024 // 100 MiB

	// MaxFileSize indicates the maximum size of the file allowed.
	MaxFileSize = 4 * 1024 * 1024 * 1024 * 1024 // 4 TiB
)
View Source
const ReadOnClosedBodyMessage = "read on closed response body"

ReadOnClosedBodyMessage of retry reader

Variables

This section is empty.

Functions

This section is empty.

Types

type ACLFailedEntry

type ACLFailedEntry = path.ACLFailedEntry

ACLFailedEntry contains the failed ACL entry (response model).

type AccessConditions

type AccessConditions = path.AccessConditions

AccessConditions identifies file-specific access conditions which you optionally set.

type AppendDataOptions

type AppendDataOptions struct {
	// TransactionalValidation specifies the transfer validation type to use.
	// The default is nil (no transfer validation).
	TransactionalValidation TransferValidationType
	// LeaseAccessConditions contains optional parameters to access leased entity.
	LeaseAccessConditions *LeaseAccessConditions
	// CPKInfo contains optional parameters to perform encryption using customer-provided key.
	CPKInfo *CPKInfo
}

AppendDataOptions contains the optional parameters for the Client.AppendData method.

type AppendDataResponse

type AppendDataResponse = generated.PathClientAppendDataResponse

AppendDataResponse contains the response from method Client.AppendData.

type CPKInfo

type CPKInfo = path.CPKInfo

CPKInfo contains CPK related information.

type CPKScopeInfo

type CPKScopeInfo = path.CPKScopeInfo

CPKScopeInfo contains a group of parameters for the PathClient.SetMetadata method.

type Client

Client represents a URL to the Azure Datalake Storage service.

func NewClient

func NewClient(fileURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error)

NewClient creates an instance of Client with the specified values.

  • fileURL - the URL of the file e.g. https://<account>.dfs.core.windows.net/fs/file.txt
  • cred - an Azure AD credential, typically obtained via the azidentity module
  • options - client options; pass nil to accept the default values

func NewClientFromConnectionString

func NewClientFromConnectionString(connectionString string, filePath, fsName string, options *ClientOptions) (*Client, error)

NewClientFromConnectionString creates an instance of Client with the specified values.

  • connectionString - a connection string for the desired storage account
  • options - client options; pass nil to accept the default values

func NewClientWithNoCredential

func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, error)

NewClientWithNoCredential creates an instance of Client with the specified values. This is used to anonymously access a storage account or with a shared access signature (SAS) token.

  • fileURL - the URL of the storage account e.g. https://<account>.dfs.core.windows.net/fs/file.txt?<sas token>
  • options - client options; pass nil to accept the default values

func NewClientWithSharedKeyCredential

func NewClientWithSharedKeyCredential(fileURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error)

NewClientWithSharedKeyCredential creates an instance of Client with the specified values.

  • fileURL - the URL of the storage account e.g. https://<account>.dfs.core.windows.net/fs/file.txt
  • cred - a SharedKeyCredential created with the matching storage account and access key
  • options - client options; pass nil to accept the default values

func (*Client) AppendData

func (f *Client) AppendData(ctx context.Context, offset int64, body io.ReadSeekCloser, options *AppendDataOptions) (AppendDataResponse, error)

AppendData appends data to existing file with a given offset.

func (*Client) BlobURL

func (f *Client) BlobURL() string

BlobURL returns the URL endpoint used by the Client object.

func (*Client) Create

func (f *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error)

Create creates a new file.

func (*Client) DFSURL

func (f *Client) DFSURL() string

DFSURL returns the URL endpoint used by the Client object.

func (*Client) Delete

func (f *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error)

Delete deletes a file.

func (*Client) DownloadBuffer

func (f *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error)

DownloadBuffer downloads an Azure Datalake file to a buffer in parallel.

func (*Client) DownloadFile

func (f *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error)

DownloadFile downloads a datalake file to a local file. The file would be truncated if the size doesn't match.

func (*Client) DownloadStream

DownloadStream reads a range of bytes from a file. The response also includes the file's properties and metadata. For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.

func (*Client) FlushData

func (f *Client) FlushData(ctx context.Context, offset int64, options *FlushDataOptions) (FlushDataResponse, error)

FlushData commits appended data to the file.

func (*Client) GetAccessControl

func (f *Client) GetAccessControl(ctx context.Context, options *GetAccessControlOptions) (GetAccessControlResponse, error)

GetAccessControl gets the owner, owning group, and permissions for a file.

func (*Client) GetProperties

func (f *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error)

GetProperties gets the properties of a file.

func (*Client) GetSASURL

func (f *Client) GetSASURL(permissions sas.FilePermissions, expiry time.Time, o *GetSASURLOptions) (string, error)

GetSASURL is a convenience method for generating a SAS token for the currently pointed at file. It can only be used if the credential supplied during creation was a SharedKeyCredential.

func (*Client) RemoveAccessControl

func (f *Client) RemoveAccessControl(ctx context.Context, ACL string, options *RemoveAccessControlOptions) (RemoveAccessControlResponse, error)

RemoveAccessControl removes the owner, owning group, and permissions for a file.

func (*Client) Rename

func (f *Client) Rename(ctx context.Context, destinationPath string, options *RenameOptions) (RenameResponse, error)

Rename renames a file. The original file will no longer exist and the client will be stale.

func (*Client) SetAccessControl

func (f *Client) SetAccessControl(ctx context.Context, options *SetAccessControlOptions) (SetAccessControlResponse, error)

SetAccessControl sets the owner, owning group, and permissions for a file.

func (*Client) SetExpiry

func (f *Client) SetExpiry(ctx context.Context, expiryValues SetExpiryValues, o *SetExpiryOptions) (SetExpiryResponse, error)

SetExpiry operation sets an expiry time on an existing file (blob2).

func (*Client) SetHTTPHeaders

func (f *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, options *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error)

SetHTTPHeaders sets the HTTP headers for a file.

func (*Client) SetMetadata

func (f *Client) SetMetadata(ctx context.Context, metadata map[string]*string, options *SetMetadataOptions) (SetMetadataResponse, error)

SetMetadata sets the metadata for a file.

func (*Client) UpdateAccessControl

func (f *Client) UpdateAccessControl(ctx context.Context, ACL string, options *UpdateAccessControlOptions) (UpdateAccessControlResponse, error)

UpdateAccessControl updates the owner, owning group, and permissions for a file.

func (*Client) UploadBuffer

func (f *Client) UploadBuffer(ctx context.Context, buffer []byte, options *UploadBufferOptions) error

UploadBuffer uploads a buffer in chunks to a file.

func (*Client) UploadFile

func (f *Client) UploadFile(ctx context.Context, file *os.File, options *UploadFileOptions) error

UploadFile uploads a file in chunks to a file.

func (*Client) UploadStream

func (f *Client) UploadStream(ctx context.Context, body io.Reader, options *UploadStreamOptions) error

UploadStream copies the file held in io.Reader to the file at fileClient. A Context deadline or cancellation will cause this to error.

type ClientOptions

type ClientOptions base.ClientOptions

ClientOptions contains the optional parameters when creating a Client.

type CopyStatusType

type CopyStatusType = path.CopyStatusType

CopyStatusType defines values for CopyStatusType

const (
	CopyStatusTypePending CopyStatusType = path.CopyStatusTypePending
	CopyStatusTypeSuccess CopyStatusType = path.CopyStatusTypeSuccess
	CopyStatusTypeAborted CopyStatusType = path.CopyStatusTypeAborted
	CopyStatusTypeFailed  CopyStatusType = path.CopyStatusTypeFailed
)

type CreateExpiryType

type CreateExpiryType = generated.PathExpiryOptions

CreateExpiryType defines the values for modes of file expiration specified during creation.

const (
	// CreateExpiryTypeAbsolute sets the expiration date as an absolute value expressed in RFC1123 format.
	CreateExpiryTypeAbsolute CreateExpiryType = generated.PathExpiryOptionsAbsolute

	// CreateExpiryTypeNeverExpire sets the file to never expire or removes the current expiration date.
	CreateExpiryTypeNeverExpire CreateExpiryType = generated.PathExpiryOptionsNeverExpire

	// CreateExpiryTypeRelativeToNow sets the expiration date relative to the current time.
	// The value is expressed as the number of milliseconds to elapse from the present time.
	CreateExpiryTypeRelativeToNow CreateExpiryType = generated.PathExpiryOptionsRelativeToNow
)

type CreateExpiryValues

type CreateExpiryValues struct {
	// ExpiryType indicates how the value of ExpiresOn should be interpreted (absolute, relative to now, etc).
	ExpiryType CreateExpiryType

	// ExpiresOn contains the time the file should expire.
	// The value will either be an absolute UTC time in RFC1123 format or an integer expressing a number of milliseconds.
	// NOTE: when ExpiryType is CreateExpiryTypeNeverExpire, this value is ignored.
	ExpiresOn string
}

CreateExpiryValues describes when a newly created file should expire. A zero-value indicates the file has no expiration date.

type CreateOptions

type CreateOptions struct {
	// AccessConditions contains parameters for accessing the file.
	AccessConditions *AccessConditions
	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo
	// HTTPHeaders contains the HTTP headers for path operations.
	HTTPHeaders *HTTPHeaders
	// Expiry specifies the type and time of expiry for the file.
	Expiry CreateExpiryValues
	// LeaseDuration specifies the duration of the lease, in seconds, or negative one
	// (-1) for a lease that never expires. A non-infinite lease can be
	// between 15 and 60 seconds.
	LeaseDuration *int64
	// ProposedLeaseID specifies the proposed lease ID for the file.
	ProposedLeaseID *string
	// Permissions is the octal representation of the permissions for user, group and mask.
	Permissions *string
	// Umask is the umask for the file.
	Umask *string
	// Owner is the owner of the file.
	Owner *string
	// Group is the owning group of the file.
	Group *string
	// ACL is the access control list for the file.
	ACL *string
}

CreateOptions contains the optional parameters when calling the Create operation.

type CreateResponse

type CreateResponse = path.CreateResponse

CreateResponse contains the response fields for the Create operation.

type DeleteOptions

type DeleteOptions = path.DeleteOptions

DeleteOptions contains the optional parameters when calling the Delete operation.

type DeleteResponse

type DeleteResponse = path.DeleteResponse

DeleteResponse contains the response fields for the Delete operation.

type DownloadBufferOptions

type DownloadBufferOptions struct {
	// Range specifies a range of bytes.  The default value is all bytes.
	Range *HTTPRange
	// ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB.
	ChunkSize int64
	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)
	// AccessConditions indicates the access conditions used when making HTTP GET requests against the file.
	AccessConditions *AccessConditions
	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo
	// CPKScopeInfo contains a group of parameters for client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo
	// Concurrency indicates the maximum number of chunks to download in parallel (0=default).
	Concurrency uint16
	// RetryReaderOptionsPerChunk is used when downloading each chunk.
	RetryReaderOptionsPerChunk *RetryReaderOptions
}

DownloadBufferOptions contains the optional parameters for the DownloadBuffer method.

type DownloadFileOptions

type DownloadFileOptions struct {
	// Range specifies a range of bytes.  The default value is all bytes.
	Range *HTTPRange
	// ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB.
	ChunkSize int64
	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)
	// AccessConditions indicates the access conditions used when making HTTP GET requests against the file.
	AccessConditions *AccessConditions
	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo
	// CPKScopeInfo contains a group of parameters for client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo
	// Concurrency indicates the maximum number of chunks to download in parallel. The default value is 5.
	Concurrency uint16
	// RetryReaderOptionsPerChunk is used when downloading each chunk.
	RetryReaderOptionsPerChunk *RetryReaderOptions
}

DownloadFileOptions contains the optional parameters for the Client.DownloadFile method.

type DownloadResponse

type DownloadResponse struct {
	// AcceptRanges contains the information returned from the Accept-Ranges header response.
	AcceptRanges *string

	// Body contains the streaming response.
	Body io.ReadCloser

	// CacheControl contains the information returned from the Cache-Control header response.
	CacheControl *string

	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
	ClientRequestID *string

	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
	ContentCRC64 []byte

	// ContentDisposition contains the information returned from the Content-Disposition header response.
	ContentDisposition *string

	// ContentEncoding contains the information returned from the Content-Encoding header response.
	ContentEncoding *string

	// ContentLanguage contains the information returned from the Content-Language header response.
	ContentLanguage *string

	// ContentLength contains the information returned from the Content-Length header response.
	ContentLength *int64

	// ContentMD5 contains the information returned from the Content-MD5 header response.
	ContentMD5 []byte

	// ContentRange contains the information returned from the Content-Range header response.
	ContentRange *string

	// ContentType contains the information returned from the Content-Type header response.
	ContentType *string

	// CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
	CopyCompletionTime *time.Time

	// CopyID contains the information returned from the x-ms-copy-id header response.
	CopyID *string

	// CopyProgress contains the information returned from the x-ms-copy-progress header response.
	CopyProgress *string

	// CopySource contains the information returned from the x-ms-copy-source header response.
	CopySource *string

	// CopyStatus contains the information returned from the x-ms-copy-status header response.
	CopyStatus *CopyStatusType

	// CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
	CopyStatusDescription *string

	// Date contains the information returned from the Date header response.
	Date *time.Time

	// ETag contains the information returned from the ETag header response.
	ETag *azcore.ETag

	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
	EncryptionKeySHA256 *string

	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
	EncryptionScope *string

	// ErrorCode contains the information returned from the x-ms-error-code header response.
	ErrorCode *string

	// ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
	ImmutabilityPolicyExpiresOn *time.Time

	// ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
	ImmutabilityPolicyMode *ImmutabilityPolicyMode

	// IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
	IsCurrentVersion *bool

	// IsSealed contains the information returned from the x-ms-blob-sealed header response.
	IsSealed *bool

	// IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
	IsServerEncrypted *bool

	// LastAccessed contains the information returned from the x-ms-last-access-time header response.
	LastAccessed *time.Time

	// LastModified contains the information returned from the Last-Modified header response.
	LastModified *time.Time

	// LeaseDuration contains the information returned from the x-ms-lease-duration header response.
	LeaseDuration *DurationType

	// LeaseState contains the information returned from the x-ms-lease-state header response.
	LeaseState *StateType

	// LeaseStatus contains the information returned from the x-ms-lease-status header response.
	LeaseStatus *StatusType

	// LegalHold contains the information returned from the x-ms-legal-hold header response.
	LegalHold *bool

	// Metadata contains the information returned from the x-ms-meta header response.
	Metadata map[string]*string

	// ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response.
	ObjectReplicationPolicyID *string

	// ObjectReplicationRules contains the information returned from the x-ms-or header response.
	ObjectReplicationRules map[string]*string

	// RequestID contains the information returned from the x-ms-request-id header response.
	RequestID *string

	// TagCount contains the information returned from the x-ms-tag-count header response.
	TagCount *int64

	// Version contains the information returned from the x-ms-version header response.
	Version *string

	// VersionID contains the information returned from the x-ms-version-id header response.
	VersionID *string
}

DownloadResponse contains the response fields for the Download operation.

func FormatDownloadStreamResponse

func FormatDownloadStreamResponse(r *blob.DownloadStreamResponse) DownloadResponse

type DownloadStreamOptions

type DownloadStreamOptions struct {
	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
	// range is less than or equal to 4 MB in size.
	RangeGetContentMD5 *bool
	// Range specifies a range of bytes.  The default value is all bytes.
	Range *HTTPRange
	// AccessConditions contains parameters for accessing the file.
	AccessConditions *AccessConditions
	// CPKInfo contains optional parameters to perform encryption using customer-provided key.
	CPKInfo *CPKInfo
	// CPKScopeInfo contains a group of parameters for client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo
}

DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method.

type DownloadStreamResponse

type DownloadStreamResponse struct {
	// DownloadResponse contains response fields from DownloadStream.
	DownloadResponse
	// contains filtered or unexported fields
}

DownloadStreamResponse contains the response from the DownloadStream method. To read from the stream, read from the Body field, or call the NewRetryReader method.

func (*DownloadStreamResponse) NewRetryReader

func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader

NewRetryReader constructs a new RetryReader stream for reading data. If a connection fails while reading, it will make additional requests to reestablish a connection and continue reading. Pass nil for options to accept the default options. Callers of this method should not access the DownloadStreamResponse.Body field.

type DurationType

type DurationType = azdatalake.DurationType

DurationType defines values for DurationType

const (
	DurationTypeInfinite DurationType = azdatalake.DurationTypeInfinite
	DurationTypeFixed    DurationType = azdatalake.DurationTypeFixed
)

func PossibleDurationTypeValues

func PossibleDurationTypeValues() []DurationType

PossibleDurationTypeValues returns the possible values for the DurationType const type.

type EncryptionAlgorithmType

type EncryptionAlgorithmType = path.EncryptionAlgorithmType

EncryptionAlgorithmType defines values for EncryptionAlgorithmType.

const (
	EncryptionAlgorithmTypeNone   EncryptionAlgorithmType = path.EncryptionAlgorithmTypeNone
	EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = path.EncryptionAlgorithmTypeAES256
)

type FlushDataOptions

type FlushDataOptions struct {
	// AccessConditions contains parameters for accessing the file.
	AccessConditions *AccessConditions
	// CPKInfo contains optional parameters to perform encryption using customer-provided key.
	CPKInfo *CPKInfo
	// HTTPHeaders contains the HTTP headers for path operations.
	HTTPHeaders *HTTPHeaders
	// Close indicates whether this is the final change, used to distinguish an
	// intermediate flush to a file stream from the final close of a file stream.
	Close *bool
	// RetainUncommittedData if "true", uncommitted data is retained after the flush operation
	// completes, otherwise, the uncommitted data is deleted after the flush operation.
	RetainUncommittedData *bool
}

FlushDataOptions contains the optional parameters for the Client.FlushData method.

type FlushDataResponse

type FlushDataResponse = generated.PathClientFlushDataResponse

FlushDataResponse contains the response from method Client.FlushData.

type GetAccessControlOptions

type GetAccessControlOptions = path.GetAccessControlOptions

GetAccessControlOptions contains the optional parameters when calling the GetAccessControl operation.

type GetAccessControlResponse

type GetAccessControlResponse = path.GetAccessControlResponse

GetAccessControlResponse contains the response fields for the GetAccessControl operation.

type GetPropertiesOptions

type GetPropertiesOptions = path.GetPropertiesOptions

GetPropertiesOptions contains the optional parameters for the Client.GetProperties method

type GetPropertiesResponse

type GetPropertiesResponse = path.GetPropertiesResponse

GetPropertiesResponse contains the response fields for the GetProperties operation.

type GetSASURLOptions

type GetSASURLOptions = path.GetSASURLOptions

GetSASURLOptions contains the optional parameters for the Client.GetSASURL method.

type HTTPHeaders

type HTTPHeaders = path.HTTPHeaders

HTTPHeaders contains the HTTP headers for path operations.

type HTTPRange

type HTTPRange = exported.HTTPRange

HTTPRange defines a range of bytes within an HTTP resource, starting at offset and ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange which has an offset and a zero-value count indicates from the offset to the resource's end.

type ImmutabilityPolicyMode

type ImmutabilityPolicyMode = path.ImmutabilityPolicyMode

ImmutabilityPolicyMode specifies the immutability policy mode to set on the file.

const (
	ImmutabilityPolicyModeMutable  ImmutabilityPolicyMode = path.ImmutabilityPolicyModeMutable
	ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = path.ImmutabilityPolicyModeUnlocked
	ImmutabilityPolicyModeLocked   ImmutabilityPolicyMode = path.ImmutabilityPolicyModeLocked
)

type LeaseAccessConditions

type LeaseAccessConditions = path.LeaseAccessConditions

LeaseAccessConditions contains optional parameters to access leased entity.

type ModifiedAccessConditions

type ModifiedAccessConditions = path.ModifiedAccessConditions

ModifiedAccessConditions contains a group of parameters for specifying access conditions.

type RemoveAccessControlOptions

type RemoveAccessControlOptions struct {
}

RemoveAccessControlOptions contains the optional parameters when calling the RemoveAccessControlRecursive operation.

type RemoveAccessControlResponse

type RemoveAccessControlResponse = path.RemoveAccessControlResponse

RemoveAccessControlResponse contains the response fields for the RemoveAccessControlRecursive operation.

type RenameOptions

type RenameOptions = path.RenameOptions

RenameOptions contains the optional parameters when calling the Rename operation.

type RenameResponse

type RenameResponse = path.RenameResponse

RenameResponse contains the response fields for the Rename operation.

type RetryReader

type RetryReader struct {
	// contains filtered or unexported fields
}

RetryReader attempts to read from response, and if there is a retry-able network error returned during reading, it will retry according to retry reader option through executing user defined action with provided data to get a new response, and continue the overall reading process through reading from the new response. RetryReader implements the io.ReadCloser interface.

func (*RetryReader) Close

func (s *RetryReader) Close() error

Close retry reader

func (*RetryReader) Read

func (s *RetryReader) Read(p []byte) (n int, err error)

Read from retry reader

type RetryReaderOptions

type RetryReaderOptions struct {
	// MaxRetries specifies the maximum number of attempts a failed read will be retried
	// before producing an error.
	// The default value is three.
	MaxRetries int32

	// OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging.
	OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool)

	// EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker).  If
	// EarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
	// treated as a fatal (non-retryable) error.
	// Note that setting EarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
	// from the same "thread" (goroutine) as Read.  Concurrent Close calls from other goroutines may instead produce network errors
	// which will be retried.
	// The default value is false.
	EarlyCloseAsError bool
	// contains filtered or unexported fields
}

RetryReaderOptions configures the retry reader's behavior. Zero-value fields will have their specified default values applied during use. This allows for modification of a subset of fields.

type SetAccessControlOptions

type SetAccessControlOptions = path.SetAccessControlOptions

SetAccessControlOptions contains the optional parameters when calling the SetAccessControl operation.

type SetAccessControlRecursiveResponse

type SetAccessControlRecursiveResponse = generated.SetAccessControlRecursiveResponse

SetAccessControlRecursiveResponse contains part of the response data returned by the SetAccessControlRecursive, UpdateAccessControlRecursive and RemoveAccessControlRecursive operations.

type SetAccessControlResponse

type SetAccessControlResponse = path.SetAccessControlResponse

SetAccessControlResponse contains the response fields for the SetAccessControl operation.

type SetExpiryOptions

type SetExpiryOptions struct {
}

SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.

type SetExpiryResponse

type SetExpiryResponse = generated_blob.BlobClientSetExpiryResponse

SetExpiryResponse contains the response fields for the SetExpiry operation.

type SetExpiryType

type SetExpiryType = generated_blob.ExpiryOptions

SetExpiryType defines the values for modes of file expiration.

const (
	// SetExpiryTypeAbsolute sets the expiration date as an absolute value expressed in RFC1123 format.
	SetExpiryTypeAbsolute SetExpiryType = generated_blob.ExpiryOptionsAbsolute

	// SetExpiryTypeNeverExpire sets the file to never expire or removes the current expiration date.
	SetExpiryTypeNeverExpire SetExpiryType = generated_blob.ExpiryOptionsNeverExpire

	// SetExpiryTypeRelativeToCreation sets the expiration date relative to the time of file creation.
	// The value is expressed as the number of milliseconds to elapse from the time of creation.
	SetExpiryTypeRelativeToCreation SetExpiryType = generated_blob.ExpiryOptionsRelativeToCreation

	// SetExpiryTypeRelativeToNow sets the expiration date relative to the current time.
	// The value is expressed as the number of milliseconds to elapse from the present time.
	SetExpiryTypeRelativeToNow SetExpiryType = generated_blob.ExpiryOptionsRelativeToNow
)

type SetExpiryValues

type SetExpiryValues struct {
	// ExpiryType indicates how the value of ExpiresOn should be interpreted (absolute, relative to now, etc).
	ExpiryType SetExpiryType

	// ExpiresOn contains the time the file should expire.
	// The value will either be an absolute UTC time in RFC1123 format or an integer expressing a number of milliseconds.
	// NOTE: when ExpiryType is SetExpiryTypeNeverExpire, this value is ignored.
	ExpiresOn string
}

SetExpiryValues describes when a file should expire. A zero-value indicates the file has no expiration date.

type SetHTTPHeadersOptions

type SetHTTPHeadersOptions = path.SetHTTPHeadersOptions

SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method.

type SetHTTPHeadersResponse

type SetHTTPHeadersResponse = path.SetHTTPHeadersResponse

SetHTTPHeadersResponse contains the response from method Client.SetHTTPHeaders.

type SetMetadataOptions

type SetMetadataOptions = path.SetMetadataOptions

SetMetadataOptions provides set of configurations for Set Metadata on path operation

type SetMetadataResponse

type SetMetadataResponse = path.SetMetadataResponse

SetMetadataResponse contains the response fields for the SetMetadata operation.

type SharedKeyCredential

type SharedKeyCredential = path.SharedKeyCredential

SharedKeyCredential contains an account's name and its primary or secondary key.

type SourceAccessConditions

type SourceAccessConditions = path.SourceAccessConditions

SourceAccessConditions identifies file-specific source access conditions which you optionally set.

type SourceModifiedAccessConditions

type SourceModifiedAccessConditions = path.SourceModifiedAccessConditions

SourceModifiedAccessConditions contains a group of parameters for specifying access conditions.

type StateType

type StateType = azdatalake.StateType

StateType defines values for StateType

const (
	StateTypeAvailable StateType = azdatalake.StateTypeAvailable
	StateTypeLeased    StateType = azdatalake.StateTypeLeased
	StateTypeExpired   StateType = azdatalake.StateTypeExpired
	StateTypeBreaking  StateType = azdatalake.StateTypeBreaking
	StateTypeBroken    StateType = azdatalake.StateTypeBroken
)

type StatusType

type StatusType = azdatalake.StatusType

StatusType defines values for StatusType

const (
	StatusTypeLocked   StatusType = azdatalake.StatusTypeLocked
	StatusTypeUnlocked StatusType = azdatalake.StatusTypeUnlocked
)

func PossibleStatusTypeValues

func PossibleStatusTypeValues() []StatusType

PossibleStatusTypeValues returns the possible values for the StatusType const type.

type TransferValidationType

type TransferValidationType = exported.TransferValidationType

TransferValidationType abstracts the various mechanisms used to verify a transfer.

func TransferValidationTypeComputeCRC64

func TransferValidationTypeComputeCRC64() TransferValidationType

TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer.

type TransferValidationTypeCRC64

type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64

TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed crc64.

type UpdateAccessControlOptions

type UpdateAccessControlOptions struct {
}

UpdateAccessControlOptions contains the optional parameters when calling the UpdateAccessControlRecursive operation.

type UpdateAccessControlResponse

type UpdateAccessControlResponse = path.UpdateAccessControlResponse

UpdateAccessControlResponse contains the response fields for the UpdateAccessControlRecursive operation.

type UploadBufferOptions

type UploadBufferOptions = uploadFromReaderOptions

UploadBufferOptions provides set of configurations for Client.UploadBuffer operation.

type UploadFileOptions

type UploadFileOptions = uploadFromReaderOptions

UploadFileOptions provides set of configurations for Client.UploadFile operation.

type UploadStreamOptions

type UploadStreamOptions struct {
	// ChunkSize specifies the chunk size to use in bytes; the default (and maximum size) is MaxAppendBytes.
	ChunkSize int64
	// Concurrency indicates the maximum number of chunks to upload in parallel (default is 5)
	Concurrency uint16
	// AccessConditions contains optional parameters to access leased entity.
	AccessConditions *AccessConditions
	// HTTPHeaders contains the optional path HTTP headers to set when the file is created.
	HTTPHeaders *HTTPHeaders
	// CPKInfo contains optional parameters to perform encryption using customer-provided key.
	CPKInfo *CPKInfo
}

UploadStreamOptions provides set of configurations for Client.UploadStream operation.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL