Documentation ¶
Overview ¶
Package driver implements a Go driver for the ArangoDB database.
To get started, create a connection to the database and wrap a client around it.
// Create an HTTP connection to the database
conn, err := http.NewConnection(http.ConnectionConfig{
    Endpoints: []string{"http://localhost:8529"},
})
if err != nil {
    // Handle error
}
// Create a client
c, err := driver.NewClient(driver.ClientConfig{
    Connection: conn,
})
if err != nil {
    // Handle error
}
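From the client you can then open a database and a collection. A minimal continuation of the example above, assuming a database "examples" and a collection "users" already exist:

// Open the "examples" database
db, err := c.Database(context.Background(), "examples")
if err != nil {
    // Handle error
}
// Open the "users" collection within that database
col, err := db.Collection(context.Background(), "users")
if err != nil {
    // Handle error
}

Several of the sketches further down reuse c, db and col in this sense.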
Index ¶
- Constants
- Variables
- func ApplyVersionHeader(ctx context.Context, req Request)
- func DriverVersion() string
- func HasAsyncID(ctx context.Context) (string, bool)
- func HasReturnNew(ctx context.Context) (interface{}, bool)
- func HasReturnOld(ctx context.Context) (interface{}, bool)
- func IsArangoError(err error) bool
- func IsArangoErrorWithCode(err error, code int) bool
- func IsArangoErrorWithErrorNum(err error, errorNum ...int) bool
- func IsAsyncRequest(ctx context.Context) bool
- func IsCanceled(err error) bool
- func IsConflict(err error) bool
- func IsDataSourceOrDocumentNotFound(err error) bool
- func IsExternalStorageError(err error) bool
- func IsForbidden(err error) bool
- func IsInvalidArgument(err error) bool
- func IsInvalidRequest(err error) bool
- func IsNoLeader(err error) bool
- func IsNoLeaderOrOngoing(err error) bool
- func IsNoMoreDocuments(err error) bool
- func IsNotFound(err error) bool (deprecated)
- func IsNotFoundGeneral(err error) bool
- func IsPreconditionFailed(err error) bool
- func IsResponse(err error) bool
- func IsTimeout(err error) bool
- func IsUnauthorized(err error) bool
- func WithAllowDirtyReads(parent context.Context, wasDirtyRead *bool) context.Context
- func WithArangoQueueTime(parent context.Context, duration time.Duration) context.Context
- func WithArangoQueueTimeout(parent context.Context, useQueueTimeout bool) context.Context
- func WithAsync(parent context.Context) context.Context
- func WithAsyncID(parent context.Context, asyncID string) context.Context
- func WithBatchID(parent context.Context, id string) context.Context
- func WithConfigured(parent context.Context, value ...bool) context.Context
- func WithDBServerID(parent context.Context, id string) context.Context
- func WithDetails(parent context.Context, value ...bool) context.Context
- func WithDriverFlags(parent context.Context, value []string) context.Context
- func WithDropCollections(parent context.Context, value ...bool) context.Context
- func WithEndpoint(parent context.Context, endpoint string) context.Context
- func WithEnforceReplicationFactor(parent context.Context, value bool) context.Context
- func WithFollowLeaderRedirect(parent context.Context, value bool) context.Context
- func WithIgnoreRevisions(parent context.Context, value ...bool) context.Context
- func WithImportDetails(parent context.Context, value *[]string) context.Context
- func WithIsRestore(parent context.Context, value bool) context.Context
- func WithIsSystem(parent context.Context, value bool) context.Context
- func WithJobIDResponse(parent context.Context, jobID *string) context.Context
- func WithKeepNull(parent context.Context, value bool) context.Context
- func WithMergeObjects(parent context.Context, value bool) context.Context
- func WithOverwrite(parent context.Context) context.Context
- func WithOverwriteMode(parent context.Context, mode OverwriteMode) context.Context
- func WithQueryAllowRetry(parent context.Context, value ...bool) context.Context
- func WithQueryBatchSize(parent context.Context, value int) context.Context
- func WithQueryCache(parent context.Context, value ...bool) context.Context
- func WithQueryCount(parent context.Context, value ...bool) context.Context
- func WithQueryFillBlockCache(parent context.Context, value ...bool) context.Context
- func WithQueryForceOneShardAttributeValue(parent context.Context, value string) context.Context
- func WithQueryFullCount(parent context.Context, value ...bool) context.Context
- func WithQueryMaxRuntime(parent context.Context, value ...float64) context.Context
- func WithQueryMemoryLimit(parent context.Context, value int64) context.Context
- func WithQueryOptimizerRules(parent context.Context, value []string) context.Context
- func WithQueryProfile(parent context.Context, value ...int) context.Context
- func WithQuerySatelliteSyncWait(parent context.Context, value time.Duration) context.Context
- func WithQueryShardIds(parent context.Context, value []string) context.Context
- func WithQueryStream(parent context.Context, value ...bool) context.Context
- func WithQueryTTL(parent context.Context, value time.Duration) context.Context
- func WithRawResponse(parent context.Context, value *[]byte) context.Context
- func WithRefillIndexCaches(parent context.Context, value bool) context.Context
- func WithResponse(parent context.Context, value *Response) context.Context
- func WithReturnNew(parent context.Context, result interface{}) context.Context
- func WithReturnOld(parent context.Context, result interface{}) context.Context
- func WithRevision(parent context.Context, revision string) context.Context
- func WithRevisions(parent context.Context, revisions []string) context.Context
- func WithSilent(parent context.Context, value ...bool) context.Context
- func WithSkipExistCheck(parent context.Context, value bool) context.Context
- func WithTransactionID(parent context.Context, tid TransactionID) context.Context
- func WithWaitForSync(parent context.Context, value ...bool) context.Context
- type AbortTransactionOptions
- type AccessTarget
- type AggregatedStatus
- type AllGSSStatus
- type ArangoError
- type ArangoID
- type ArangoSearchAliasIndex
- type ArangoSearchAliasViewProperties
- type ArangoSearchAnalyzer
- type ArangoSearchAnalyzerAQLReturnType
- type ArangoSearchAnalyzerDefinition
- type ArangoSearchAnalyzerFeature
- type ArangoSearchAnalyzerGeoJSONType
- type ArangoSearchAnalyzerGeoOptions
- type ArangoSearchAnalyzerPipeline
- type ArangoSearchAnalyzerProperties
- type ArangoSearchAnalyzerType
- type ArangoSearchBreakType
- type ArangoSearchCaseType
- type ArangoSearchConsolidationPolicy
- type ArangoSearchConsolidationPolicyBytesAccum
- type ArangoSearchConsolidationPolicyTier
- type ArangoSearchConsolidationPolicyType
- type ArangoSearchEdgeNGram
- type ArangoSearchElementProperties
- type ArangoSearchFields
- type ArangoSearchFormat
- type ArangoSearchLinks
- type ArangoSearchNGramStreamType
- type ArangoSearchPrimarySortEntry
- type ArangoSearchSortDirection
- type ArangoSearchStoreValues
- type ArangoSearchView
- type ArangoSearchViewAlias
- type ArangoSearchViewBase
- type ArangoSearchViewProperties
- type AsyncJobDeleteOptions
- type AsyncJobDeleteType
- type AsyncJobListOptions
- type AsyncJobService
- type AsyncJobStatusType
- type Authentication
- type AuthenticationType
- type BackupCreateOptions
- type BackupCreateResponse
- type BackupID
- type BackupListOptions
- type BackupMeta
- type BackupMetaSha256
- type BackupRestoreOptions
- type BackupTransferJobID
- type BackupTransferProgressReport
- type BackupTransferReport
- type BackupTransferStatus
- type Batch
- type BeginTransactionOptions
- type BodyBuilder
- type Client
- type ClientAdminBackup
- type ClientAsyncJob
- type ClientBackup
- type ClientCluster
- type ClientConfig
- type ClientDatabases
- type ClientFoxx
- type ClientLog
- type ClientReplication
- type ClientServerAdmin
- type ClientServerInfo
- type ClientStats
- type ClientUsers
- type Cluster
- type ClusterHealth
- type Collection
- type CollectionChecksum
- type CollectionDocuments
- type CollectionIndexes
- type CollectionInfo
- type CollectionKeyOptions
- type CollectionProperties
- type CollectionSchemaLevel
- type CollectionSchemaOptions
- type CollectionShards
- type CollectionStatistics
- type CollectionStatus
- type CollectionType
- type CommitTransactionOptions
- type ComputeOn
- type ComputedValue
- type Connection
- type ContentType
- type ContextKey
- type CreateCollectionOptions
- type CreateDatabaseDefaultOptions
- type CreateDatabaseOptions
- type CreateDatabaseUserOptions
- type CreateEdgeCollectionOptions
- type CreateGraphOptions
- type CreateVertexCollectionOptions
- type Cursor
- type Database
- type DatabaseArangoSearchAnalyzers
- type DatabaseCollections
- type DatabaseGraphs
- type DatabaseInfo
- type DatabaseInventory
- type DatabasePregels
- type DatabaseReplicationVersion
- type DatabaseSharding
- type DatabaseStreamingTransactions
- type DatabaseViews
- type DocumentID
- type DocumentMeta
- type DocumentMetaSlice
- type EdgeDefinition
- type EdgeDocument
- type EngineInfo
- type EngineType
- type EnsureFullTextIndexOptions (deprecated)
- type EnsureGeoIndexOptions
- type EnsureHashIndexOptions
- type EnsureMDIIndexOptions
- type EnsureMDIPrefixedIndexOptions
- type EnsurePersistentIndexOptions
- type EnsureSkipListIndexOptions
- type EnsureTTLIndexOptions
- type EnsureZKDIndexOptions
- type ErrorSlice
- type ExplainQueryOptimizerOptions
- type ExplainQueryOptions
- type ExplainQueryResult
- type ExplainQueryResultExecutionCollection
- type ExplainQueryResultExecutionNodeRaw
- type ExplainQueryResultExecutionStats
- type ExplainQueryResultExecutionVariable
- type ExplainQueryResultPlan
- type FoxxCreateOptions
- type FoxxDeleteOptions
- type FoxxService
- type GSSStatus
- type Grant
- type Graph
- type GraphEdgeCollections
- type GraphStoreStatus
- type GraphVertexCollections
- type HTTPStats
- type ImportDocumentOptions
- type ImportDocumentStatistics
- type ImportOnDuplicate
- type Index
- type IndexType
- type InvalidArgumentError
- type InventoryCollection
- type InventoryCollectionParameters
- type InventoryIndex
- type InventoryView
- type InvertedIndexField
- type InvertedIndexOptions
- type InvertedIndexPrimarySort
- type KeyGeneratorType
- type License
- type LicenseFeatures
- type LicenseStatus
- type LogLevels
- type LogLevelsGetOptions
- type LogLevelsSetOptions
- type MemoryStats
- type NoMoreDocumentsError
- type NumberOfServersResponse
- type OverwriteMode
- type PregelAlgorithm
- type PregelJob
- type PregelJobOptions
- type PregelJobState
- type PregelRunDetails
- type PrimarySortCompression
- type Protocol
- type ProtocolSet
- type QueryExtra
- type QueryFlags
- type QueryRule
- type QueryStatistics
- type RawObject
- type RemoveGraphOptions
- type Replication
- type Request
- type Response
- type ResponseError
- type RevisionMinMax
- type RevisionRanges
- type RevisionTree
- type RevisionTreeNode
- type RevisionUInt64
- type Revisions
- type ServerHealth
- type ServerID
- type ServerLogMessage
- type ServerLogs
- type ServerMode
- type ServerRole
- type ServerStatistics
- type ServerStats
- type ServerStatus
- type ServerSyncStatus
- type SetCollectionPropertiesOptions
- type ShardID
- type ShardingStrategy
- type ShutdownInfo
- type State
- type Stats
- type StoredValue
- type SystemStats
- type ThreadStats
- type Tick
- type TransactionCollections
- type TransactionID
- type TransactionOptions
- type TransactionStats
- type TransactionStatus
- type TransactionStatusRecord
- type User
- type UserOptions
- type V8ContextStats
- type Version
- type VersionInfo
- type VertexConstraints
- type View
- type ViewType
Constants ¶
const (
    CollectionStatusNewBorn   = CollectionStatus(1)
    CollectionStatusUnloaded  = CollectionStatus(2)
    CollectionStatusLoaded    = CollectionStatus(3)
    CollectionStatusUnloading = CollectionStatus(4)
    CollectionStatusDeleted   = CollectionStatus(5)
    CollectionStatusLoading   = CollectionStatus(6)
)
const (
    // ImportOnDuplicateError will not import the current document because of the unique key constraint violation.
    // This is the default setting.
    ImportOnDuplicateError = ImportOnDuplicate("error")
    // ImportOnDuplicateUpdate will update an existing document in the database with the data specified in the request.
    // Attributes of the existing document that are not present in the request will be preserved.
    ImportOnDuplicateUpdate = ImportOnDuplicate("update")
    // ImportOnDuplicateReplace will replace an existing document in the database with the data specified in the request.
    ImportOnDuplicateReplace = ImportOnDuplicate("replace")
    // ImportOnDuplicateIgnore will not update an existing document and simply ignore the error caused by a unique key constraint violation.
    ImportOnDuplicateIgnore = ImportOnDuplicate("ignore")
)
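As an illustration, a sketch of passing one of these values when importing documents (col is a Collection as in the Overview; the docs slice is hypothetical):

docs := []map[string]interface{}{
    {"_key": "alice", "age": 30},
    {"_key": "bob", "age": 25},
}
// On duplicate keys, update the existing documents instead of failing.
stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{
    OnDuplicate: driver.ImportOnDuplicateUpdate,
})
if err != nil {
    // Handle error
}
_ = stats // e.g. stats.Created, stats.Updated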
const (
    EngineTypeMMFiles = EngineType("mmfiles")
    EngineTypeRocksDB = EngineType("rocksdb")
)
const (
    // CollectionTypeDocument specifies a document collection
    CollectionTypeDocument = CollectionType(2)
    // CollectionTypeEdge specifies an edges collection
    CollectionTypeEdge = CollectionType(3)
)
const (
    KeyGeneratorTraditional   = KeyGeneratorType("traditional")
    KeyGeneratorAutoIncrement = KeyGeneratorType("autoincrement")
)
const (
    // ViewTypeArangoSearch specifies an ArangoSearch view type.
    ViewTypeArangoSearch = ViewType("arangosearch")
    // ViewTypeArangoSearchAlias specifies an ArangoSearch view type alias.
    ViewTypeArangoSearchAlias = ViewType("search-alias")
)
const (
    // general errors
    ErrNotImplemented = 9
    ErrForbidden      = 11
    ErrDisabled       = 36
    // HTTP error status codes
    ErrHttpForbidden = 403
    ErrHttpInternal  = 501
    // Internal ArangoDB storage errors
    ErrArangoReadOnly = 1004
    // External ArangoDB storage errors
    ErrArangoCorruptedDatafile    = 1100
    ErrArangoIllegalParameterFile = 1101
    ErrArangoCorruptedCollection  = 1102
    ErrArangoFileSystemFull       = 1104
    ErrArangoDataDirLocked        = 1107
    // General ArangoDB storage errors
    ErrArangoConflict                 = 1200
    ErrArangoDocumentNotFound         = 1202
    ErrArangoDataSourceNotFound       = 1203
    ErrArangoIllegalName              = 1208
    ErrArangoUniqueConstraintViolated = 1210
    ErrArangoDatabaseNotFound         = 1228
    ErrArangoDatabaseNameInvalid      = 1229
    // ArangoDB cluster errors
    ErrClusterReplicationWriteConcernNotFulfilled = 1429
    ErrClusterLeadershipChallengeOngoing          = 1495
    ErrClusterNotLeader                           = 1496
    // User management errors
    ErrUserDuplicate = 1702
)
const (
    PrimaryIndex = IndexType("primary")
    // Deprecated: since version 3.10. Use an ArangoSearch view instead.
    FullTextIndex = IndexType("fulltext")
    // Deprecated: use PersistentIndex instead.
    HashIndex = IndexType("hash")
    // Deprecated: use PersistentIndex instead.
    SkipListIndex   = IndexType("skiplist")
    PersistentIndex = IndexType("persistent")
    GeoIndex        = IndexType("geo")
    EdgeIndex       = IndexType("edge")
    TTLIndex        = IndexType("ttl")
    // Deprecated: since version 3.12. Use MDIIndex instead.
    ZKDIndex         = IndexType("zkd")
    InvertedIndex    = IndexType("inverted")
    MDIIndex         = IndexType("mdi")
    MDIPrefixedIndex = IndexType("mdi-prefixed")
)
Symbolic constants for index types
const (
    // ReplicationFactorSatellite represents a satellite collection's replication factor
    ReplicationFactorSatellite int = -1
)
const (
    SatelliteGraph = -100
)
Variables ¶
var (
    // WithStack is called on every return of an error to add stacktrace information to the error.
    // When setting this function, also set the Cause function.
    // The interface of this function is compatible with functions in github.com/pkg/errors.
    WithStack = func(err error) error { return err }
    // Cause is used to get the root cause of the given error.
    // The interface of this function is compatible with functions in github.com/pkg/errors.
    Cause = func(err error) error { return err }
)
var ErrBatchClosed = errors.New("Batch already closed")
ErrBatchClosed occurs on an attempt to close or prolong an already closed batch.
Functions ¶
func ApplyVersionHeader ¶ added in v1.5.1
ApplyVersionHeader adds the driver version to the request.
func DriverVersion ¶ added in v1.6.1
func DriverVersion() string
func HasAsyncID ¶ added in v1.6.1
HasAsyncID returns the async Job ID from the given context.
func HasReturnNew ¶ added in v1.6.1
HasReturnNew is used to fetch the new document from the context.
func HasReturnOld ¶ added in v1.6.1
HasReturnOld is used to fetch the old document from the context.
func IsArangoError ¶
IsArangoError returns true when the given error is an ArangoError.
func IsArangoErrorWithCode ¶
IsArangoErrorWithCode returns true when the given error is an ArangoError and its Code field is equal to the given code.
func IsArangoErrorWithErrorNum ¶
IsArangoErrorWithErrorNum returns true when the given error is an ArangoError and its ErrorNum field is equal to one of the given numbers.
func IsAsyncRequest ¶ added in v1.6.1
IsAsyncRequest returns true if the given context is an async request.
func IsCanceled ¶
IsCanceled returns true if the given error is the result of a cancelled context.
func IsConflict ¶
IsConflict returns true if the given error is an ArangoError with code 409, indicating a conflict.
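A short sketch of inspecting errors with these helpers (col and doc are assumed from the Overview; doc is a document candidate):

_, err := col.CreateDocument(ctx, doc)
if driver.IsConflict(err) {
    // A document with the same key (or a violated unique index) already exists.
} else if driver.IsArangoErrorWithErrorNum(err, driver.ErrArangoUniqueConstraintViolated) {
    // The same situation, detected via the ArangoDB error number instead of the HTTP code.
} else if err != nil {
    // Handle other errors
}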
func IsDataSourceOrDocumentNotFound ¶ added in v1.4.0
IsDataSourceOrDocumentNotFound returns true if the given error is an Arango storage error, indicating an object is not found.
func IsExternalStorageError ¶ added in v1.5.0
IsExternalStorageError returns true if ArangoDB is having an error with accessing or writing to storage.
func IsForbidden ¶
IsForbidden returns true if the given error is an ArangoError with code 403, indicating a forbidden request.
func IsInvalidArgument ¶
IsInvalidArgument returns true if the given error is an InvalidArgumentError.
func IsInvalidRequest ¶
IsInvalidRequest returns true if the given error is an ArangoError with code 400, indicating an invalid request.
func IsNoLeader ¶
IsNoLeader returns true if the given error is an ArangoError with code 503 and error number 1496.
func IsNoLeaderOrOngoing ¶
IsNoLeaderOrOngoing returns true if the given error is an ArangoError with code 503 and error number 1495 or 1496.
func IsNoMoreDocuments ¶
IsNoMoreDocuments returns true if the given error is a NoMoreDocumentsError.
func IsNotFound (deprecated) ¶
Deprecated: Use IsNotFoundGeneral instead. For the ErrArangoDocumentNotFound error there is a chance of getting a different HTTP code if the API requires an existing document as input and that document is not found.
IsNotFound returns true if the given error is an ArangoError with code 404, indicating an object not found.
func IsNotFoundGeneral ¶ added in v1.4.0
IsNotFoundGeneral returns true if the given error is an ArangoError with code 404, indicating an object is not found.
func IsPreconditionFailed ¶
IsPreconditionFailed returns true if the given error is an ArangoError with code 412, indicating a failed precondition.
func IsResponse ¶
IsResponse returns true if the given error is (or is caused by) a ResponseError.
func IsTimeout ¶
IsTimeout returns true if the given error is the result on a deadline that has been exceeded.
func IsUnauthorized ¶
IsUnauthorized returns true if the given error is an ArangoError with code 401, indicating an unauthorized request.
func WithAllowDirtyReads ¶
WithAllowDirtyReads is used in an active failover deployment to allow reads from the follower. You can pass a reference to a boolean that will be set according to whether a potentially dirty read happened or not; nil is allowed. This is valid for document reads, AQL queries, and Gharial vertex and edge reads. Since ArangoDB 3.10 this feature is also available in the Enterprise Edition for cluster deployments.
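A sketch of how the dirty-read flag might be used (col as in the Overview; "myKey" is a hypothetical key):

var wasDirty bool
ctx := driver.WithAllowDirtyReads(context.Background(), &wasDirty)
var doc map[string]interface{}
if _, err := col.ReadDocument(ctx, "myKey", &doc); err != nil {
    // Handle error
}
// wasDirty now indicates whether the read may have come from a follower.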
func WithArangoQueueTime ¶ added in v1.3.0
WithArangoQueueTime defines the maximum queue timeout on the server side.
func WithArangoQueueTimeout ¶ added in v1.3.0
WithArangoQueueTimeout is used to enable the queue timeout on the server side. If WithArangoQueueTime is used, its value takes precedence; otherwise the value of ctx.Deadline is used.
func WithAsync ¶ added in v1.6.1
WithAsync is used to configure a context to make an async operation; it requires a Connection with the Async wrapper.
func WithAsyncID ¶ added in v1.6.1
WithAsyncID is used to check an async operation result; it requires a Connection with the Async wrapper.
func WithBatchID ¶
WithBatchID is used to configure a context that includes an ID of a Batch. This is used in replication functions.
func WithConfigured ¶
WithConfigured is used to configure a context to return the configured value of a user grant instead of the effective grant.
func WithDBServerID ¶
WithDBServerID is used to configure a context that includes an ID of a specific DBServer.
func WithDetails ¶
WithDetails is used to configure a context to make Client.Version return additional details. You can pass a single (optional) boolean. If that is set to false, you explicitly ask to not provide details.
func WithDriverFlags ¶ added in v1.5.1
WithDriverFlags is used to configure additional flags for the `x-arango-driver` header.
func WithDropCollections ¶ added in v1.6.0
WithDropCollections is used to configure a context to make graph removal functions also drop the collections of the graph instead of only the graph definition. You can pass a single (optional) boolean. If that is set to true, you explicitly ask to also drop the collections of the graph.
func WithEndpoint ¶
WithEndpoint is used to configure a context that forces a request to be executed on a specific endpoint. If you specify an endpoint like this, failover is disabled. If you specify an unknown endpoint, an InvalidArgumentError is returned from requests.
func WithEnforceReplicationFactor ¶
WithEnforceReplicationFactor is used to configure a context to make adding collections fail if the replication factor is too high (default or true) or silently accept (false).
func WithFollowLeaderRedirect ¶
WithFollowLeaderRedirect is used to configure a context to turn on/off following redirection responses from the server when the request is answered by a follower. The default behavior is "on".
func WithIgnoreRevisions ¶
WithIgnoreRevisions is used to configure a context to make modification functions ignore revisions in the update. Do not use in combination with WithRevision or WithRevisions.
func WithImportDetails ¶
WithImportDetails is used to configure a context that will make import document requests return details about documents that could not be imported.
func WithIsRestore ¶
WithIsRestore is used to configure a context to make insert functions use the "isRestore=<value>" setting. Note: This function is intended for internal (replication) use. It is NOT intended to be used by normal clients. This CAN screw up your database.
func WithIsSystem ¶
WithIsSystem is used to configure a context to make insert functions use the "isSystem=<value>" setting.
func WithJobIDResponse ¶
WithJobIDResponse is used to configure a context that includes a reference to a JobID that is filled on an error-free response. This is used in cluster functions.
func WithKeepNull ¶
WithKeepNull is used to configure a context to make update functions keep null fields (value==true) or remove fields with null values (value==false).
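For example, a sketch of removing an attribute by patching it with null while KeepNull is false (col and "myKey" as in earlier examples):

// Remove the "nickname" attribute from the document instead of storing null.
ctx := driver.WithKeepNull(context.Background(), false)
patch := map[string]interface{}{"nickname": nil}
if _, err := col.UpdateDocument(ctx, "myKey", patch); err != nil {
    // Handle error
}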
func WithMergeObjects ¶
WithMergeObjects is used to configure a context to make update functions merge objects present in both the existing document and the patch document (value==true) or overwrite objects in the existing document with objects found in the patch document (value==false).
func WithOverwrite ¶
WithOverwrite is used to configure a context to instruct if a document should be overwritten.
func WithOverwriteMode ¶
func WithOverwriteMode(parent context.Context, mode OverwriteMode) context.Context
WithOverwriteMode is used to configure a context to instruct how a document should be overwritten.
func WithQueryAllowRetry ¶ added in v1.6.0
func WithQueryBatchSize ¶
WithQueryBatchSize is used to configure a context that will set the BatchSize of a query request.
func WithQueryCache ¶
WithQueryCache is used to configure a context that will set the Cache of a query request. If the value is not given, it defaults to true.
func WithQueryCount ¶
WithQueryCount is used to configure a context that will set the Count of a query request. If the value is not given, it defaults to true.
func WithQueryFillBlockCache ¶ added in v1.3.0
WithQueryFillBlockCache, if set to true or not specified, will make the query store the data it reads via the RocksDB storage engine in the RocksDB block cache. This is usually the desired behavior. The option can be set to false for queries that are known to either read a lot of data which would thrash the block cache, or for queries that read data known to be outside of the hot set. By setting the option to false, data read by the query will not make it into the RocksDB block cache if it is not already in there, thus leaving more room for the actual hot set.
func WithQueryForceOneShardAttributeValue ¶
WithQueryForceOneShardAttributeValue is used to configure a context that will set the ForceOneShardAttributeValue of a query request.
func WithQueryFullCount ¶
WithQueryFullCount is used to configure whether the query returns the full count of results before the last LIMIT statement.
func WithQueryMaxRuntime ¶
func WithQueryMemoryLimit ¶
WithQueryMemoryLimit is used to configure a context that will set the MemoryLimit of a query request.
func WithQueryOptimizerRules ¶ added in v1.6.0
WithQueryOptimizerRules applies optimizer rules for a query.
func WithQueryProfile ¶
WithQueryProfile is used to configure whether Query should be profiled.
func WithQuerySatelliteSyncWait ¶
WithQuerySatelliteSyncWait sets the satelliteSyncWait query value on the query cursor request.
func WithQueryShardIds ¶ added in v1.3.0
WithQueryShardIds is used to configure a context that will set the ShardIds of a query request.
func WithQueryStream ¶
WithQueryStream is used to configure whether this becomes a stream query. A stream query is not executed right away, but continually evaluated when the client is requesting more results. Should the cursor expire, the query transaction is canceled. This means that for writing queries, clients have to read the query cursor until the HasMore() method returns false.
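A sketch of a typical stream-query read loop (db as in the Overview; the AQL query is illustrative):

ctx := driver.WithQueryStream(context.Background(), true)
cursor, err := db.Query(ctx, "FOR u IN users RETURN u", nil)
if err != nil {
    // Handle error
}
defer cursor.Close()
for {
    var doc map[string]interface{}
    if _, err := cursor.ReadDocument(ctx, &doc); driver.IsNoMoreDocuments(err) {
        break
    } else if err != nil {
        // Handle error
    }
    // Process doc
}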
func WithQueryTTL ¶
WithQueryTTL is used to configure a context that will set the TTL of a query request.
func WithRawResponse ¶
WithRawResponse is used to configure a context that will make all functions store the raw response into a buffer.
func WithRefillIndexCaches ¶ added in v1.6.0
WithRefillIndexCaches is used to refill index caches during AQL operations.
func WithResponse ¶
WithResponse is used to configure a context that will make all functions store the response into the given value.
func WithReturnNew ¶
WithReturnNew is used to configure a context to make create, update & replace document functions return the new document into the given result.
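A sketch of capturing the stored document on create (col as in the Overview):

var newDoc map[string]interface{}
ctx := driver.WithReturnNew(context.Background(), &newDoc)
if _, err := col.CreateDocument(ctx, map[string]interface{}{"name": "alice"}); err != nil {
    // Handle error
}
// newDoc now holds the document exactly as stored, including system attributes.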
func WithReturnOld ¶
WithReturnOld is used to configure a context to make update & replace document functions return the old document into the given result.
func WithRevision ¶
WithRevision is used to configure a context to make document functions specify an explicit revision of the document using an `If-Match` condition.
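A sketch of an optimistic-locking update using the revision from an earlier read (meta comes from a previous ReadDocument or CreateDocument call; replacement is a hypothetical document value):

// Only replace the document if its current revision still matches.
ctx := driver.WithRevision(context.Background(), meta.Rev)
_, err := col.ReplaceDocument(ctx, meta.Key, replacement)
if driver.IsPreconditionFailed(err) {
    // The document was modified in the meantime; re-read and retry.
}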
func WithRevisions ¶
WithRevisions is used to configure a context to make multi-document functions specify explicit revisions of the documents.
func WithSilent ¶
WithSilent is used to configure a context to make functions return an empty result (silent==true), instead of a metadata result (silent==false, default). You can pass a single (optional) boolean. If that is set to false, you explicitly ask to return a metadata result.
func WithSkipExistCheck ¶ added in v1.6.2
WithSkipExistCheck is used to disable validation of resource existence, e.g. ClientDatabases.Database will not make the additional call to ArangoDB to ensure that the database exists.
func WithTransactionID ¶
func WithTransactionID(parent context.Context, tid TransactionID) context.Context
WithTransactionID is used to bind a request to a specific transaction.
func WithWaitForSync ¶
WithWaitForSync is used to configure a context to make modification functions wait until the data has been synced to disk (or not). You can pass a single (optional) boolean. If that is set to false, you explicitly do not wait for data to be synced to disk.
Types ¶
type AbortTransactionOptions ¶
type AbortTransactionOptions struct{}
AbortTransactionOptions provides options for AbortTransaction. Currently unused.
type AccessTarget ¶
type AccessTarget interface {
    // Name returns the name of the database/collection.
    Name() string
}
AccessTarget is implemented by Database & Collection and is used to get/set/remove collection permissions.
type AggregatedStatus ¶ added in v1.4.0
type AggregatedStatus struct {
    // The time at which the status was measured.
    TimeStamp time.Time `json:"timeStamp,omitempty"`
    // The status of the in memory graph.
    GraphStoreStatus *GraphStoreStatus `json:"graphStoreStatus,omitempty"`
    // Information about the global supersteps.
    AllGSSStatus *AllGSSStatus `json:"allGssStatus,omitempty"`
}
AggregatedStatus The aggregated details of the full Pregel run. The values are totals across all DB-Servers.
type AllGSSStatus ¶ added in v1.4.0
type AllGSSStatus struct {
    // A list of objects with details for each global superstep.
    Items []GSSStatus `json:"items,omitempty"`
}
AllGSSStatus Information about the global supersteps.
type ArangoError ¶
type ArangoError struct {
    HasError     bool   `json:"error"`
    Code         int    `json:"code"`
    ErrorNum     int    `json:"errorNum"`
    ErrorMessage string `json:"errorMessage"`
}
ArangoError is a Go error with ArangoDB-specific error information.
func AsArangoError ¶
func AsArangoError(err error) (ArangoError, bool)
AsArangoError returns the given error as an ArangoError, together with a boolean indicating whether it is one.
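A sketch of drilling into the ArangoDB-specific details of an error:

if aerr, ok := driver.AsArangoError(err); ok {
    fmt.Printf("code=%d errorNum=%d message=%s\n", aerr.Code, aerr.ErrorNum, aerr.ErrorMessage)
}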
func (ArangoError) Error ¶
func (ae ArangoError) Error() string
Error returns the error message of an ArangoError.
func (ArangoError) Temporary ¶
func (ae ArangoError) Temporary() bool
Temporary returns true when the given error is a temporary error.
func (ArangoError) Timeout ¶
func (ae ArangoError) Timeout() bool
Timeout returns true when the given error is a timeout error.
type ArangoID ¶ added in v1.3.0
type ArangoID struct {
    ID               string `json:"id,omitempty"`
    GloballyUniqueId string `json:"globallyUniqueId,omitempty"`
}
ArangoID is a generic Arango ID struct representation
type ArangoSearchAliasIndex ¶ added in v1.4.0
type ArangoSearchAliasViewProperties ¶ added in v1.4.0
type ArangoSearchAliasViewProperties struct {
    ArangoSearchViewBase
    // Indexes A list of inverted indexes to add to the View.
    Indexes []ArangoSearchAliasIndex `json:"indexes,omitempty"`
}
type ArangoSearchAnalyzer ¶
type ArangoSearchAnalyzer interface {
    // Name returns the analyzer name
    Name() string
    // Type returns the analyzer type
    Type() ArangoSearchAnalyzerType
    // UniqueName returns the unique name: <database>::<analyzer-name>
    UniqueName() string
    // Definition returns the analyzer definition
    Definition() ArangoSearchAnalyzerDefinition
    // Properties returns the analyzer properties
    Properties() ArangoSearchAnalyzerProperties
    // Database returns the database of this analyzer
    Database() Database
    // Remove removes the analyzer
    Remove(ctx context.Context, force bool) error
}
type ArangoSearchAnalyzerAQLReturnType ¶
type ArangoSearchAnalyzerAQLReturnType string
const (
    ArangoSearchAnalyzerAQLReturnTypeString ArangoSearchAnalyzerAQLReturnType = "string"
    ArangoSearchAnalyzerAQLReturnTypeNumber ArangoSearchAnalyzerAQLReturnType = "number"
    ArangoSearchAnalyzerAQLReturnTypeBool   ArangoSearchAnalyzerAQLReturnType = "bool"
)
func (ArangoSearchAnalyzerAQLReturnType) New ¶
func (a ArangoSearchAnalyzerAQLReturnType) New() *ArangoSearchAnalyzerAQLReturnType
New returns a pointer to the selected return type.
type ArangoSearchAnalyzerDefinition ¶
type ArangoSearchAnalyzerDefinition struct {
    Name       string                         `json:"name,omitempty"`
    Type       ArangoSearchAnalyzerType       `json:"type,omitempty"`
    Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"`
    Features   []ArangoSearchAnalyzerFeature  `json:"features,omitempty"`
    ArangoError
}
ArangoSearchAnalyzerDefinition provides the definition of an analyzer.
type ArangoSearchAnalyzerFeature ¶
type ArangoSearchAnalyzerFeature string
ArangoSearchAnalyzerFeature specifies a feature of an analyzer.
const (
    // ArangoSearchAnalyzerFeatureFrequency how often a term is seen, required for PHRASE()
    ArangoSearchAnalyzerFeatureFrequency ArangoSearchAnalyzerFeature = "frequency"
    // ArangoSearchAnalyzerFeatureNorm the field normalization factor
    ArangoSearchAnalyzerFeatureNorm ArangoSearchAnalyzerFeature = "norm"
    // ArangoSearchAnalyzerFeaturePosition sequentially increasing term position, required for PHRASE().
    // If present then the frequency feature is also required
    ArangoSearchAnalyzerFeaturePosition ArangoSearchAnalyzerFeature = "position"
    // ArangoSearchAnalyzerFeatureOffset can be specified if the 'position' feature is set
    ArangoSearchAnalyzerFeatureOffset ArangoSearchAnalyzerFeature = "offset"
)
type ArangoSearchAnalyzerGeoJSONType ¶
type ArangoSearchAnalyzerGeoJSONType string
ArangoSearchAnalyzerGeoJSONType GeoJSON Type parameter.
const (
    // ArangoSearchAnalyzerGeoJSONTypeShape indexes all GeoJSON geometry types (Point, Polygon etc.). (default)
    ArangoSearchAnalyzerGeoJSONTypeShape ArangoSearchAnalyzerGeoJSONType = "shape"
    // ArangoSearchAnalyzerGeoJSONTypeCentroid computes and only indexes the centroid of the input geometry.
    ArangoSearchAnalyzerGeoJSONTypeCentroid ArangoSearchAnalyzerGeoJSONType = "centroid"
    // ArangoSearchAnalyzerGeoJSONTypePoint only indexes GeoJSON objects of type Point, ignoring all other geometry types.
    ArangoSearchAnalyzerGeoJSONTypePoint ArangoSearchAnalyzerGeoJSONType = "point"
)
func (ArangoSearchAnalyzerGeoJSONType) New ¶
func (a ArangoSearchAnalyzerGeoJSONType) New() *ArangoSearchAnalyzerGeoJSONType
New returns a pointer to the selected type.
type ArangoSearchAnalyzerGeoOptions ¶
type ArangoSearchAnalyzerGeoOptions struct {
    // MaxCells defines the maximum number of S2 cells.
    MaxCells *int `json:"maxCells,omitempty"`
    // MinLevel defines the least precise S2 level.
    MinLevel *int `json:"minLevel,omitempty"`
    // MaxLevel defines the most precise S2 level.
    MaxLevel *int `json:"maxLevel,omitempty"`
}
ArangoSearchAnalyzerGeoOptions for fine-tuning geo queries. These options should generally remain unchanged.
type ArangoSearchAnalyzerPipeline ¶
type ArangoSearchAnalyzerPipeline struct {
    // Type of the Pipeline Analyzer
    Type ArangoSearchAnalyzerType `json:"type"`
    // Properties of the Pipeline Analyzer
    Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"`
}
ArangoSearchAnalyzerPipeline provides object definition for Pipeline array parameter
type ArangoSearchAnalyzerProperties ¶
type ArangoSearchAnalyzerProperties struct {
    // Locale used by Stem, Norm, Text
    Locale string `json:"locale,omitempty"`
    // Delimiter used by Delimiter
    Delimiter string `json:"delimiter,omitempty"`
    // Accent used by Norm, Text
    Accent *bool `json:"accent,omitempty"`
    // Case used by Norm, Text, Segmentation
    Case ArangoSearchCaseType `json:"case,omitempty"`
    // EdgeNGram used by Text
    EdgeNGram *ArangoSearchEdgeNGram `json:"edgeNgram,omitempty"`
    // Min used by NGram
    Min *int64 `json:"min,omitempty"`
    // Max used by NGram
    Max *int64 `json:"max,omitempty"`
    // PreserveOriginal used by NGram
    PreserveOriginal *bool `json:"preserveOriginal,omitempty"`
    // StartMarker used by NGram
    StartMarker *string `json:"startMarker,omitempty"`
    // EndMarker used by NGram
    EndMarker *string `json:"endMarker,omitempty"`
    // StreamType used by NGram
    StreamType *ArangoSearchNGramStreamType `json:"streamType,omitempty"`
    // Stemming used by Text
    Stemming *bool `json:"stemming,omitempty"`
    // Stopwords used by Text and Stopwords. This field is not mandatory since version 3.7 of arangod, so it cannot be omitted in 3.6.
    Stopwords []string `json:"stopwords"`
    // StopwordsPath used by Text
    StopwordsPath []string `json:"stopwordsPath,omitempty"`
    // QueryString used by AQL.
    QueryString string `json:"queryString,omitempty"`
    // CollapsePositions used by AQL.
    CollapsePositions *bool `json:"collapsePositions,omitempty"`
    // KeepNull used by AQL.
    KeepNull *bool `json:"keepNull,omitempty"`
    // BatchSize used by AQL.
    BatchSize *int `json:"batchSize,omitempty"`
    // MemoryLimit used by AQL.
    MemoryLimit *int `json:"memoryLimit,omitempty"`
    // ReturnType used by AQL.
    ReturnType *ArangoSearchAnalyzerAQLReturnType `json:"returnType,omitempty"`
    // Pipeline used by Pipeline.
    Pipeline []ArangoSearchAnalyzerPipeline `json:"pipeline,omitempty"`
    // Type used by GeoJSON.
    Type *ArangoSearchAnalyzerGeoJSONType `json:"type,omitempty"`
    // Options used by GeoJSON and GeoPoint
    Options *ArangoSearchAnalyzerGeoOptions `json:"options,omitempty"`
    // Latitude used by GeoPoint.
    Latitude []string `json:"latitude,omitempty"`
    // Longitude used by GeoPoint.
    Longitude []string `json:"longitude,omitempty"`
    // Break used by Segmentation
    Break ArangoSearchBreakType `json:"break,omitempty"`
    // Hex used by stopwords.
    // If false, then each string in stopwords is used verbatim.
    // If true, then each string in stopwords needs to be hex-encoded.
    Hex *bool `json:"hex,omitempty"`
    // ModelLocation used by Classification, NearestNeighbors.
    // The on-disk path to the trained fastText supervised model.
    // Note: if you are running this in an ArangoDB cluster, this model must exist on every machine in the cluster.
    ModelLocation string `json:"model_location,omitempty"`
    // TopK used by Classification, NearestNeighbors.
    // The number of class labels that will be produced per input (default: 1)
    TopK *uint64 `json:"top_k,omitempty"`
    // Threshold used by Classification.
    // The probability threshold for which a label will be assigned to an input.
    // A fastText model produces a probability per class label, and this is what will be filtered (default: 0.99).
    Threshold *float64 `json:"threshold,omitempty"`
    // Analyzer used by Minhash.
    // Definition of the inner analyzer to use for incoming data. If this field is omitted or an empty object, it falls back to the 'identity' analyzer.
    Analyzer *ArangoSearchAnalyzerDefinition `json:"analyzer,omitempty"`
    // NumHashes used by Minhash.
    // Size of the min hash signature. Must be greater than or equal to 1.
    NumHashes *uint64 `json:"numHashes,omitempty"`
    // Format is the internal binary representation to use for storing the geo-spatial data in an index.
    Format *ArangoSearchFormat `json:"format,omitempty"`
}
ArangoSearchAnalyzerProperties specifies options for the analyzer. Which fields are required and respected depends on the analyzer type. More information can be found here: https://www.arangodb.com/docs/stable/arangosearch-analyzers.html#analyzer-properties
type ArangoSearchAnalyzerType ¶
type ArangoSearchAnalyzerType string
ArangoSearchAnalyzerType specifies the type of an analyzer.
const (
    // ArangoSearchAnalyzerTypeIdentity treat value as atom (no transformation)
    ArangoSearchAnalyzerTypeIdentity ArangoSearchAnalyzerType = "identity"
    // ArangoSearchAnalyzerTypeDelimiter split into tokens at user-defined character
    ArangoSearchAnalyzerTypeDelimiter ArangoSearchAnalyzerType = "delimiter"
    // ArangoSearchAnalyzerTypeStem apply stemming to the value as a whole
    ArangoSearchAnalyzerTypeStem ArangoSearchAnalyzerType = "stem"
    // ArangoSearchAnalyzerTypeNorm apply normalization to the value as a whole
    ArangoSearchAnalyzerTypeNorm ArangoSearchAnalyzerType = "norm"
    // ArangoSearchAnalyzerTypeNGram create n-grams from value with user-defined lengths
    ArangoSearchAnalyzerTypeNGram ArangoSearchAnalyzerType = "ngram"
    // ArangoSearchAnalyzerTypeText tokenize into words, optionally with stemming, normalization and stop-word filtering
    ArangoSearchAnalyzerTypeText ArangoSearchAnalyzerType = "text"
    // ArangoSearchAnalyzerTypeAQL an Analyzer capable of running a restricted AQL query to perform data manipulation / filtering.
    ArangoSearchAnalyzerTypeAQL ArangoSearchAnalyzerType = "aql"
    // ArangoSearchAnalyzerTypePipeline an Analyzer capable of chaining effects of multiple Analyzers into one.
    // The pipeline is a list of Analyzers, where the output of an Analyzer is passed to the next for further processing.
    // The final token value is determined by the last Analyzer in the pipeline.
    ArangoSearchAnalyzerTypePipeline ArangoSearchAnalyzerType = "pipeline"
    // ArangoSearchAnalyzerTypeStopwords an Analyzer capable of removing specified tokens from the input.
    ArangoSearchAnalyzerTypeStopwords ArangoSearchAnalyzerType = "stopwords"
    // ArangoSearchAnalyzerTypeGeoJSON an Analyzer capable of breaking up a GeoJSON object into a set of indexable tokens for further usage with ArangoSearch Geo functions.
    ArangoSearchAnalyzerTypeGeoJSON ArangoSearchAnalyzerType = "geojson"
    // ArangoSearchAnalyzerTypeGeoS2 an Analyzer capable of indexing GeoJSON data with inverted indexes or Views similar
    // to the existing `geojson` Analyzer, but it internally uses a more efficient format for storing the geo-spatial data.
    ArangoSearchAnalyzerTypeGeoS2 ArangoSearchAnalyzerType = "geo_s2"
    // ArangoSearchAnalyzerTypeGeoPoint an Analyzer capable of breaking up a JSON object describing a coordinate into a set of indexable tokens for further usage with ArangoSearch Geo functions.
    ArangoSearchAnalyzerTypeGeoPoint ArangoSearchAnalyzerType = "geopoint"
    // ArangoSearchAnalyzerTypeSegmentation an Analyzer capable of breaking up the input text into tokens in a language-agnostic manner
    ArangoSearchAnalyzerTypeSegmentation ArangoSearchAnalyzerType = "segmentation"
    // ArangoSearchAnalyzerTypeCollation an Analyzer capable of converting the input into a set of language-specific tokens
    ArangoSearchAnalyzerTypeCollation ArangoSearchAnalyzerType = "collation"
    // ArangoSearchAnalyzerTypeClassification an Analyzer capable of classifying tokens in the input text. (EE only)
    ArangoSearchAnalyzerTypeClassification ArangoSearchAnalyzerType = "classification"
    // ArangoSearchAnalyzerTypeNearestNeighbors an Analyzer capable of finding nearest neighbors of tokens in the input. (EE only)
    ArangoSearchAnalyzerTypeNearestNeighbors ArangoSearchAnalyzerType = "nearest_neighbors"
    // ArangoSearchAnalyzerTypeMinhash an Analyzer capable of evaluating so-called MinHash signatures as a stream of tokens. (EE only)
    ArangoSearchAnalyzerTypeMinhash ArangoSearchAnalyzerType = "minhash"
)
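A sketch of defining a custom text analyzer via EnsureAnalyzer (listed under DatabaseArangoSearchAnalyzers); the analyzer name and property values are illustrative:

stemming := true
found, _, err := db.EnsureAnalyzer(ctx, driver.ArangoSearchAnalyzerDefinition{
    Name: "text_en_custom", // hypothetical name
    Type: driver.ArangoSearchAnalyzerTypeText,
    Properties: driver.ArangoSearchAnalyzerProperties{
        Locale:    "en.utf-8",
        Stemming:  &stemming,
        Stopwords: []string{},
    },
    Features: []driver.ArangoSearchAnalyzerFeature{
        driver.ArangoSearchAnalyzerFeatureFrequency,
        driver.ArangoSearchAnalyzerFeaturePosition,
    },
})
if err != nil {
    // Handle error
}
_ = found // true if the analyzer already existed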
type ArangoSearchBreakType ¶ added in v1.3.0
type ArangoSearchBreakType string
const (
    // ArangoSearchBreakTypeAll to return all tokens
    ArangoSearchBreakTypeAll ArangoSearchBreakType = "all"
    // ArangoSearchBreakTypeAlpha to return tokens composed of alphanumeric characters only (default)
    ArangoSearchBreakTypeAlpha ArangoSearchBreakType = "alpha"
    // ArangoSearchBreakTypeGraphic to return tokens composed of non-whitespace characters only
    ArangoSearchBreakTypeGraphic ArangoSearchBreakType = "graphic"
)
type ArangoSearchCaseType ¶
type ArangoSearchCaseType string
const (
    // ArangoSearchCaseUpper to convert to all upper-case characters
    ArangoSearchCaseUpper ArangoSearchCaseType = "upper"
    // ArangoSearchCaseLower to convert to all lower-case characters
    ArangoSearchCaseLower ArangoSearchCaseType = "lower"
    // ArangoSearchCaseNone to not change character case (default)
    ArangoSearchCaseNone ArangoSearchCaseType = "none"
)
type ArangoSearchConsolidationPolicy ¶
type ArangoSearchConsolidationPolicy struct {
    // Type returns the type of the ConsolidationPolicy. This can then be used to
    // interpret the corresponding ArangoSearchConsolidationPolicy* fields below.
    Type ArangoSearchConsolidationPolicyType `json:"type,omitempty"`

    ArangoSearchConsolidationPolicyBytesAccum
    ArangoSearchConsolidationPolicyTier
}
ArangoSearchConsolidationPolicy holds threshold values specifying when to consolidate view data. Semantics of the values depend on where they are used.
type ArangoSearchConsolidationPolicyBytesAccum ¶
type ArangoSearchConsolidationPolicyBytesAccum struct {
    // Threshold, see ArangoSearchConsolidationTypeBytesAccum
    Threshold *float64 `json:"threshold,omitempty"`
}
ArangoSearchConsolidationPolicyBytesAccum contains fields used for ArangoSearchConsolidationPolicyTypeBytesAccum
type ArangoSearchConsolidationPolicyTier ¶
type ArangoSearchConsolidationPolicyTier struct {
    MinScore *int64 `json:"minScore,omitempty"`
    // MinSegments specifies the minimum number of segments that will be evaluated as candidates for consolidation.
    MinSegments *int64 `json:"segmentsMin,omitempty"`
    // MaxSegments specifies the maximum number of segments that will be evaluated as candidates for consolidation.
    MaxSegments *int64 `json:"segmentsMax,omitempty"`
    // SegmentsBytesMax specifies the maximum allowed size of all consolidated segments in bytes.
    SegmentsBytesMax *int64 `json:"segmentsBytesMax,omitempty"`
    // SegmentsBytesFloor defines the value (in bytes) to treat all smaller segments as equal for consolidation selection.
    SegmentsBytesFloor *int64 `json:"segmentsBytesFloor,omitempty"`
    // Lookahead specifies the number of additionally searched tiers beyond the initially chosen candidates based on min_segments,
    // max_segments, segments_bytes_max, segments_bytes_floor with respect to defined values.
    // The default value falls back to integer_traits<size_t>::const_max (in the C++ source code).
    Lookahead *int64 `json:"lookahead,omitempty"`
}
ArangoSearchConsolidationPolicyTier contains fields used for ArangoSearchConsolidationPolicyTypeTier
type ArangoSearchConsolidationPolicyType ¶
type ArangoSearchConsolidationPolicyType string
ArangoSearchConsolidationPolicyType strings for consolidation types
const (
    // ArangoSearchConsolidationPolicyTypeTier consolidate based on segment byte size and live document count as dictated by the customization attributes.
    ArangoSearchConsolidationPolicyTypeTier ArangoSearchConsolidationPolicyType = "tier"
    // ArangoSearchConsolidationPolicyTypeBytesAccum consolidate if and only if ({threshold} range [0.0, 1.0]):
    // {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes,
    // i.e. the sum of all candidate segment byte sizes is less than the total segment byte size multiplied by the {threshold}.
    ArangoSearchConsolidationPolicyTypeBytesAccum ArangoSearchConsolidationPolicyType = "bytes_accum"
)
type ArangoSearchEdgeNGram ¶
type ArangoSearchEdgeNGram struct {
    // Min used by Text
    Min *int64 `json:"min,omitempty"`
    // Max used by Text
    Max *int64 `json:"max,omitempty"`
    // PreserveOriginal used by Text
    PreserveOriginal *bool `json:"preserveOriginal,omitempty"`
}
ArangoSearchEdgeNGram specifies options for the edgeNGram text analyzer. More information can be found here: https://www.arangodb.com/docs/stable/arangosearch-analyzers.html#text
type ArangoSearchElementProperties ¶
type ArangoSearchElementProperties struct {
    AnalyzerDefinitions []ArangoSearchAnalyzerDefinition `json:"analyzerDefinitions,omitempty"`
    // The list of analyzers to be used for indexing of string values. Defaults to ["identity"].
    Analyzers []string `json:"analyzers,omitempty"`
    // If set to true, all fields of this element will be indexed. Defaults to false.
    IncludeAllFields *bool `json:"includeAllFields,omitempty"`
    // If set to true, values in a list are treated as separate values. Defaults to false.
    TrackListPositions *bool `json:"trackListPositions,omitempty"`
    // This value specifies how the view should track values.
    StoreValues ArangoSearchStoreValues `json:"storeValues,omitempty"`
    // Fields contains the properties for individual fields of the element.
    // The keys of the map are field names.
    Fields ArangoSearchFields `json:"fields,omitempty"`
    // If set to true, then no exclusive lock is used on the source collection during View index creation,
    // so that it remains basically available. inBackground is an option that can be set when adding links.
    // It does not get persisted as it is not a View property, but only a one-off option.
    InBackground *bool `json:"inBackground,omitempty"`
    // Nested contains the properties for nested fields (sub-objects) of the element.
    // Enterprise Edition only.
    Nested ArangoSearchFields `json:"nested,omitempty"`
    // Cache If you enable this option, then field normalization values are always cached in memory.
    // Introduced in v3.9.5, Enterprise Edition only.
    Cache *bool `json:"cache,omitempty"`
}
ArangoSearchElementProperties contains properties that specify how an element is indexed in an ArangoSearch view. Note that this structure is recursive. Settings not specified (nil) at a given level will inherit their setting from a lower level.
type ArangoSearchFields ¶
type ArangoSearchFields map[string]ArangoSearchElementProperties
ArangoSearchFields is a strongly typed map containing properties per field. The keys in the map are field names.
type ArangoSearchFormat ¶ added in v1.6.0
type ArangoSearchFormat string
const (
    // FormatLatLngDouble stores each latitude and longitude value as an 8-byte floating-point value (16 bytes per coordinate pair).
    // This is the default value.
    FormatLatLngDouble ArangoSearchFormat = "latLngDouble"
    // FormatLatLngInt stores each latitude and longitude value as a 4-byte integer value (8 bytes per coordinate pair).
    // This is the most compact format, but the precision is limited to approximately 1 to 10 centimeters.
    FormatLatLngInt ArangoSearchFormat = "latLngInt"
    // FormatS2Point stores each longitude-latitude pair in the native format of Google S2, which is used for geo-spatial
    // calculations (24 bytes per coordinate pair).
    FormatS2Point ArangoSearchFormat = "s2Point"
)
func (ArangoSearchFormat) New ¶ added in v1.6.0
func (a ArangoSearchFormat) New() *ArangoSearchFormat
type ArangoSearchLinks ¶
type ArangoSearchLinks map[string]ArangoSearchElementProperties
ArangoSearchLinks is a strongly typed map containing links between a collection and a view. The keys in the map are collection names.
type ArangoSearchNGramStreamType ¶
type ArangoSearchNGramStreamType string
const (
    // ArangoSearchNGramStreamBinary used by NGram. Default value
    ArangoSearchNGramStreamBinary ArangoSearchNGramStreamType = "binary"
    // ArangoSearchNGramStreamUTF8 used by NGram
    ArangoSearchNGramStreamUTF8 ArangoSearchNGramStreamType = "utf8"
)
type ArangoSearchPrimarySortEntry ¶
type ArangoSearchPrimarySortEntry struct {
    Field     string `json:"field,omitempty"`
    Ascending *bool  `json:"asc,omitempty"`
    // Deprecated: please use Ascending instead
    Direction *ArangoSearchSortDirection `json:"direction,omitempty"`
}
ArangoSearchPrimarySortEntry describes an entry for the primarySort list
func (ArangoSearchPrimarySortEntry) GetAscending ¶
func (pse ArangoSearchPrimarySortEntry) GetAscending() bool
GetAscending returns the value of Ascending or false if not set
func (ArangoSearchPrimarySortEntry) GetDirection ¶
func (pse ArangoSearchPrimarySortEntry) GetDirection() ArangoSearchSortDirection
GetDirection returns the sort direction or empty string if not set
type ArangoSearchSortDirection ¶
type ArangoSearchSortDirection string
ArangoSearchSortDirection describes the sorting direction
const (
    // ArangoSearchSortDirectionAsc sort ascending
    ArangoSearchSortDirectionAsc ArangoSearchSortDirection = "ASC"
    // ArangoSearchSortDirectionDesc sort descending
    ArangoSearchSortDirectionDesc ArangoSearchSortDirection = "DESC"
)
type ArangoSearchStoreValues ¶
type ArangoSearchStoreValues string
ArangoSearchStoreValues is the type of the StoreValues option of an ArangoSearch element.
const (
    // ArangoSearchStoreValuesNone specifies that a view should not store values.
    ArangoSearchStoreValuesNone ArangoSearchStoreValues = "none"
    // ArangoSearchStoreValuesID specifies that a view should only store
    // information about value presence, to allow use of the EXISTS() function.
    ArangoSearchStoreValuesID ArangoSearchStoreValues = "id"
)
type ArangoSearchView ¶
type ArangoSearchView interface {
    // View includes generic View functions
    View
    // Properties fetches extended information about the view.
    Properties(ctx context.Context) (ArangoSearchViewProperties, error)
    // SetProperties changes properties of the view.
    SetProperties(ctx context.Context, options ArangoSearchViewProperties) error
}
ArangoSearchView provides access to the information of a view. Views are only available in ArangoDB 3.4 and higher.
type ArangoSearchViewAlias ¶ added in v1.4.0
type ArangoSearchViewAlias interface {
    // View includes generic View functions
    View
    // Properties fetches extended information about the view.
    Properties(ctx context.Context) (ArangoSearchAliasViewProperties, error)
    // SetProperties changes properties of the view.
    SetProperties(ctx context.Context, options ArangoSearchAliasViewProperties) (ArangoSearchAliasViewProperties, error)
}
ArangoSearchViewAlias provides access to the information of a view alias. View aliases are only available in ArangoDB 3.10 and higher.
type ArangoSearchViewBase ¶ added in v1.3.0
type ArangoSearchViewBase struct {
    Type ViewType `json:"type,omitempty"`
    Name string   `json:"name,omitempty"`
    ArangoID
    ArangoError
}
type ArangoSearchViewProperties ¶
type ArangoSearchViewProperties struct {
    // CleanupIntervalStep specifies the minimum number of commits to wait between
    // removing unused files in the data directory.
    // Defaults to 10.
    // Use 0 to disable waiting.
    // For the case where the consolidation policies merge segments often
    // (i.e. a lot of commit+consolidate), a lower value will cause a lot of
    // disk space to be wasted.
    // For the case where the consolidation policies rarely merge segments
    // (i.e. few inserts/deletes), a higher value will impact performance
    // without any added benefits.
    CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"`
    // ConsolidationInterval specifies the minimum number of milliseconds that must be waited
    // between committing index data changes and making them visible to queries.
    // Defaults to 60000.
    // Use 0 to disable.
    // For the case where there are a lot of inserts/updates, a lower value,
    // until commit, will cause the index not to account for them and memory usage
    // would continue to grow.
    // For the case where there are a few inserts/updates, a higher value will
    // impact performance and waste disk space for each commit call without
    // any added benefits.
    ConsolidationInterval *int64 `json:"consolidationIntervalMsec,omitempty"`
    // ConsolidationPolicy specifies thresholds for consolidation.
    ConsolidationPolicy *ArangoSearchConsolidationPolicy `json:"consolidationPolicy,omitempty"`
    // CommitInterval ArangoSearch waits at least this many milliseconds between committing view data store changes and making documents visible to queries.
    CommitInterval *int64 `json:"commitIntervalMsec,omitempty"`
    // WriteBufferIdle specifies the maximum number of writers (segments) cached in the pool.
    // A value of 0 turns off caching; the default value is 64.
    WriteBufferIdel *int64 `json:"writebufferIdle,omitempty"`
    // WriteBufferActive specifies the maximum number of concurrent active writers (segments) performing a transaction.
    // Other writers (segments) wait until the current active writers (segments) finish.
    // A value of 0 turns off this limit and is used by default.
    WriteBufferActive *int64 `json:"writebufferActive,omitempty"`
    // WriteBufferSizeMax specifies the maximum memory byte size per writer (segment) before a writer (segment) flush is triggered.
    // A value of 0 turns off this limit for any writer (buffer); data will then be flushed only after a period defined for a special thread during ArangoDB server startup.
    // A value of 0 should be used carefully due to the high potential memory consumption.
    WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"`
    // Links contains the properties for how individual collections
    // are indexed in the view.
    // The keys of the map are collection names.
    Links ArangoSearchLinks `json:"links,omitempty"`
    // OptimizeTopK is an array of strings defining optimized sort expressions.
    // Introduced in v3.11.0, Enterprise Edition only.
    OptimizeTopK []string `json:"optimizeTopK,omitempty"`
    // PrimarySort describes how individual fields are sorted.
    PrimarySort []ArangoSearchPrimarySortEntry `json:"primarySort,omitempty"`
    // PrimarySortCompression defines how to compress the primary sort data (introduced in v3.7.1).
    // ArangoDB v3.5 and v3.6 always compress the index using LZ4. This option is immutable.
    PrimarySortCompression PrimarySortCompression `json:"primarySortCompression,omitempty"`
    // PrimarySortCache If you enable this option, then the primary sort columns are always cached in memory.
    // Cannot be changed after creating the View.
    // Introduced in v3.9.5, Enterprise Edition only.
    PrimarySortCache *bool `json:"primarySortCache,omitempty"`
    // PrimaryKeyCache If you enable this option, then the primary key columns are always cached in memory.
    // Cannot be changed after creating the View.
    // Introduced in v3.9.6, Enterprise Edition only.
    PrimaryKeyCache *bool `json:"primaryKeyCache,omitempty"`
    // StoredValues is an array of objects describing which document attributes to store in the View index (introduced in v3.7.1).
    // It can then cover search queries, which means the data can be taken from the index directly and accessing the storage engine can be avoided.
    // This option is immutable.
    StoredValues []StoredValue `json:"storedValues,omitempty"`
    ArangoSearchViewBase
}
ArangoSearchViewProperties contains properties on an ArangoSearch view.
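A sketch of creating a view with a link to a collection, using CreateArangoSearchView as listed under DatabaseViews ("usersView" and the "users" collection are illustrative):

includeAll := true
view, err := db.CreateArangoSearchView(ctx, "usersView", &driver.ArangoSearchViewProperties{
    Links: driver.ArangoSearchLinks{
        // Index all fields of the (assumed) "users" collection.
        "users": driver.ArangoSearchElementProperties{
            IncludeAllFields: &includeAll,
        },
    },
})
if err != nil {
    // Handle error
}
_ = view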
type AsyncJobDeleteOptions ¶ added in v1.6.1
type AsyncJobDeleteType ¶ added in v1.6.1
type AsyncJobDeleteType string
const (
    DeleteAllJobs     AsyncJobDeleteType = "all"
    DeleteExpiredJobs AsyncJobDeleteType = "expired"
    DeleteSingleJob   AsyncJobDeleteType = "single"
)
type AsyncJobListOptions ¶ added in v1.6.1
type AsyncJobListOptions struct {
    // Count is the maximum number of ids to return per call.
    // If not specified, a server-defined maximum value will be used.
    Count int `json:"count,omitempty"`
}
type AsyncJobService ¶ added in v1.6.1
type AsyncJobService interface {
    // List returns the ids of job results with a specific status.
    List(ctx context.Context, jobType AsyncJobStatusType, opts *AsyncJobListOptions) ([]string, error)
    // Status returns the status of a specific job.
    Status(ctx context.Context, jobID string) (AsyncJobStatusType, error)
    // Cancel cancels a specific async job.
    Cancel(ctx context.Context, jobID string) (bool, error)
    // Delete deletes the result of an async job.
    Delete(ctx context.Context, deleteType AsyncJobDeleteType, opts *AsyncJobDeleteOptions) (bool, error)
}
AsyncJobService https://www.arangodb.com/docs/devel/http/jobs.html
type AsyncJobStatusType ¶ added in v1.6.1
type AsyncJobStatusType string
const (
    JobDone    AsyncJobStatusType = "done"
    JobPending AsyncJobStatusType = "pending"
)
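A minimal sketch of inspecting asynchronous jobs via the AsyncJob service, based on the signatures above (an existing `client` is assumed):

    // List the ids of jobs that have finished.
    done, err := client.AsyncJob().List(ctx, driver.JobDone, nil)
    if err != nil {
        // Handle error
    }
    for _, id := range done {
        status, err := client.AsyncJob().Status(ctx, id)
        if err != nil {
            // Handle error
        }
        fmt.Println(id, status)
    }
    // Clean up the results of expired jobs.
    if _, err := client.AsyncJob().Delete(ctx, driver.DeleteExpiredJobs, nil); err != nil {
        // Handle error
    }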
type Authentication ¶
type Authentication interface {
    // Type returns the type of authentication.
    Type() AuthenticationType
    // Get returns a configuration property of the authentication.
    // Supported properties depend on the type of authentication.
    Get(property string) string
}
Authentication implements a kind of authentication.
func BasicAuthentication ¶
func BasicAuthentication(userName, password string) Authentication
BasicAuthentication creates an authentication implementation based on the given username & password.
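For example, to connect with basic authentication (a sketch following the Overview example; `conn` is an existing connection and the credentials are placeholders):

    auth := driver.BasicAuthentication("root", "password")
    c, err := driver.NewClient(driver.ClientConfig{
        Connection:     conn,
        Authentication: auth,
    })
    if err != nil {
        // Handle error
    }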
func JWTAuthentication ¶
func JWTAuthentication(userName, password string) Authentication
JWTAuthentication creates a JWT token authentication implementation based on the given username & password.
func RawAuthentication ¶
func RawAuthentication(value string) Authentication
RawAuthentication creates a raw authentication implementation based on the given value for the Authorization header.
type AuthenticationType ¶
type AuthenticationType int
const (
    // AuthenticationTypeBasic uses username+password basic authentication.
    AuthenticationTypeBasic AuthenticationType = iota
    // AuthenticationTypeJWT uses username+password JWT token based authentication.
    AuthenticationTypeJWT
    // AuthenticationTypeRaw uses a raw value for the Authorization header.
    // Only JWT is supported in VST, and the value must be the response of calling /_open/auth,
    // or your own signed JWT based on the same secret as the server uses.
    AuthenticationTypeRaw
)
type BackupCreateOptions ¶
type BackupCreateOptions struct {
    Label   string        `json:"label,omitempty"`
    Timeout time.Duration `json:"timeout,omitempty"`

    // Deprecated: since 3.10.10 it exists only for backwards compatibility.
    AllowInconsistent bool `json:"allowInconsistent,omitempty"`
}
BackupCreateOptions provides options for Create
type BackupCreateResponse ¶
type BackupCreateResponse struct {
    NumberOfFiles           uint
    NumberOfDBServers       uint
    SizeInBytes             uint64
    PotentiallyInconsistent bool
    CreationTime            time.Time
}
BackupCreateResponse contains information about a newly created backup
type BackupListOptions ¶
type BackupListOptions struct {
    // Only receive meta data about a specific id.
    ID BackupID `json:"id,omitempty"`
}
BackupListOptions provides options for List
type BackupMeta ¶
type BackupMeta struct {
    ID                      BackupID           `json:"id,omitempty"`
    Version                 string             `json:"version,omitempty"`
    DateTime                time.Time          `json:"datetime,omitempty"`
    NumberOfFiles           uint               `json:"nrFiles,omitempty"`
    NumberOfDBServers       uint               `json:"nrDBServers,omitempty"`
    SizeInBytes             uint64             `json:"sizeInBytes,omitempty"`
    PotentiallyInconsistent bool               `json:"potentiallyInconsistent,omitempty"`
    Available               bool               `json:"available,omitempty"`
    NumberOfPiecesPresent   uint               `json:"nrPiecesPresent,omitempty"`
    Keys                    []BackupMetaSha256 `json:"keys,omitempty"`
}
BackupMeta provides meta data of a backup
type BackupMetaSha256 ¶
type BackupMetaSha256 struct {
SHA256 string `json:"sha256"`
}
BackupMetaSha256 contains the SHA256 key details of a backup
type BackupRestoreOptions ¶
type BackupRestoreOptions struct {
    // IgnoreVersion skips the version check when doing a restore (expert only).
    IgnoreVersion bool `json:"ignoreVersion,omitempty"`
}
BackupRestoreOptions provides options for Restore
type BackupTransferJobID ¶
type BackupTransferJobID string
BackupTransferJobID represents a Transfer (upload/download) job
type BackupTransferProgressReport ¶
type BackupTransferProgressReport struct {
    BackupID  BackupID                        `json:"BackupID,omitempty"`
    Cancelled bool                            `json:"Cancelled,omitempty"`
    Timestamp string                          `json:"Timestamp,omitempty"`
    DBServers map[string]BackupTransferReport `json:"DBServers,omitempty"`
}
BackupTransferProgressReport provides progress information for a backup transfer job
type BackupTransferReport ¶
type BackupTransferReport struct {
    Status       BackupTransferStatus `json:"Status,omitempty"`
    Error        int                  `json:"Error,omitempty"`
    ErrorMessage string               `json:"ErrorMessage,omitempty"`
    Progress     struct {
        Total     int    `json:"Total,omitempty"`
        Done      int    `json:"Done,omitempty"`
        Timestamp string `json:"Timestamp,omitempty"`
    } `json:"Progress,omitempty"`
}
BackupTransferReport provides progress information of a backup transfer job for a single dbserver
type BackupTransferStatus ¶
type BackupTransferStatus string
BackupTransferStatus represents all possible states a transfer job can be in
const (
    TransferAcknowledged BackupTransferStatus = "ACK"
    TransferStarted      BackupTransferStatus = "STARTED"
    TransferCompleted    BackupTransferStatus = "COMPLETED"
    TransferFailed       BackupTransferStatus = "FAILED"
    TransferCancelled    BackupTransferStatus = "CANCELLED"
)
type Batch ¶
type Batch interface {
    // BatchID returns the id of this batch.
    BatchID() string
    // LastTick returns the last tick reported by the server for this batch.
    LastTick() Tick
    // Extend extends the lifetime of an existing batch on the server.
    Extend(ctx context.Context, ttl time.Duration) error
    // Delete deletes an existing batch on the server.
    Delete(ctx context.Context) error
}
Batch represents state on the server used during certain replication operations to keep state required by the client (such as Write-Ahead Log, inventory and data-files)
type BeginTransactionOptions ¶
type BeginTransactionOptions struct {
    WaitForSync        bool
    AllowImplicit      bool
    LockTimeout        time.Duration
    MaxTransactionSize uint64
}
BeginTransactionOptions provides options for BeginTransaction call
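A minimal sketch of a stream transaction using these options (TransactionCollections, WithTransactionID and CommitTransaction are defined elsewhere in this package; the collection name "books" is an assumption):

    txID, err := db.BeginTransaction(ctx, driver.TransactionCollections{
        Write: []string{"books"},
    }, &driver.BeginTransactionOptions{WaitForSync: true})
    if err != nil {
        // Handle error
    }
    // Perform document operations inside the transaction.
    col, err := db.Collection(ctx, "books")
    if err != nil {
        // Handle error
    }
    tctx := driver.WithTransactionID(ctx, txID)
    if _, err := col.CreateDocument(tctx, map[string]interface{}{"title": "ArangoDB"}); err != nil {
        // Handle error
    }
    if err := db.CommitTransaction(ctx, txID, nil); err != nil {
        // Handle error
    }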
type BodyBuilder ¶
type BodyBuilder interface {
    // GetBody returns the data generated by the body builder.
    GetBody() []byte
    // SetBody sets the content of the request.
    // The protocol of the connection determines what kind of marshalling takes place.
    // When multiple bodies are given, they are merged, with fields in the first document prevailing.
    SetBody(body ...interface{}) error
    // SetBodyArray sets the content of the request as an array.
    // If the given mergeArray is not nil, its elements are merged with the elements in the body array
    // (mergeArray data overrides bodyArray data). The merge is NOT recursive.
    // The protocol of the connection determines what kind of marshalling takes place.
    SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) error
    // SetBodyImportArray sets the content of the request as an array formatted for importing documents.
    // The protocol of the connection determines what kind of marshalling takes place.
    SetBodyImportArray(bodyArray interface{}) error
    // GetContentType returns the type of the data in the body.
    GetContentType() string
    // Clone creates a new body builder.
    Clone() BodyBuilder
}
type Client ¶
type Client interface {
    // SynchronizeEndpoints fetches all endpoints from an ArangoDB cluster and updates the
    // connection to use those endpoints.
    // When this client is connected to a single server, nothing happens.
    // When this client is connected to a cluster of servers, the connection will be updated to reflect
    // the layout of the cluster.
    // This function requires ArangoDB 3.1.15 or up.
    SynchronizeEndpoints(ctx context.Context) error
    // SynchronizeEndpoints2 fetches all endpoints from an ArangoDB cluster and updates the
    // connection to use those endpoints.
    // When this client is connected to a single server, nothing happens.
    // When this client is connected to a cluster of servers, the connection will be updated to reflect
    // the layout of the cluster.
    // Compared to SynchronizeEndpoints, this function expects a database name as an additional parameter.
    // This database name is used to call `_db/<dbname>/_api/cluster/endpoints`. SynchronizeEndpoints uses
    // the default database, i.e. `_system`. In case the user does not have access to `_system`,
    // SynchronizeEndpoints does not work with earlier versions of ArangoDB.
    SynchronizeEndpoints2(ctx context.Context, dbname string) error
    // Connection returns the connection used by this client.
    Connection() Connection

    // ClientDatabases - Database functions
    ClientDatabases
    // ClientUsers - User functions
    ClientUsers
    // ClientCluster - Cluster functions
    ClientCluster
    // ClientServerInfo - Individual server information functions
    ClientServerInfo
    // ClientServerAdmin - Server/cluster administration functions
    ClientServerAdmin
    // ClientReplication - Replication functions
    ClientReplication
    // ClientAdminBackup - Backup functions
    ClientAdminBackup
    // ClientFoxx - Foxx functions
    ClientFoxx
    // ClientAsyncJob - Asynchronous job functions
    ClientAsyncJob
    // ClientLog - Log functions
    ClientLog
}
Client provides access to a single ArangoDB database server, or an entire cluster of ArangoDB servers.
func NewClient ¶
func NewClient(config ClientConfig) (Client, error)
NewClient creates a new Client based on the given config setting.
type ClientAdminBackup ¶
type ClientAdminBackup interface {
Backup() ClientBackup
}
ClientAdminBackup provides access to the Backup API via the Client interface
type ClientAsyncJob ¶ added in v1.6.1
type ClientAsyncJob interface {
AsyncJob() AsyncJobService
}
type ClientBackup ¶
type ClientBackup interface {
    // Create creates a new backup and returns its id.
    Create(ctx context.Context, opt *BackupCreateOptions) (BackupID, BackupCreateResponse, error)
    // Delete deletes the backup with given id.
    Delete(ctx context.Context, id BackupID) error
    // Restore restores the backup with given id.
    Restore(ctx context.Context, id BackupID, opt *BackupRestoreOptions) error
    // List returns meta data about some/all backups available.
    List(ctx context.Context, opt *BackupListOptions) (map[BackupID]BackupMeta, error)
    // Upload triggers an upload of the backup with given id to the remote repository,
    // using the given config, and returns the job id.
    Upload(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error)
    // Download triggers a download of the backup with given id from the remote repository,
    // using the given config, and returns the job id.
    Download(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error)
    // Progress returns the progress state of the given transfer job.
    Progress(ctx context.Context, job BackupTransferJobID) (BackupTransferProgressReport, error)
    // Abort aborts the transfer job if possible.
    Abort(ctx context.Context, job BackupTransferJobID) error
}
ClientBackup provides access to server/cluster backup functions of an ArangoDB database server or an entire cluster of ArangoDB servers.
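A minimal sketch of creating a backup and uploading it to a remote repository (the repository string and config value are placeholders; their format depends on your storage provider):

    id, _, err := client.Backup().Create(ctx, &driver.BackupCreateOptions{Label: "nightly"})
    if err != nil {
        // Handle error
    }
    // repoConfig is provider specific, e.g. credentials for an S3 bucket.
    var repoConfig interface{}
    jobID, err := client.Backup().Upload(ctx, id, "remote://my-repository", repoConfig)
    if err != nil {
        // Handle error
    }
    report, err := client.Backup().Progress(ctx, jobID)
    if err != nil {
        // Handle error
    }
    fmt.Println(report.BackupID)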
type ClientCluster ¶
type ClientCluster interface {
    // Cluster provides access to cluster wide specific operations.
    // To use this interface, an ArangoDB cluster is required.
    // If this method is called without a cluster, a PreconditionFailed error is returned.
    Cluster(ctx context.Context) (Cluster, error)
}
ClientCluster provides methods needed to access cluster functionality from a client.
type ClientConfig ¶
type ClientConfig struct {
    // Connection is the actual server/cluster connection.
    // See http.NewConnection.
    Connection Connection
    // Authentication implements authentication on the server.
    Authentication Authentication
    // SynchronizeEndpointsInterval is the interval between automatic synchronization of endpoints.
    // If this value is 0, no automatic synchronization is performed.
    // If this value is > 0, automatic synchronization is started on a goroutine.
    // This feature requires ArangoDB 3.1.15 or up.
    //
    // Deprecated: using a non-zero duration causes a goroutine leak. Please create your own
    // implementation using Client.SynchronizeEndpoints2.
    SynchronizeEndpointsInterval time.Duration
}
ClientConfig contains all settings needed to create a client.
type ClientDatabases ¶
type ClientDatabases interface {
    // Database opens a connection to an existing database.
    // If no database with given name exists, a NotFoundError is returned.
    Database(ctx context.Context, name string) (Database, error)
    // DatabaseExists returns true if a database with given name exists.
    DatabaseExists(ctx context.Context, name string) (bool, error)
    // Databases returns a list of all databases found by the client.
    Databases(ctx context.Context) ([]Database, error)
    // AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user.
    AccessibleDatabases(ctx context.Context) ([]Database, error)
    // CreateDatabase creates a new database with given name and opens a connection to it.
    // If a database with given name already exists, a DuplicateError is returned.
    CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error)
}
ClientDatabases provides access to the databases in a single ArangoDB database server, or an entire cluster of ArangoDB servers.
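For example (an existing `client` and the database name are assumptions):

    exists, err := client.DatabaseExists(ctx, "mydb")
    if err != nil {
        // Handle error
    }
    if exists {
        db, err := client.Database(ctx, "mydb")
        if err != nil {
            // Handle error
        }
        _ = db
    }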
type ClientFoxx ¶
type ClientFoxx interface {
Foxx() FoxxService
}
type ClientLog ¶ added in v1.6.0
type ClientLog interface {
    // GetLogLevels returns log levels for topics.
    GetLogLevels(ctx context.Context, opts *LogLevelsGetOptions) (LogLevels, error)
    // SetLogLevels sets log levels for the given topics.
    SetLogLevels(ctx context.Context, logLevels LogLevels, opts *LogLevelsSetOptions) error
}
ClientLog provides access to server log level operations.
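A minimal sketch of reading and raising a log level, assuming LogLevels maps topic names to level strings; the "queries" topic and "DEBUG" level are examples:

    levels, err := client.GetLogLevels(ctx, nil)
    if err != nil {
        // Handle error
    }
    levels["queries"] = "DEBUG"
    if err := client.SetLogLevels(ctx, levels, nil); err != nil {
        // Handle error
    }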
type ClientReplication ¶
type ClientReplication interface {
    // Replication provides access to replication specific operations.
    Replication() Replication
}
ClientReplication provides methods needed to access replication functionality from a client.
type ClientServerAdmin ¶
type ClientServerAdmin interface {
    // ServerMode returns the current mode in which the server/cluster is operating.
    // This call needs ArangoDB 3.3 and up.
    ServerMode(ctx context.Context) (ServerMode, error)
    // SetServerMode changes the current mode in which the server/cluster is operating.
    // This call needs a client that uses JWT authentication.
    // This call needs ArangoDB 3.3 and up.
    SetServerMode(ctx context.Context, mode ServerMode) error
    // Shutdown shuts down a specific server, optionally removing it from its cluster.
    Shutdown(ctx context.Context, removeFromCluster bool) error
    // Metrics returns the metrics of the server in Prometheus format.
    // List of metrics: https://www.arangodb.com/docs/devel/http/administration-and-monitoring-metrics.html
    // You can parse it using the Prometheus client:
    //   var parser expfmt.TextParser
    //   metricsProm, err := parser.TextToMetricFamilies(strings.NewReader(string(metrics)))
    Metrics(ctx context.Context) ([]byte, error)
    // MetricsForSingleServer returns the metrics of the specified server in Prometheus format.
    // The parameter 'serverID' is only meaningful on Coordinators.
    // List of metrics: https://www.arangodb.com/docs/devel/http/administration-and-monitoring-metrics.html
    // You can parse it using the Prometheus client:
    //   var parser expfmt.TextParser
    //   metricsProm, err := parser.TextToMetricFamilies(strings.NewReader(string(metrics)))
    MetricsForSingleServer(ctx context.Context, serverID string) ([]byte, error)
    // Statistics queries statistics from a specific server.
    //
    // Deprecated: Use Metrics instead.
    Statistics(ctx context.Context) (ServerStatistics, error)
    // ShutdownV2 shuts down a specific coordinator in a graceful manner, optionally removing it from the cluster.
    ShutdownV2(ctx context.Context, removeFromCluster, graceful bool) error
    // ShutdownInfoV2 queries information about shutdown progress.
    ShutdownInfoV2(ctx context.Context) (ShutdownInfo, error)
    // Logs retrieves logs from the server in ArangoDB 3.8.0+ format.
    Logs(ctx context.Context) (ServerLogs, error)
    // GetLicense returns the license of an ArangoDB deployment.
    GetLicense(ctx context.Context) (License, error)
}
ClientServerAdmin provides access to server administration functions of an ArangoDB database server or an entire cluster of ArangoDB servers.
type ClientServerInfo ¶
type ClientServerInfo interface {
    // Version returns version information from the connected database server.
    // Use WithDetails to configure a context that will include additional details in the returned VersionInfo.
    Version(ctx context.Context) (VersionInfo, error)
    // ServerRole returns the role of the server that answers the request.
    ServerRole(ctx context.Context) (ServerRole, error)
    // ServerID returns the ID of this server in the cluster.
    // An error is returned when calling this on a server that is not part of a cluster.
    ServerID(ctx context.Context) (string, error)
}
ClientServerInfo provides access to information about a single ArangoDB server. When your client uses multiple endpoints, it is undefined which server will respond to requests of this interface.
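For example, requesting detailed version information (WithDetails is listed in the Index; an existing `client` is assumed):

    vctx := driver.WithDetails(ctx)
    info, err := client.Version(vctx)
    if err != nil {
        // Handle error
    }
    fmt.Println(info.Version)
    role, err := client.ServerRole(ctx)
    if err != nil {
        // Handle error
    }
    fmt.Println(role)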
type ClientStats ¶
type ClientStats struct {
    HTTPConnections int64 `json:"httpConnections"`
    ConnectionTime  Stats `json:"connectionTime"`
    TotalTime       Stats `json:"totalTime"`
    RequestTime     Stats `json:"requestTime"`
    QueueTime       Stats `json:"queueTime"`
    IoTime          Stats `json:"ioTime"`
    BytesSent       Stats `json:"bytesSent"`
    BytesReceived   Stats `json:"bytesReceived"`
}
type ClientUsers ¶
type ClientUsers interface {
    // User opens a connection to an existing user.
    // If no user with given name exists, a NotFoundError is returned.
    User(ctx context.Context, name string) (User, error)
    // UserExists returns true if a user with given name exists.
    UserExists(ctx context.Context, name string) (bool, error)
    // Users returns a list of all users found by the client.
    Users(ctx context.Context) ([]User, error)
    // CreateUser creates a new user with given name and opens a connection to it.
    // If a user with given name already exists, a ConflictError is returned.
    CreateUser(ctx context.Context, name string, options *UserOptions) (User, error)
}
ClientUsers provides access to the users in a single ArangoDB database server, or an entire cluster of ArangoDB servers.
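A minimal sketch of creating a user (UserOptions is defined elsewhere in this package; the name and field values shown are assumptions):

    active := true
    u, err := client.CreateUser(ctx, "alice", &driver.UserOptions{
        Password: "secret",
        Active:   &active,
    })
    if err != nil {
        // Handle error
    }
    fmt.Println(u.Name())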
type Cluster ¶
type Cluster interface {
    // Health returns the cluster configuration & health.
    Health(ctx context.Context) (ClusterHealth, error)
    // DatabaseInventory returns the inventory of the cluster containing all collections (with entire details) of a database.
    DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error)
    // MoveShard moves a single shard of the given collection from server `fromServer` to server `toServer`.
    MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) error
    // CleanOutServer triggers activities to clean out a DBServer.
    CleanOutServer(ctx context.Context, serverID string) error
    // ResignServer triggers activities to let a DBServer resign for all shards.
    ResignServer(ctx context.Context, serverID string) error
    // IsCleanedOut checks if the DBServer with given ID has been cleaned out.
    IsCleanedOut(ctx context.Context, serverID string) (bool, error)
    // RemoveServer is a low-level option to remove a server from a cluster.
    // This function is suitable for servers of type coordinator or dbserver.
    // The use of `ClientServerAdmin.Shutdown` is highly recommended over this function.
    RemoveServer(ctx context.Context, serverID ServerID) error
}
Cluster provides access to cluster wide specific operations. To use this interface, an ArangoDB cluster is required.
type ClusterHealth ¶
type ClusterHealth struct {
    // Unique identifier of the entire cluster.
    // This ID is created when the cluster was first created.
    ID string `json:"ClusterId"`
    // Health per server.
    Health map[ServerID]ServerHealth `json:"Health"`
}
ClusterHealth contains health information for all servers in a cluster.
type Collection ¶
type Collection interface {
    // Name returns the name of the collection.
    Name() string
    // Database returns the database containing the collection.
    Database() Database
    // Status fetches the current status of the collection.
    Status(ctx context.Context) (CollectionStatus, error)
    // Count fetches the number of documents in the collection.
    Count(ctx context.Context) (int64, error)
    // Statistics returns the number of documents and additional statistical information about the collection.
    Statistics(ctx context.Context) (CollectionStatistics, error)
    // Revision fetches the revision ID of the collection.
    // The revision ID is a server-generated string that clients can use to check whether data
    // in a collection has changed since the last revision check.
    Revision(ctx context.Context) (string, error)
    // Checksum returns a checksum for the specified collection.
    // withRevisions - whether to include document revision ids in the checksum calculation.
    // withData - whether to include document body data in the checksum calculation.
    Checksum(ctx context.Context, withRevisions bool, withData bool) (CollectionChecksum, error)
    // Properties fetches extended information about the collection.
    Properties(ctx context.Context) (CollectionProperties, error)
    // SetProperties changes properties of the collection.
    SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error
    // Shards fetches shards information of the collection.
    Shards(ctx context.Context, details bool) (CollectionShards, error)
    // Load loads the collection into memory.
    Load(ctx context.Context) error
    // Unload unloads the collection from memory.
    Unload(ctx context.Context) error
    // Remove removes the entire collection.
    // If the collection does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error
    // Truncate removes all documents from the collection, but leaves the indexes intact.
    Truncate(ctx context.Context) error
    // Rename renames the collection (SINGLE server only).
    // If the collection does not exist, a NotFoundError is returned.
    Rename(ctx context.Context, newName string) error

    // All index functions
    CollectionIndexes
    // All document functions
    CollectionDocuments
}
Collection provides access to the information of a single collection, all its documents and all its indexes.
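For example, opening a collection and inspecting it (an existing database handle `db` and the collection name are assumptions):

    col, err := db.Collection(ctx, "books")
    if err != nil {
        // Handle error
    }
    count, err := col.Count(ctx)
    if err != nil {
        // Handle error
    }
    fmt.Println(col.Name(), count)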
type CollectionChecksum ¶ added in v1.4.1
type CollectionChecksum struct {
    ArangoError
    CollectionInfo
    // The collection revision id as a string.
    Revision string `json:"revision,omitempty"`
}
CollectionChecksum contains information about a collection checksum response
type CollectionDocuments ¶
type CollectionDocuments interface {
    // DocumentExists checks if a document with given key exists in the collection.
    DocumentExists(ctx context.Context, key string) (bool, error)
    // ReadDocument reads a single document with given key from the collection.
    // The document data is stored into result, the document meta data is returned.
    // If no document exists with given key, a NotFoundError is returned.
    ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error)
    // ReadDocuments reads multiple documents with given keys from the collection.
    // The documents data is stored into elements of the given results slice,
    // the documents meta data is returned.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    ReadDocuments(ctx context.Context, keys []string, results interface{}) (DocumentMetaSlice, ErrorSlice, error)
    // CreateDocument creates a single document in the collection.
    // The document data is loaded from the given document, the document meta data is returned.
    // If the document data already contains a `_key` field, this will be used as key of the new document,
    // otherwise a unique key is created.
    // A ConflictError is returned when a `_key` field contains a duplicate key, or any other field violates an index constraint.
    // To return the NEW document, prepare a context with `WithReturnNew`.
    // To wait until the document has been synced to disk, prepare a context with `WithWaitForSync`.
    CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error)
    // CreateDocuments creates multiple documents in the collection.
    // The document data is loaded from the given documents slice, the documents meta data is returned.
    // If a documents element already contains a `_key` field, this will be used as key of the new document,
    // otherwise a unique key is created.
    // If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
    // a ConflictError is returned at its index in the errors slice.
    // To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be
    // a slice with the same number of entries as the `documents` slice.
    // To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // If the create request itself fails or one of the arguments is invalid, an error is returned.
    CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error)
    // UpdateDocument updates a single document with given key in the collection.
    // The document meta data is returned.
    // To return the NEW document, prepare a context with `WithReturnNew`.
    // To return the OLD document, prepare a context with `WithReturnOld`.
    // To wait until the document has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with given key, a NotFoundError is returned.
    // If the `_id` field is present in the document body, it is always ignored.
    UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error)
    // UpdateDocuments updates multiple documents with given keys in the collection.
    // The updates are loaded from the given updates slice, the documents meta data are returned.
    // To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents.
    // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
    // To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    // If keys is nil, each element in the updates slice must contain a `_key` field.
    // If the `_id` field is present in the document body, it is always ignored.
    UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error)
    // ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument.
    // The document meta data is returned.
    // To return the NEW document, prepare a context with `WithReturnNew`.
    // To return the OLD document, prepare a context with `WithReturnOld`.
    // To wait until the document has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with given key, a NotFoundError is returned.
    // If the `_id` field is present in the document body, it is always ignored.
    ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error)
    // ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument.
    // The replacements are loaded from the given documents slice, the documents meta data are returned.
    // To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents.
    // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
    // To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    // If keys is nil, each element in the documents slice must contain a `_key` field.
    // If the `_id` field is present in the document body, it is always ignored.
    ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error)
    // RemoveDocument removes a single document with given key from the collection.
    // The document meta data is returned.
    // To return the OLD document, prepare a context with `WithReturnOld`.
    // To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with given key, a NotFoundError is returned.
    RemoveDocument(ctx context.Context, key string) (DocumentMeta, error)
    // RemoveDocuments removes multiple documents with given keys from the collection.
    // The document meta data are returned.
    // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
    // To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error)
    // ImportDocuments imports one or more documents into the collection.
    // The document data is loaded from the given documents argument, statistics are returned.
    // The documents argument can be one of the following:
    //   - An array of structs: all structs will be imported as individual documents.
    //   - An array of maps: all maps will be imported as individual documents.
    // To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // To return details about documents that could not be imported, prepare a context with `WithImportDetails`.
    ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error)
}
CollectionDocuments provides access to the documents in a single collection.
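A minimal sketch of the document round trip, including WithReturnNew (the Book type and an existing collection handle `col` are assumptions):

    type Book struct {
        Title string `json:"title"`
    }
    // Ask the server to return the NEW document alongside the meta data.
    var newDoc Book
    nctx := driver.WithReturnNew(ctx, &newDoc)
    meta, err := col.CreateDocument(nctx, Book{Title: "ArangoDB"})
    if err != nil {
        // Handle error
    }
    // Read the document back by its key.
    var read Book
    if _, err := col.ReadDocument(ctx, meta.Key, &read); err != nil {
        // Handle error
    }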
type CollectionIndexes ¶
type CollectionIndexes interface {
    // Index opens a connection to an existing index within the collection.
    // If no index with given name exists, a NotFoundError is returned.
    Index(ctx context.Context, name string) (Index, error)
    // IndexExists returns true if an index with given name exists within the collection.
    IndexExists(ctx context.Context, name string) (bool, error)
    // Indexes returns a list of all indexes in the collection.
    Indexes(ctx context.Context) ([]Index, error)
    // EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist.
    // Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    //
    // Deprecated: since ArangoDB 3.10. Use an ArangoSearch view instead.
    EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error)
    // EnsureGeoIndex creates a geo-spatial index in the collection, if it does not already exist.
    // Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location,
    // then a geo-spatial index on all documents is created using location as the path to the coordinates.
    // The value of the attribute must be a slice with at least two double values: the latitude (first value)
    // and the longitude (second value). All documents that do not have the attribute path, or whose values are not suitable, are ignored.
    // If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created
    // using latitude and longitude as the paths to the latitude and the longitude. The values of the latitude and
    // longitude attributes must each be a double. All documents that do not have the attribute paths, or whose values are not suitable, are ignored.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error)
    // EnsureHashIndex creates a hash index in the collection, if it does not already exist.
    // Fields is a slice of attribute paths.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error)
    // EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist.
    // Fields is a slice of attribute paths.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error)
    // EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist.
    // Fields is a slice of attribute paths.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error)
    // EnsureTTLIndex creates a TTL index in the collection, if it does not already exist.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureTTLIndex(ctx context.Context, field string, expireAfter int, options *EnsureTTLIndexOptions) (Index, bool, error)
    // EnsureZKDIndex creates a ZKD multi-dimensional index for the collection, if it does not already exist.
    // Note that ZKD indexes are an experimental feature in ArangoDB 3.9.
    EnsureZKDIndex(ctx context.Context, fields []string, options *EnsureZKDIndexOptions) (Index, bool, error)
    // EnsureMDIIndex creates a multi-dimensional index for the collection, if it does not already exist.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    // Available in ArangoDB 3.12 and later.
    EnsureMDIIndex(ctx context.Context, fields []string, options *EnsureMDIIndexOptions) (Index, bool, error)
    // EnsureMDIPrefixedIndex creates an additional variant of the MDI index that lets you specify additional
    // attributes for the index to narrow down the search space using equality checks.
    // Available in ArangoDB 3.12 and later.
    EnsureMDIPrefixedIndex(ctx context.Context, fields []string, options *EnsureMDIPrefixedIndexOptions) (Index, bool, error)
    // EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist.
    // Available in ArangoDB 3.10 and later.
    EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (Index, bool, error)
}
CollectionIndexes provides access to the indexes in a single collection.
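For example, ensuring a persistent index (EnsurePersistentIndexOptions is defined elsewhere in this package; the Unique field and attribute name are assumptions):

    idx, created, err := col.EnsurePersistentIndex(ctx, []string{"title"}, &driver.EnsurePersistentIndexOptions{
        Unique: true,
    })
    if err != nil {
        // Handle error
    }
    // created is true if the index was newly created, false if it already existed.
    fmt.Println(idx.Name(), created)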
type CollectionInfo ¶
type CollectionInfo struct {
    // The identifier of the collection.
    ID string `json:"id,omitempty"`
    // The name of the collection.
    Name string `json:"name,omitempty"`
    // The status of the collection.
    Status CollectionStatus `json:"status,omitempty"`
    // StatusString represents the status as a string.
    StatusString string `json:"statusString,omitempty"`
    // The type of the collection.
    Type CollectionType `json:"type,omitempty"`
    // If true then the collection is a system collection.
    IsSystem bool `json:"isSystem,omitempty"`
    // Globally unique name for the collection.
    GloballyUniqueId string `json:"globallyUniqueId,omitempty"`
    // The calculated checksum, as a string.
    Checksum string `json:"checksum,omitempty"`
}
CollectionInfo contains information about a collection
type CollectionKeyOptions ¶
type CollectionKeyOptions struct {
    // If set to true, then it is allowed to supply own key values in the _key attribute of a document.
    // If set to false, then the key generator will solely be responsible for generating keys and supplying own
    // key values in the _key attribute of documents is considered an error.
    //
    // Deprecated: Use AllowUserKeysPtr instead.
    AllowUserKeys bool `json:"-"`
    // If set to true, then it is allowed to supply own key values in the _key attribute of a document.
    // If set to false, then the key generator will solely be responsible for generating keys and supplying own
    // key values in the _key attribute of documents is considered an error.
    AllowUserKeysPtr *bool `json:"allowUserKeys,omitempty"`
    // Type specifies the type of the key generator. The currently available generators are traditional and autoincrement.
    Type KeyGeneratorType `json:"type,omitempty"`
    // Increment is the increment value for the autoincrement key generator. Not used for other key generator types.
    Increment int `json:"increment,omitempty"`
    // Offset is the initial offset value for the autoincrement key generator. Not used for other key generator types.
    Offset int `json:"offset,omitempty"`
}
CollectionKeyOptions specifies ways for creating keys of a collection.
func (*CollectionKeyOptions) Init ¶
func (c *CollectionKeyOptions) Init()
Init translates deprecated fields into their current counterparts for backward compatibility
type CollectionProperties ¶
type CollectionProperties struct {
    CollectionInfo
    ArangoError
    // WaitForSync: if true then creating, changing or removing documents will wait until the data has been synchronized to disk.
    WaitForSync bool `json:"waitForSync,omitempty"`
    // DoCompact specifies whether or not the collection will be compacted.
    DoCompact bool `json:"doCompact,omitempty"`
    // JournalSize is the maximal size setting for journals / datafiles in bytes.
    JournalSize int64 `json:"journalSize,omitempty"`
    // CacheEnabled sets the cacheEnabled option in collection properties.
    CacheEnabled bool `json:"cacheEnabled,omitempty"`
    // ComputedValues lets you configure collections to generate document attributes when documents are created or modified, using an AQL expression.
    ComputedValues []ComputedValue `json:"computedValues,omitempty"`
    // KeyOptions contains the key generation settings of the collection.
    KeyOptions struct {
        // Type specifies the type of the key generator. The currently available generators are traditional and autoincrement.
        Type KeyGeneratorType `json:"type,omitempty"`
        // AllowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document.
        // If set to false, then the key generator is solely responsible for generating keys and supplying own key values in
        // the _key attribute of documents is considered an error.
        AllowUserKeys bool   `json:"allowUserKeys,omitempty"`
        LastValue     uint64 `json:"lastValue,omitempty"`
    } `json:"keyOptions,omitempty"`
    // NumberOfShards is the number of shards of the collection.
    // Only available in a cluster setup.
    NumberOfShards int `json:"numberOfShards,omitempty"`
    // ShardKeys contains the names of document attributes that are used to determine the target shard for documents.
    // Only available in a cluster setup.
    ShardKeys []string `json:"shardKeys,omitempty"`
    // ReplicationFactor contains how many copies of each shard are kept on different DBServers.
    // Only available in a cluster setup.
    ReplicationFactor int `json:"-"`
    // Deprecated: use 'WriteConcern' instead.
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // WriteConcern contains how many copies must be available before a collection can be written.
    // It is required that 1 <= WriteConcern <= ReplicationFactor.
    // Default is 1. Not available for satellite collections.
    // Available from ArangoDB version 3.6.
    WriteConcern int `json:"writeConcern,omitempty"`
    // SmartJoinAttribute: see the documentation for smart joins.
    // This requires ArangoDB Enterprise Edition.
    SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"`
    // ShardingStrategy specifies the name of the sharding strategy to use for the collection.
    // Cannot be changed after creation.
    ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"`
    // DistributeShardsLike specifies that the sharding of a collection follows that of another one.
    DistributeShardsLike string `json:"distributeShardsLike,omitempty"`
    // UsesRevisionsAsDocumentIds specifies if the new format introduced in 3.7 is used for this collection.
    UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"`
    // SyncByRevision specifies if the new MerkleTree-based sync protocol can be used on the collection.
    SyncByRevision bool `json:"syncByRevision,omitempty"`
    // The collection revision id as a string.
    Revision string `json:"revision,omitempty"`
    // Schema for collection validation.
    Schema *CollectionSchemaOptions `json:"schema,omitempty"`
    // IsDisjoint sets the isDisjoint flag for a Graph. Requires ArangoDB 3.7+.
    IsDisjoint   bool `json:"isDisjoint,omitempty"`
    IsSmartChild bool `json:"isSmartChild,omitempty"`
    InternalValidatorType *int `json:"internalValidatorType,omitempty"`
    // IsSmart: set to create a smart edge or vertex collection.
    // This requires ArangoDB Enterprise Edition.
    IsSmart bool `json:"isSmart,omitempty"`
    // StatusString represents the status as a string.
    StatusString string `json:"statusString,omitempty"`
    TempObjectId string `json:"tempObjectId,omitempty"`
    ObjectId     string `json:"objectId,omitempty"`
}
CollectionProperties contains extended information about a collection.
func (*CollectionProperties) IsSatellite ¶
func (p *CollectionProperties) IsSatellite() bool
IsSatellite returns true if the collection is a satellite collection
type CollectionSchemaLevel ¶
type CollectionSchemaLevel string
const (
    CollectionSchemaLevelNone     CollectionSchemaLevel = "none"
    CollectionSchemaLevelNew      CollectionSchemaLevel = "new"
    CollectionSchemaLevelModerate CollectionSchemaLevel = "moderate"
    CollectionSchemaLevelStrict   CollectionSchemaLevel = "strict"
)
type CollectionSchemaOptions ¶
type CollectionSchemaOptions struct {
    Rule    interface{}           `json:"rule,omitempty"`
    Level   CollectionSchemaLevel `json:"level,omitempty"`
    Message string                `json:"message,omitempty"`
    Type    string                `json:"type,omitempty"`
}
func (*CollectionSchemaOptions) LoadRule ¶
func (d *CollectionSchemaOptions) LoadRule(data []byte) error
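A sketch of building schema validation options, assuming LoadRule unmarshals the raw JSON into the Rule field; the JSON Schema shown is an example:

    var schema driver.CollectionSchemaOptions
    rule := []byte(`{"properties": {"title": {"type": "string"}}}`)
    if err := schema.LoadRule(rule); err != nil {
        // Handle error
    }
    schema.Level = driver.CollectionSchemaLevelModerate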
type CollectionShards ¶
type CollectionShards struct {
    CollectionProperties
    // Shards is a list of shards that belong to the collection.
    // Each shard contains a list of DB servers where the first one is the leader and the rest are followers.
    Shards map[ShardID][]ServerID `json:"shards,omitempty"`
}
CollectionShards contains shards information about a collection.
type CollectionStatistics ¶
type CollectionStatistics struct {
    ArangoError
    CollectionProperties
    // The number of documents currently present in the collection.
    Count int64 `json:"count,omitempty"`
    // The maximal size of a journal or datafile in bytes.
    JournalSize int64 `json:"journalSize,omitempty"`
    Figures     struct {
        DataFiles struct {
            // The number of datafiles.
            Count int64 `json:"count,omitempty"`
            // The total filesize of datafiles (in bytes).
            FileSize int64 `json:"fileSize,omitempty"`
        } `json:"datafiles"`
        // The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
        UncollectedLogfileEntries int64 `json:"uncollectedLogfileEntries,omitempty"`
        // The number of references to documents in datafiles that JavaScript code currently holds.
        // This information can be used for debugging compaction and unload issues.
        DocumentReferences int64 `json:"documentReferences,omitempty"`
        CompactionStatus   struct {
            // The action that was performed when the compaction was last run for the collection.
            // This information can be used for debugging compaction issues.
            Message string `json:"message,omitempty"`
            // The point in time the compaction for the collection was last executed.
            // This information can be used for debugging compaction issues.
            Time time.Time `json:"time,omitempty"`
        } `json:"compactionStatus"`
        Compactors struct {
            // The number of compactor files.
            Count int64 `json:"count,omitempty"`
            // The total filesize of all compactor files (in bytes).
            FileSize int64 `json:"fileSize,omitempty"`
        } `json:"compactors"`
        Dead struct {
            // The number of dead documents. This includes document versions that have been deleted or replaced by a newer version.
            // Documents deleted or replaced that are contained in the write-ahead log only are not reported in this figure.
            Count int64 `json:"count,omitempty"`
            // The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reported in this figure.
            Deletion int64 `json:"deletion,omitempty"`
            // The total size in bytes used by all dead documents.
            Size int64 `json:"size,omitempty"`
        } `json:"dead"`
        Indexes struct {
            // The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
            Count int64 `json:"count,omitempty"`
            // The total memory allocated for indexes in bytes.
            Size int64 `json:"size,omitempty"`
        } `json:"indexes"`
        ReadCache struct {
            // The number of revisions of this collection stored in the document revisions cache.
            Count int64 `json:"count,omitempty"`
            // The memory used for storing the revisions of this collection in the document revisions cache (in bytes).
            // This figure does not include the document data but only mappings from document revision ids to cache entry locations.
            Size int64 `json:"size,omitempty"`
        } `json:"readcache"`
        // An optional string value that contains information about which object type is at the head of the collection's cleanup queue.
        // This information can be used for debugging compaction and unload issues.
        WaitingFor string `json:"waitingFor,omitempty"`
        Alive      struct {
            // The number of currently active documents in all datafiles and journals of the collection.
            // Documents that are contained in the write-ahead log only are not reported in this figure.
            Count int64 `json:"count,omitempty"`
            // The total size in bytes used by all active documents of the collection.
            // Documents that are contained in the write-ahead log only are not reported in this figure.
            Size int64 `json:"size,omitempty"`
        } `json:"alive"`
        // The tick of the last marker that was stored in a journal of the collection.
        // This might be 0 if the collection does not yet have a journal.
        LastTick int64 `json:"lastTick,omitempty"`
        Journals struct {
            // The number of journal files.
            Count int64 `json:"count,omitempty"`
            // The total filesize of all journal files (in bytes).
            FileSize int64 `json:"fileSize,omitempty"`
        } `json:"journals"`
        Revisions struct {
            // The number of revisions of this collection managed by the storage engine.
            Count int64 `json:"count,omitempty"`
            // The memory used for storing the revisions of this collection in the storage engine (in bytes).
            // This figure does not include the document data but only mappings from document revision ids to storage engine datafile positions.
            Size int64 `json:"size,omitempty"`
        } `json:"revisions"`
        DocumentsSize *int64 `json:"documentsSize,omitempty"`
        // RocksDB cache statistics.
        CacheInUse *bool  `json:"cacheInUse,omitempty"`
        CacheSize  *int64 `json:"cacheSize,omitempty"`
        CacheUsage *int64 `json:"cacheUsage,omitempty"`
    } `json:"figures"`
}
CollectionStatistics contains the number of documents and additional statistical information about a collection.
type CollectionStatus ¶
type CollectionStatus int
CollectionStatus indicates the status of a collection.
type CommitTransactionOptions ¶
type CommitTransactionOptions struct{}
CommitTransactionOptions provides options for CommitTransaction. Currently unused
type ComputedValue ¶ added in v1.4.0
type ComputedValue struct {
    // Name is the name of the target attribute. It can only be a top-level attribute, but you
    // may return a nested object. It cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,
    // or a shard key attribute.
    Name string `json:"name"`
    // Expression is an AQL `RETURN` operation with an expression that computes the desired value.
    Expression string `json:"expression"`
    // ComputeOn is an array of strings that defines on which write operations the value shall be
    // computed. The possible values are `"insert"`, `"update"`, and `"replace"`.
    // The default is `["insert", "update", "replace"]`.
    ComputeOn []ComputeOn `json:"computeOn,omitempty"`
    // Overwrite specifies whether the computed value shall take precedence over a user-provided or existing attribute.
    Overwrite bool `json:"overwrite"`
    // FailOnWarning specifies whether to let the write operation fail if the expression produces a warning. The default is false.
    FailOnWarning *bool `json:"failOnWarning,omitempty"`
    // KeepNull specifies whether the result of the expression shall be stored if it evaluates to `null`.
    // This can be used to skip the value computation if any pre-conditions are not met.
    KeepNull *bool `json:"keepNull,omitempty"`
}
type Connection ¶
type Connection interface {
    // NewRequest creates a new request with given method and path.
    NewRequest(method, path string) (Request, error)
    // Do performs a given request, returning its response.
    Do(ctx context.Context, req Request) (Response, error)
    // Unmarshal unmarshals the given raw object into the given result interface.
    Unmarshal(data RawObject, result interface{}) error
    // Endpoints returns the endpoints used by this connection.
    Endpoints() []string
    // UpdateEndpoints reconfigures the connection to use the given endpoints.
    UpdateEndpoints(endpoints []string) error
    // SetAuthentication creates a copy of the connection wrapper for the given auth parameters.
    SetAuthentication(Authentication) (Connection, error)
    // Protocols returns all protocols used by this connection.
    Protocols() ProtocolSet
}
Connection is a connection to a database server using a specific protocol.
type ContentType ¶
type ContentType int
ContentType identifies the type of encoding to use for the data.
const (
    // ContentTypeJSON encodes data as JSON.
    ContentTypeJSON ContentType = iota
    // ContentTypeVelocypack encodes data as Velocypack.
    ContentTypeVelocypack
)
func (ContentType) String ¶
func (ct ContentType) String() string
type ContextKey ¶
type ContextKey string
ContextKey is an internal type used for holding values in a `context.Context`. Do not use!
type CreateCollectionOptions ¶
type CreateCollectionOptions struct {
    // CacheEnabled sets the cacheEnabled option in collection properties.
    CacheEnabled *bool `json:"cacheEnabled,omitempty"`
    // ComputedValues lets you configure collections to generate document attributes when documents are created or modified, using an AQL expression.
    ComputedValues []ComputedValue `json:"computedValues,omitempty"`
    // This field is used for internal purposes only. DO NOT USE.
    DistributeShardsLike string `json:"distributeShardsLike,omitempty"`
    // DoCompact specifies whether the collection will be compacted (default is true).
    DoCompact *bool `json:"doCompact,omitempty"`
    // IndexBuckets is the number of buckets into which indexes using a hash table are split.
    // The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
    // For very large collections one should increase this to avoid long pauses when the hash
    // table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel.
    // For example, 64 might be a sensible value for a collection with 100 000 000 documents.
    // Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions.
    // Changes are applied when the collection is loaded the next time.
    IndexBuckets int `json:"indexBuckets,omitempty"`
    // InternalValidatorType is available from ArangoDB version 3.9.
    InternalValidatorType int `json:"internalValidatorType,omitempty"`
    // IsDisjoint sets the isDisjoint flag for a Graph. Requires ArangoDB 3.7+.
    IsDisjoint bool `json:"isDisjoint,omitempty"`
    // IsSmart: set to create a smart edge or vertex collection.
    // This requires ArangoDB Enterprise Edition.
    IsSmart bool `json:"isSmart,omitempty"`
    // IsSystem: if true, create a system collection. In this case the collection name should start with an underscore.
    // End users should normally create non-system collections only. API implementors may be required to create system
    // collections on very special occasions, but normally a regular collection will do. (The default is false.)
    IsSystem bool `json:"isSystem,omitempty"`
    // IsVolatile: if true, the collection data is kept in memory only and not made persistent.
    // Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also
    // cause full loss of data in the collection. Setting this option will make the resulting collection slightly faster
    // than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any
    // CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only,
    // and not for data that cannot be re-created otherwise. (The default is false.)
    IsVolatile bool `json:"isVolatile,omitempty"`
    // JournalSize is the maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB).
    // (The default is a configuration parameter.)
    JournalSize int `json:"journalSize,omitempty"`
    // KeyOptions specifies how keys in the collection are created.
    KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"`
    // Deprecated: use 'WriteConcern' instead.
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // NumberOfShards: in a cluster, this value determines the number of shards to create for the collection.
    // In a single server setup, this option is meaningless. (Default is 1.)
    NumberOfShards int `json:"numberOfShards,omitempty"`
    // ReplicationFactor: in a cluster (default is 1), this attribute determines how many copies of each shard are kept on different DBServers.
    // The value 1 means that only one copy (no synchronous replication) is kept.
    // A value of k means that k-1 replicas are kept. Any two copies reside on different DBServers.
    // Replication between them is synchronous, that is, every write operation to the "leader" copy will be replicated to all "follower" replicas,
    // before the write operation is reported successful. If a server fails, this is detected automatically
    // and one of the servers holding copies takes over, usually without an error being reported.
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Schema for collection validation.
    Schema *CollectionSchemaOptions `json:"schema,omitempty"`
    // ShardingStrategy specifies the name of the sharding strategy to use for the collection.
    // Must be one of the ShardingStrategy* values.
    ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"`
    // ShardKeys: in a cluster, this attribute determines which document attributes are used to
    // determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes.
    // The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard.
    // Note: values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
    // The default is []string{"_key"}.
    ShardKeys []string `json:"shardKeys,omitempty"`
    // SmartGraphAttribute must be set to the attribute that will be used for sharding of smart graphs.
    // All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices.
    // This requires ArangoDB Enterprise Edition.
    SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"`
    // SmartJoinAttribute: in the specific case that two collections have the same number of shards, the data of the two collections can
    // be co-located on the same server for the same shard key values. In this case the extra hop via the coordinator will not be necessary.
    // See the documentation for smart joins.
    // This requires ArangoDB Enterprise Edition.
    SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"`
    // SyncByRevision is available from ArangoDB version 3.7.
    SyncByRevision bool `json:"syncByRevision,omitempty"`
    // Type is the type of the collection to create. (Default is CollectionTypeDocument.)
    Type CollectionType `json:"type,omitempty"`
    // WaitForSync: if true, the data is synchronized to disk before returning from a document create, update, replace or removal operation. (Default: false.)
    WaitForSync bool `json:"waitForSync,omitempty"`
    // WriteConcern contains how many copies must be available before a collection can be written.
    // It is required that 1 <= WriteConcern <= ReplicationFactor.
    // Default is 1. Not available for satellite collections.
    // Available from ArangoDB version 3.6.
    WriteConcern int `json:"writeConcern,omitempty"`
}
CreateCollectionOptions contains options that customize the creating of a collection.
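For example, creating a sharded collection (a cluster deployment, an existing database handle `db` and the values shown are assumptions):

    col, err := db.CreateCollection(ctx, "books", &driver.CreateCollectionOptions{
        NumberOfShards:    3,
        ReplicationFactor: 2,
        WaitForSync:       true,
    })
    if err != nil {
        // Handle error
    }
    _ = col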
func (*CreateCollectionOptions) Init ¶
func (c *CreateCollectionOptions) Init()
Init translates deprecated fields into their current counterparts for backward compatibility
type CreateDatabaseDefaultOptions ¶
type CreateDatabaseDefaultOptions struct {
    // Default replication factor for collections in database.
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Default write concern for collections in database.
    WriteConcern int `json:"writeConcern,omitempty"`
    // Default sharding for collections in database.
    Sharding DatabaseSharding `json:"sharding,omitempty"`
    // Replication version to use for this database.
    // Available since ArangoDB version 3.11.
    ReplicationVersion DatabaseReplicationVersion `json:"replicationVersion,omitempty"`
}
CreateDatabaseDefaultOptions contains options that change defaults for collections
type CreateDatabaseOptions ¶
type CreateDatabaseOptions struct { // List of users to initially create for the new database. User information will not be changed for users that already exist. // If users is not specified or does not contain any users, a default user root will be created with an empty string password. // This ensures that the new database will be accessible after it is created. Users []CreateDatabaseUserOptions `json:"users,omitempty"` // Options contains database defaults. Options CreateDatabaseDefaultOptions `json:"options,omitempty"` }
CreateDatabaseOptions contains options that customize the creating of a database.
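A sketch of creating a database with an initial user and collection defaults (assuming `ctx` and a `driver.Client` value `c`; names and credentials are illustrative):

db, err := c.CreateDatabase(ctx, "mydb", &driver.CreateDatabaseOptions{
    Users: []driver.CreateDatabaseUserOptions{
        {UserName: "alice", Password: "secret"},
    },
    Options: driver.CreateDatabaseDefaultOptions{
        ReplicationFactor: 3, // default for collections created in this database
        WriteConcern:      2,
    },
})
if err != nil {
    // Handle error
}
_ = db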
type CreateDatabaseUserOptions ¶
type CreateDatabaseUserOptions struct { // Login name of the user to be created. UserName string `json:"user,omitempty"` // The user password as a string. If not specified, it will default to an empty string. Password string `json:"passwd,omitempty"` // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database. Active *bool `json:"active,omitempty"` // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. Extra interface{} `json:"extra,omitempty"` }
CreateDatabaseUserOptions contains options for creating a single user for a database.
type CreateEdgeCollectionOptions ¶ added in v1.3.0
type CreateEdgeCollectionOptions struct { // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) // Requires ArangoDB 3.9+ Satellites []string `json:"satellites,omitempty"` }
CreateEdgeCollectionOptions contains optional parameters for creating a new edge collection.
type CreateGraphOptions ¶
type CreateGraphOptions struct { // OrphanVertexCollections is an array of additional vertex collections used in the graph. // These are vertices for which there are no edges linking these vertices with anything. OrphanVertexCollections []string // EdgeDefinitions is an array of edge definitions for the graph. EdgeDefinitions []EdgeDefinition // IsSmart defines if the created graph should be smart. // This only has effect in Enterprise Edition. IsSmart bool // SmartGraphAttribute is the attribute name that is used to smartly shard the vertices of a graph. // Every vertex in this Graph has to have this attribute. // Cannot be modified later. SmartGraphAttribute string // NumberOfShards is the number of shards that is used for every collection within this graph. // Cannot be modified later. NumberOfShards int // ReplicationFactor is the replication factor that is used for every collection within this graph. // Cannot be modified later. ReplicationFactor int // WriteConcern is the minimum replication factor (write concern) that is used for every collection within this graph. // Cannot be modified later. WriteConcern int // IsDisjoint sets the isDisjoint flag for the graph. Requires ArangoDB 3.7+ IsDisjoint bool // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) // Requires ArangoDB 3.9+ Satellites []string `json:"satellites,omitempty"` }
CreateGraphOptions contains options that customize the creating of a graph.
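For example, a sketch of creating a graph with one edge definition and an orphan vertex collection (assuming `ctx` and `db`; collection names are illustrative):

graph, err := db.CreateGraphV2(ctx, "social", &driver.CreateGraphOptions{
    EdgeDefinitions: []driver.EdgeDefinition{
        {
            Collection: "knows",             // edge collection
            From:       []string{"persons"}, // source vertex collections
            To:         []string{"persons"}, // target vertex collections
        },
    },
    OrphanVertexCollections: []string{"places"},
})
if err != nil {
    // Handle error
}
_ = graph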
type CreateVertexCollectionOptions ¶ added in v1.3.0
type CreateVertexCollectionOptions struct { // Satellites contains an array of collection names that will be used to create SatelliteCollections for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only) // Requires ArangoDB 3.9+ Satellites []string `json:"satellites,omitempty"` }
CreateVertexCollectionOptions contains optional parameters for creating a new vertex collection.
type Cursor ¶
type Cursor interface { io.Closer // HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error. HasMore() bool // ReadDocument reads the next document from the cursor. // The document data is stored into result, the document metadata is returned. // If the cursor has no more documents, a NoMoreDocuments error is returned. // Note: If the query (resulting in this cursor) does not return documents, // then the returned DocumentMeta will be empty. ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error) // RetryReadDocument reads the last document from the cursor one more time. // It can be used, e.g., in case of a network error during ReadDocument. // It requires 'driver.WithQueryAllowRetry' to be set to true on the Context during Cursor creation. RetryReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error) // Count returns the total number of result documents available. // A valid return value is only available when the cursor has been created with a context that was // prepared with `WithQueryCount` and not with `WithQueryStream`. Count() int64 // Statistics returns the query execution statistics for this cursor. // This might not be valid if the cursor has been created with a context that was // prepared with `WithQueryStream` Statistics() QueryStatistics // Extra returns the query extras for this cursor. Extra() QueryExtra }
Cursor is returned from a query, used to iterate over a list of documents. Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
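A typical iteration loop, as a sketch (assuming `ctx`, `db`, and a document type `Book` defined by the caller):

ctx = driver.WithQueryCount(ctx) // make Count() valid on the resulting cursor
cursor, err := db.Query(ctx, "FOR b IN books RETURN b", nil)
if err != nil {
    // Handle error
}
defer cursor.Close() // always close the cursor to release server resources
for cursor.HasMore() {
    var book Book
    if _, err := cursor.ReadDocument(ctx, &book); err != nil {
        // Handle error
    }
}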
type Database ¶
type Database interface { // Name returns the name of the database. Name() string // Info fetches information about the database. Info(ctx context.Context) (DatabaseInfo, error) // EngineInfo returns information about the database engine being used. // Note: When your cluster has multiple endpoints (cluster), you will get information // from the server that is currently being used. // If you want to know exactly which server the information is from, use a client // with only a single endpoint and avoid automatic synchronization of endpoints. EngineInfo(ctx context.Context) (EngineInfo, error) // Remove removes the entire database. // If the database does not exist, a NotFoundError is returned. Remove(ctx context.Context) error // DatabaseCollections - Collection functions DatabaseCollections // DatabaseViews - View functions DatabaseViews // DatabaseGraphs - Graph functions DatabaseGraphs // DatabasePregels - Pregel functions DatabasePregels // DatabaseStreamingTransactions - Streaming Transactions functions DatabaseStreamingTransactions // DatabaseArangoSearchAnalyzers - ArangoSearch Analyzers API DatabaseArangoSearchAnalyzers // Query performs an AQL query, returning a cursor used to iterate over the returned documents. // Note that the returned Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed. Query(ctx context.Context, query string, bindVars map[string]interface{}) (Cursor, error) // ValidateQuery validates an AQL query. // When the query is valid, nil is returned; otherwise an error is returned. // The query is not executed. ValidateQuery(ctx context.Context, query string) error // ExplainQuery explains an AQL query and returns information about it. ExplainQuery(ctx context.Context, query string, bindVars map[string]interface{}, opts *ExplainQueryOptions) (ExplainQueryResult, error) // OptimizerRulesForQueries returns the available optimizer rules for AQL queries. // It returns an array of objects that contain the name of each available rule and its respective flags. OptimizerRulesForQueries(ctx context.Context) ([]QueryRule, error) // Transaction performs a JavaScript transaction. The result of the transaction function is returned. Transaction(ctx context.Context, action string, options *TransactionOptions) (interface{}, error) }
Database provides access to all collections & graphs in a single database.
type DatabaseArangoSearchAnalyzers ¶
type DatabaseArangoSearchAnalyzers interface { // Ensure ensures that the given analyzer exists. If it does not exist it is created. // The function returns whether the analyzer already existed or an error. EnsureAnalyzer(ctx context.Context, analyzer ArangoSearchAnalyzerDefinition) (bool, ArangoSearchAnalyzer, error) // Get returns the analyzer definition for the given analyzer or returns an error Analyzer(ctx context.Context, name string) (ArangoSearchAnalyzer, error) // List returns a list of all analyzers Analyzers(ctx context.Context) ([]ArangoSearchAnalyzer, error) }
type DatabaseCollections ¶
type DatabaseCollections interface { // Collection opens a connection to an existing collection within the database. // If no collection with given name exists, a NotFoundError is returned. Collection(ctx context.Context, name string) (Collection, error) // CollectionExists returns true if a collection with given name exists within the database. CollectionExists(ctx context.Context, name string) (bool, error) // Collections returns a list of all collections in the database. Collections(ctx context.Context) ([]Collection, error) // CreateCollection creates a new collection with given name and options, and opens a connection to it. // If a collection with given name already exists within the database, a DuplicateError is returned. CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error) }
DatabaseCollections provides access to all collections in a single database.
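A common open-or-create pattern built from these functions, as a sketch (assuming `ctx` and `db`; note the check and the create are not atomic, so a concurrent creator can still cause a DuplicateError):

var col driver.Collection
found, err := db.CollectionExists(ctx, "books")
if err != nil {
    // Handle error
}
if found {
    col, err = db.Collection(ctx, "books")
} else {
    col, err = db.CreateCollection(ctx, "books", nil) // nil options use server defaults
}
if err != nil {
    // Handle error
}
_ = col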
type DatabaseGraphs ¶
type DatabaseGraphs interface { // Graph opens a connection to an existing graph within the database. // If no graph with given name exists, a NotFoundError is returned. Graph(ctx context.Context, name string) (Graph, error) // GraphExists returns true if a graph with given name exists within the database. GraphExists(ctx context.Context, name string) (bool, error) // Graphs returns a list of all graphs in the database. Graphs(ctx context.Context) ([]Graph, error) // CreateGraph creates a new graph with given name and options, and opens a connection to it. // If a graph with given name already exists within the database, a DuplicateError is returned. // Deprecated: since ArangoDB 3.9 - please use CreateGraphV2 instead. CreateGraph(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) // CreateGraphV2 creates a new graph with given name and options, and opens a connection to it. // If a graph with given name already exists within the database, a DuplicateError is returned. CreateGraphV2(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) }
DatabaseGraphs provides access to all graphs in a single database.
type DatabaseInfo ¶
type DatabaseInfo struct { // The identifier of the database. ID string `json:"id,omitempty"` // The name of the database. Name string `json:"name,omitempty"` // The filesystem path of the database. Path string `json:"path,omitempty"` // If true then the database is the _system database. IsSystem bool `json:"isSystem,omitempty"` // Default replication factor for collections in database ReplicationFactor int `json:"replicationFactor,omitempty"` // Default write concern for collections in database WriteConcern int `json:"writeConcern,omitempty"` // Default sharding for collections in database Sharding DatabaseSharding `json:"sharding,omitempty"` // Replication version used for this database ReplicationVersion DatabaseReplicationVersion `json:"replicationVersion,omitempty"` }
DatabaseInfo contains information about a database
type DatabaseInventory ¶
type DatabaseInventory struct { // Details of database, this is present since ArangoDB 3.6 Info DatabaseInfo `json:"properties,omitempty"` // Details of all collections Collections []InventoryCollection `json:"collections,omitempty"` // Details of all views Views []InventoryView `json:"views,omitempty"` State State `json:"state,omitempty"` Tick string `json:"tick,omitempty"` }
DatabaseInventory describes a detailed state of the collections & shards of a specific database within a cluster.
func (DatabaseInventory) CollectionByName ¶
func (i DatabaseInventory) CollectionByName(name string) (InventoryCollection, bool)
CollectionByName returns the InventoryCollection with given name. Returns false if not found.
func (DatabaseInventory) IsReady ¶
func (i DatabaseInventory) IsReady() bool
IsReady returns true if the IsReady flag of all collections is set.
func (DatabaseInventory) PlanVersion ¶
func (i DatabaseInventory) PlanVersion() int64
PlanVersion returns the plan version of the first collection in the given inventory.
func (DatabaseInventory) ViewByName ¶
func (i DatabaseInventory) ViewByName(name string) (InventoryView, bool)
ViewByName returns the InventoryView with given name. Returns false if not found.
type DatabasePregels ¶ added in v1.4.0
type DatabasePregels interface { // StartJob - Start the execution of a Pregel algorithm StartJob(ctx context.Context, options PregelJobOptions) (string, error) // GetJob - Get the status of a Pregel execution GetJob(ctx context.Context, id string) (*PregelJob, error) // GetJobs - Returns a list of currently running and recently finished Pregel jobs without retrieving their results. GetJobs(ctx context.Context) ([]*PregelJob, error) // CancelJob - Cancel an ongoing Pregel execution CancelJob(ctx context.Context, id string) error }
DatabasePregels provides access to all Pregel jobs in a single database. Deprecated: it will be removed in version 3.12.
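A sketch of starting a PageRank job and checking its status once; the PregelJobOptions fields shown (Algorithm, GraphName) are assumptions about that options type, which is documented elsewhere in this package:

id, err := db.StartJob(ctx, driver.PregelJobOptions{
    Algorithm: driver.PregelAlgorithmPageRank, // assumed field name
    GraphName: "social",                       // assumed field name
})
if err != nil {
    // Handle error
}
job, err := db.GetJob(ctx, id)
if err != nil {
    // Handle error
}
_ = job.State // inspect PregelJobState; poll until the job is done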
type DatabaseReplicationVersion ¶ added in v1.6.0
type DatabaseReplicationVersion string
DatabaseReplicationVersion defines the replication protocol version to use for this database. Available since ArangoDB version 3.11. Note: this feature is still considered experimental and should not be used in production.
const ( DatabaseReplicationVersionOne DatabaseReplicationVersion = "1" DatabaseReplicationVersionTwo DatabaseReplicationVersion = "2" )
type DatabaseSharding ¶
type DatabaseSharding string
const ( DatabaseShardingSingle DatabaseSharding = "single" DatabaseShardingNone DatabaseSharding = "" )
type DatabaseStreamingTransactions ¶
type DatabaseStreamingTransactions interface { BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (TransactionID, error) CommitTransaction(ctx context.Context, tid TransactionID, opts *CommitTransactionOptions) error AbortTransaction(ctx context.Context, tid TransactionID, opts *AbortTransactionOptions) error TransactionStatus(ctx context.Context, tid TransactionID) (TransactionStatusRecord, error) }
DatabaseStreamingTransactions provides access to the Streaming Transactions API
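A sketch of the begin/commit flow; the TransactionCollections field names (Read, Write, Exclusive) and the driver.WithTransactionID context helper are assumptions based on the rest of this package:

col, err := db.Collection(ctx, "books")
if err != nil {
    // Handle error
}
tid, err := db.BeginTransaction(ctx, driver.TransactionCollections{
    Write: []string{"books"}, // assumed field name
}, nil)
if err != nil {
    // Handle error
}
tctx := driver.WithTransactionID(ctx, tid) // assumed helper; routes operations into the transaction
if _, err := col.CreateDocument(tctx, map[string]interface{}{"title": "AQL in Action"}); err != nil {
    db.AbortTransaction(ctx, tid, nil)
    // Handle error
}
if err := db.CommitTransaction(ctx, tid, nil); err != nil {
    // Handle error
}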
type DatabaseViews ¶
type DatabaseViews interface { // View opens a connection to an existing view within the database. // If no view with given name exists, a NotFoundError is returned. View(ctx context.Context, name string) (View, error) // ViewExists returns true if a view with given name exists within the database. ViewExists(ctx context.Context, name string) (bool, error) // Views returns a list of all views in the database. Views(ctx context.Context) ([]View, error) // CreateArangoSearchView creates a new view of type ArangoSearch, // with given name and options, and opens a connection to it. // If a view with given name already exists within the database, a ConflictError is returned. CreateArangoSearchView(ctx context.Context, name string, options *ArangoSearchViewProperties) (ArangoSearchView, error) // CreateArangoSearchAliasView creates an ArangoSearch alias view with given name and options, and opens a connection to it. // If a view with given name already exists within the database, a ConflictError is returned. CreateArangoSearchAliasView(ctx context.Context, name string, options *ArangoSearchAliasViewProperties) (ArangoSearchViewAlias, error) }
DatabaseViews provides access to all views in a single database. Views are only available in ArangoDB 3.4 and higher.
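Creating an ArangoSearch view, as a minimal sketch (passing nil options relies on server-side defaults; link the view to collections via ArangoSearchViewProperties when needed):

view, err := db.CreateArangoSearchView(ctx, "booksView", nil)
if err != nil {
    // Handle error
}
_ = view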
type DocumentID ¶
type DocumentID string
DocumentID references a document in a collection. Format: collection/_key
func NewDocumentID ¶
func NewDocumentID(collection, key string) DocumentID
NewDocumentID creates a new document ID from the given collection, key pair.
func (DocumentID) Collection ¶
func (id DocumentID) Collection() string
Collection returns the collection part of the ID.
func (DocumentID) IsEmpty ¶
func (id DocumentID) IsEmpty() bool
IsEmpty returns true if the given ID is empty, false otherwise.
func (DocumentID) String ¶
func (id DocumentID) String() string
String returns a string representation of the document ID.
func (DocumentID) ValidateOrEmpty ¶
func (id DocumentID) ValidateOrEmpty() error
ValidateOrEmpty validates the given id unless it is empty. In case of empty, nil is returned.
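The DocumentID helpers above compose as follows (a small sketch; the collection and key are illustrative):

id := driver.NewDocumentID("books", "12345") // yields "books/12345"
_ = id.Collection()                          // "books"
_ = id.String()                              // "books/12345"
if err := id.ValidateOrEmpty(); err != nil {
    // Handle error
}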
type DocumentMeta ¶
type DocumentMeta struct { Key string `json:"_key,omitempty"` ID DocumentID `json:"_id,omitempty"` Rev string `json:"_rev,omitempty"` OldRev string `json:"_oldRev,omitempty"` }
DocumentMeta contains all meta data used to identify a document.
type DocumentMetaSlice ¶
type DocumentMetaSlice []DocumentMeta
DocumentMetaSlice is a slice of DocumentMeta elements
func (DocumentMetaSlice) IDs ¶
func (l DocumentMetaSlice) IDs() []DocumentID
IDs returns the IDs of all elements.
func (DocumentMetaSlice) Keys ¶
func (l DocumentMetaSlice) Keys() []string
Keys returns the keys of all elements.
func (DocumentMetaSlice) Revs ¶
func (l DocumentMetaSlice) Revs() []string
Revs returns the revisions of all elements.
type EdgeDefinition ¶
type EdgeDefinition struct { // The name of the edge collection to be used. Collection string `json:"collection"` // To contains the names of one or more vertex collections that can contain target vertices. To []string `json:"to"` // From contains the names of one or more vertex collections that can contain source vertices. From []string `json:"from"` // Options contains optional parameters Options CreateEdgeCollectionOptions `json:"options,omitempty"` }
EdgeDefinition contains all information needed to define a single edge in a graph.
type EdgeDocument ¶
type EdgeDocument struct { From DocumentID `json:"_from,omitempty"` To DocumentID `json:"_to,omitempty"` }
EdgeDocument is a minimal document for use in edge collections. You can use it in your own edge document structures or use a completely custom type. If you use your own, make sure to include `_from` and `_to` fields.
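For example, a custom edge type can embed EdgeDocument to inherit the `_from`/`_to` fields (a sketch; the Knows type is illustrative):

type Knows struct {
    driver.EdgeDocument       // provides _from and _to
    Since int `json:"since"`  // custom payload
}

edge := Knows{
    EdgeDocument: driver.EdgeDocument{
        From: driver.NewDocumentID("persons", "alice"),
        To:   driver.NewDocumentID("persons", "bob"),
    },
    Since: 2019,
}
_ = edge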
type EngineInfo ¶
type EngineInfo struct { Type EngineType `json:"name"` Supports map[string]interface{} `json:"supports,omitempty"` }
EngineInfo contains information about the database engine being used.
type EngineType ¶
type EngineType string
EngineType indicates type of database engine being used.
func (EngineType) String ¶
func (t EngineType) String() string
type EnsureFullTextIndexOptions ¶ deprecated
type EnsureFullTextIndexOptions struct { // MinLength is the minimum character length of words to index. Will default to a server-defined // value if unspecified (0). It is thus recommended to set this value explicitly when creating the index. MinLength int // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates *bool }
Deprecated: since version 3.10. Use an ArangoSearch view instead. EnsureFullTextIndexOptions contains specific options for creating a full text index.
type EnsureGeoIndexOptions ¶
type EnsureGeoIndexOptions struct { // If a geo-spatial index on a location is constructed and GeoJSON is true, then the order within the array // is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions GeoJSON bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates *bool // LegacyPolygons determines if the to-be-created index should use legacy polygons or not. // It is relevant for those that have geoJson set to true only. // Old geo indexes from versions from below 3.10 will always implicitly have the legacyPolygons option set to true. // Newly generated geo indexes from 3.10 on will have the legacyPolygons option by default set to false, // however, it can still be explicitly overwritten with true to create a legacy index but is not recommended. LegacyPolygons bool }
EnsureGeoIndexOptions contains specific options for creating a geo index.
type EnsureHashIndexOptions ¶
type EnsureHashIndexOptions struct { // If true, then create a unique index. Unique bool // If true, then create a sparse index. Sparse bool // If true, de-duplication of array-values, before being added to the index, will be turned off. // This flag requires ArangoDB 3.2. // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]") NoDeduplicate bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates *bool }
EnsureHashIndexOptions contains specific options for creating a hash index. Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine, which is the only storage engine since 3.7.
type EnsureMDIIndexOptions ¶ added in v1.6.2
type EnsureMDIIndexOptions struct { // If true, then create a unique index. Unique bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // fieldValueTypes is required and the only allowed value is "double". Future extensions of the index will allow other types. FieldValueTypes string // Sparse If `true`, then create a sparse index to exclude documents from the index that do not have the defined // attributes or are explicitly set to `null` values. If a non-value is set, it still needs to be numeric. Sparse bool // StoredValues The optional `storedValues` attribute can contain an array of paths to additional attributes to // store in the index. StoredValues []string }
EnsureMDIIndexOptions provides specific options for creating an MDI index.
type EnsureMDIPrefixedIndexOptions ¶ added in v1.6.2
type EnsureMDIPrefixedIndexOptions struct { EnsureMDIIndexOptions // PrefixFields is required and contains an array of attribute names used as search prefix. // Array expansions are not allowed. PrefixFields []string }
type EnsurePersistentIndexOptions ¶
type EnsurePersistentIndexOptions struct { // If true, then create a unique index. Unique bool // If true, then create a sparse index. Sparse bool // If true, de-duplication of array-values, before being added to the index, will be turned off. // This flag requires ArangoDB 3.2. // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]") NoDeduplicate bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates *bool // CacheEnabled if true, then the index will be cached in memory. Caching is turned off by default. CacheEnabled bool // StoredValues contains an array of paths to additional attributes to store in the index. // These additional attributes cannot be used for index lookups or sorts, but they can be used for projections. // There must be no overlap of attribute paths between `fields` and `storedValues`. The maximum number of values is 32. StoredValues []string }
EnsurePersistentIndexOptions contains specific options for creating a persistent index. Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine, which is the only storage engine since 3.7.
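Ensuring a persistent index, as a sketch; the EnsurePersistentIndex method and its (fields, options) shape are assumed from the collection index API (assuming `ctx` and a `driver.Collection` value `col`):

idx, created, err := col.EnsurePersistentIndex(ctx,
    []string{"author", "year"}, // indexed attribute paths
    &driver.EnsurePersistentIndexOptions{
        Sparse: true,
        Name:   "idx_author_year", // user-defined name for AQL hints
    })
if err != nil {
    // Handle error
}
_ = idx
_ = created // false if an equivalent index already existed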
type EnsureSkipListIndexOptions ¶
type EnsureSkipListIndexOptions struct { // If true, then create a unique index. Unique bool // If true, then create a sparse index. Sparse bool // If true, de-duplication of array-values, before being added to the index, will be turned off. // This flag requires ArangoDB 3.2. // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]") NoDeduplicate bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates *bool }
EnsureSkipListIndexOptions contains specific options for creating a skip-list index. Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine, which is the only storage engine since 3.7.
type EnsureTTLIndexOptions ¶
type EnsureTTLIndexOptions struct { // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates *bool }
EnsureTTLIndexOptions provides specific options for creating a TTL index
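A TTL index sketch; the EnsureTTLIndex method signature (field, expireAfter in seconds, options) is assumed from the collection index API:

idx, _, err := col.EnsureTTLIndex(ctx, "createdAt", 3600, // expire documents 1 hour after createdAt
    &driver.EnsureTTLIndexOptions{Name: "idx_ttl_createdAt"})
if err != nil {
    // Handle error
}
_ = idx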
type EnsureZKDIndexOptions ¶ added in v1.3.0
type EnsureZKDIndexOptions struct { // If true, then create a unique index. Unique bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground bool // Name optional user defined name used for hints in AQL queries Name string // fieldValueTypes is required and the only allowed value is "double". Future extensions of the index will allow other types. FieldValueTypes string }
EnsureZKDIndexOptions provides specific options for creating a ZKD index
type ErrorSlice ¶
type ErrorSlice []error
ErrorSlice is a slice of errors
func (ErrorSlice) FirstNonNil ¶
func (l ErrorSlice) FirstNonNil() error
FirstNonNil returns the first error in the slice that is not nil. If all errors in the slice are nil, nil is returned.
type ExplainQueryOptimizerOptions ¶ added in v1.6.0
type ExplainQueryOptimizerOptions struct { // A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, // telling the optimizer to include or exclude specific rules. // To disable a rule, prefix its name with a "-", to enable a rule, prefix it with a "+". // There is also a pseudo-rule "all", which matches all optimizer rules. "-all" disables all rules. Rules []string `json:"rules,omitempty"` }
type ExplainQueryOptions ¶ added in v1.6.0
type ExplainQueryOptions struct { // If set to true, all possible execution plans will be returned. // The default is false, meaning only the optimal plan will be returned. AllPlans bool `json:"allPlans,omitempty"` // An optional maximum number of plans that the optimizer is allowed to generate. // Setting this attribute to a low value allows you to cap the amount of work the optimizer does. MaxNumberOfPlans *int `json:"maxNumberOfPlans,omitempty"` // Options related to the query optimizer. Optimizer ExplainQueryOptimizerOptions `json:"optimizer,omitempty"` }
type ExplainQueryResult ¶ added in v1.6.0
type ExplainQueryResult struct { Plan ExplainQueryResultPlan `json:"plan,omitempty"` Plans []ExplainQueryResultPlan `json:"plans,omitempty"` // List of warnings that occurred during optimization or execution plan creation Warnings []string `json:"warnings,omitempty"` // Info about optimizer statistics Stats ExplainQueryResultExecutionStats `json:"stats,omitempty"` // Cacheable states whether the query results can be cached on the server if the query result cache were used. // This attribute is not present when allPlans is set to true. Cacheable *bool `json:"cacheable,omitempty"` }
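Explaining a query, as a sketch (assuming `ctx` and `db`; the query and bind variable are illustrative):

result, err := db.ExplainQuery(ctx,
    "FOR b IN books FILTER b.year > @year RETURN b",
    map[string]interface{}{"year": 2000},
    &driver.ExplainQueryOptions{AllPlans: false})
if err != nil {
    // Handle error
}
// With AllPlans false, the optimal plan is in result.Plan;
// with AllPlans true, inspect result.Plans instead.
_ = result.Plan.EstimatedCost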
type ExplainQueryResultExecutionCollection ¶ added in v1.6.0
type ExplainQueryResultExecutionCollection cursorPlanCollection
type ExplainQueryResultExecutionNodeRaw ¶ added in v1.6.0
type ExplainQueryResultExecutionNodeRaw map[string]interface{}
type ExplainQueryResultExecutionStats ¶ added in v1.6.0
type ExplainQueryResultExecutionStats struct { RulesExecuted int `json:"rulesExecuted,omitempty"` RulesSkipped int `json:"rulesSkipped,omitempty"` PlansCreated int `json:"plansCreated,omitempty"` PeakMemoryUsage uint64 `json:"peakMemoryUsage,omitempty"` ExecutionTime float64 `json:"executionTime,omitempty"` }
type ExplainQueryResultExecutionVariable ¶ added in v1.6.0
type ExplainQueryResultExecutionVariable cursorPlanVariable
type ExplainQueryResultPlan ¶ added in v1.6.0
type ExplainQueryResultPlan struct { // Execution nodes of the plan. NodesRaw []ExplainQueryResultExecutionNodeRaw `json:"nodes,omitempty"` // List of rules the optimizer applied Rules []string `json:"rules,omitempty"` // List of collections used in the query Collections []ExplainQueryResultExecutionCollection `json:"collections,omitempty"` // List of variables used in the query (note: this may contain internal variables created by the optimizer) Variables []ExplainQueryResultExecutionVariable `json:"variables,omitempty"` // The total estimated cost for the plan. If there are multiple plans, the optimizer will choose the plan with the lowest total cost EstimatedCost float64 `json:"estimatedCost,omitempty"` // The estimated number of results. EstimatedNrItems int `json:"estimatedNrItems,omitempty"` }
type FoxxCreateOptions ¶
type FoxxCreateOptions struct {
Mount string
}
type FoxxDeleteOptions ¶
type FoxxService ¶
type FoxxService interface { // InstallFoxxService installs a new service at a given mount path. InstallFoxxService(ctx context.Context, zipFile string, options FoxxCreateOptions) error // UninstallFoxxService uninstalls service at a given mount path. UninstallFoxxService(ctx context.Context, options FoxxDeleteOptions) error }
type GSSStatus ¶ added in v1.4.0
type GSSStatus struct { // The number of vertices that have been processed in this step. VerticesProcessed uint64 `json:"verticesProcessed,omitempty"` // The number of messages sent in this step. MessagesSent uint64 `json:"messagesSent,omitempty"` // The number of messages received in this step. MessagesReceived uint64 `json:"messagesReceived,omitempty"` // The number of bytes used in memory for the messages in this step. MemoryBytesUsedForMessages uint64 `json:"memoryBytesUsedForMessages,omitempty"` }
GSSStatus Information about the global superstep
type Graph ¶
type Graph interface { // Name returns the name of the graph. Name() string // Remove removes the entire graph. // If the graph does not exist, a NotFoundError is returned. Remove(ctx context.Context) error // RemoveWithOpts removes the entire graph with options. RemoveWithOpts(ctx context.Context, opts *RemoveGraphOptions) error // IsSmart returns true if the graph is smart. In case of Community Edition it is always false. IsSmart() bool // IsSatellite returns true if the graph is a satellite graph. In case of Community Edition it is always false. IsSatellite() bool // IsDisjoint returns true if the graph has the isDisjoint flag set. IsDisjoint() bool // GraphEdgeCollections Edge collection functions GraphEdgeCollections // GraphVertexCollections Vertex collection functions GraphVertexCollections // ID returns the id of the graph. ID() string // Key returns the key of the graph. Key() DocumentID // Rev returns the revision of the graph. Rev() string // EdgeDefinitions returns the edge definitions of the graph. EdgeDefinitions() []EdgeDefinition // SmartGraphAttribute returns the attributes of a smart graph if there are any. SmartGraphAttribute() string // MinReplicationFactor returns the minimum replication factor for the graph. MinReplicationFactor() int // NumberOfShards returns the number of shards for the graph. NumberOfShards() int // OrphanCollections returns the orphan collections of the graph. OrphanCollections() []string // ReplicationFactor returns the current replication factor. ReplicationFactor() int // WriteConcern returns the write concern setting of the graph. WriteConcern() int }
Graph provides access to all edge & vertex collections of a single graph in a database.
type GraphEdgeCollections ¶
type GraphEdgeCollections interface { // EdgeCollection opens a connection to an existing edge-collection within the graph. // If no edge-collection with given name exists, a NotFoundError is returned. // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. EdgeCollection(ctx context.Context, name string) (Collection, VertexConstraints, error) // EdgeCollectionExists returns true if an edge-collection with given name exists within the graph. EdgeCollectionExists(ctx context.Context, name string) (bool, error) // EdgeCollections returns all edge collections of this graph // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. EdgeCollections(ctx context.Context) ([]Collection, []VertexConstraints, error) // CreateEdgeCollection creates an edge collection in the graph. // collection: The name of the edge collection to be used. // constraints.From: contains the names of one or more vertex collections that can contain source vertices. // constraints.To: contains the names of one or more vertex collections that can contain target vertices. CreateEdgeCollection(ctx context.Context, collection string, constraints VertexConstraints) (Collection, error) // CreateEdgeCollectionWithOptions creates an edge collection in the graph with additional options. CreateEdgeCollectionWithOptions(ctx context.Context, collection string, constraints VertexConstraints, options CreateEdgeCollectionOptions) (Collection, error) // SetVertexConstraints modifies the vertex constraints of an existing edge collection in the graph. SetVertexConstraints(ctx context.Context, collection string, constraints VertexConstraints) error }
GraphEdgeCollections provides access to all edge collections of a single graph in a database.
type GraphStoreStatus ¶ added in v1.4.0
type GraphStoreStatus struct { // The number of vertices that are loaded from the database into memory. VerticesLoaded uint64 `json:"verticesLoaded,omitempty"` // The number of edges that are loaded from the database into memory. EdgesLoaded uint64 `json:"edgesLoaded,omitempty"` // The number of bytes used in-memory for the loaded graph. MemoryBytesUsed uint64 `json:"memoryBytesUsed,omitempty"` // The number of vertices that are written back to the database after the Pregel computation finished. It is only set if the store parameter is set to true. VerticesStored uint64 `json:"verticesStored,omitempty"` }
GraphStoreStatus The status of the in memory graph.
type GraphVertexCollections ¶
type GraphVertexCollections interface { // VertexCollection opens a connection to an existing vertex-collection within the graph. // If no vertex-collection with given name exists, a NotFoundError is returned. // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. VertexCollection(ctx context.Context, name string) (Collection, error) // VertexCollectionExists returns true if a vertex-collection with given name exists within the graph. VertexCollectionExists(ctx context.Context, name string) (bool, error) // VertexCollections returns all vertex collections of this graph // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. VertexCollections(ctx context.Context) ([]Collection, error) // CreateVertexCollection creates a vertex collection in the graph. // collection: The name of the vertex collection to be used. CreateVertexCollection(ctx context.Context, collection string) (Collection, error) // CreateVertexCollectionWithOptions creates a vertex collection in the graph with additional options. CreateVertexCollectionWithOptions(ctx context.Context, collection string, options CreateVertexCollectionOptions) (Collection, error) }
GraphVertexCollections provides access to all vertex collections of a single graph in a database.
type HTTPStats ¶
type HTTPStats struct { RequestsTotal int64 `json:"requestsTotal"` RequestsAsync int64 `json:"requestsAsync"` RequestsGet int64 `json:"requestsGet"` RequestsHead int64 `json:"requestsHead"` RequestsPost int64 `json:"requestsPost"` RequestsPut int64 `json:"requestsPut"` RequestsPatch int64 `json:"requestsPatch"` RequestsDelete int64 `json:"requestsDelete"` RequestsOptions int64 `json:"requestsOptions"` RequestsOther int64 `json:"requestsOther"` RequestsSuperuser int64 `json:"requestsSuperuser,omitempty"` RequestsUser int64 `json:"requestsUser,omitempty"` }
HTTPStats contains statistics about the HTTP traffic.
type ImportDocumentOptions ¶
type ImportDocumentOptions struct { // FromPrefix is an optional prefix for the values in _from attributes. If specified, the value is automatically // prepended to each _from input value. This allows specifying just the keys for _from. FromPrefix string `json:"fromPrefix,omitempty"` // ToPrefix is an optional prefix for the values in _to attributes. If specified, the value is automatically // prepended to each _to input value. This allows specifying just the keys for _to. ToPrefix string `json:"toPrefix,omitempty"` // Overwrite is a flag that if set, then all data in the collection will be removed prior to the import. // Note that any existing index definitions will be preserved. Overwrite bool `json:"overwrite,omitempty"` // OnDuplicate controls what action is carried out in case of a unique key constraint violation. // Possible values are: // - ImportOnDuplicateError // - ImportOnDuplicateUpdate // - ImportOnDuplicateReplace // - ImportOnDuplicateIgnore OnDuplicate ImportOnDuplicate `json:"onDuplicate,omitempty"` // Complete is a flag that if set, will make the whole import fail if any error occurs. // Otherwise the import will continue even if some documents cannot be imported. Complete bool `json:"complete,omitempty"` }
ImportDocumentOptions holds optional options that control the import document process.
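An import sketch; the ImportDocuments method on Collection is assumed to accept a slice of documents plus these options (assuming `ctx` and `col`):

docs := []map[string]interface{}{
    {"_key": "b1", "title": "AQL in Action"},
    {"_key": "b2", "title": "Graphs Everywhere"},
}
stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{
    OnDuplicate: driver.ImportOnDuplicateIgnore, // skip documents whose keys already exist
    Complete:    false,                          // keep going on per-document errors
})
if err != nil {
    // Handle error
}
_ = stats.Created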
type ImportDocumentStatistics ¶
type ImportDocumentStatistics struct { // Created holds the number of documents imported. Created int64 `json:"created,omitempty"` // Errors holds the number of documents that were not imported due to an error. Errors int64 `json:"errors,omitempty"` // Empty holds the number of empty lines found in the input (will only contain a value greater than zero for types documents or auto). Empty int64 `json:"empty,omitempty"` // Updated holds the number of updated/replaced documents (in case onDuplicate was set to either update or replace). Updated int64 `json:"updated,omitempty"` // Ignored holds the number of failed but ignored insert operations (in case onDuplicate was set to ignore). Ignored int64 `json:"ignored,omitempty"` // If the query parameter details is set to true, the result will contain a details attribute which is an array // with more detailed information about which documents could not be inserted. Details []string ArangoError }
ImportDocumentStatistics holds statistics of an import action.
type ImportOnDuplicate ¶
type ImportOnDuplicate string
ImportOnDuplicate is a type to control what action is carried out in case of a unique key constraint violation.
type Index ¶
type Index interface { // Name returns the collection specific ID of the index. This value should be used for all functions // that require an index _name_. Name() string // ID returns the ID of the index. Effectively this is `<collection-name>/<index.Name()>`. ID() string // UserName returns the user provided name of the index or an empty string if none is provided. This _name_ // is used in queries to provide hints for the optimizer about preferred indexes. UserName() string // Type returns the type of the index Type() IndexType // Remove removes the entire index. // If the index does not exist, a NotFoundError is returned. Remove(ctx context.Context) error // Fields returns a list of attributes of this index. Fields() []string // Unique returns if this index is unique. Unique() bool // Deduplicate returns deduplicate setting of this index. Deduplicate() bool // Sparse returns if this is a sparse index or not. Sparse() bool // GeoJSON returns if geo json was set for this index or not. GeoJSON() bool // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only). InBackground() bool // Estimates determines if the to-be-created index should maintain selectivity estimates or not. Estimates() bool // MinLength returns min length for this index if set. MinLength() int // ExpireAfter returns an expire after for this index if set. ExpireAfter() int // LegacyPolygons determines if the index uses legacy polygons or not - GeoIndex only LegacyPolygons() bool // CacheEnabled returns if the index is enabled for caching or not - PersistentIndex only CacheEnabled() bool // StoredValues returns a list of stored values for this index - PersistentIndex only StoredValues() []string // InvertedIndexOptions returns the inverted index options for this index - InvertedIndex only InvertedIndexOptions() InvertedIndexOptions }
Index provides access to a single index in a single collection.
type InvalidArgumentError ¶
type InvalidArgumentError struct {
Message string
}
InvalidArgumentError is returned when a go function argument is invalid.
func (InvalidArgumentError) Error ¶
func (e InvalidArgumentError) Error() string
Error implements the error interface for InvalidArgumentError.
type InventoryCollection ¶
type InventoryCollection struct { Parameters InventoryCollectionParameters `json:"parameters"` Indexes []InventoryIndex `json:"indexes,omitempty"` PlanVersion int64 `json:"planVersion,omitempty"` IsReady bool `json:"isReady,omitempty"` AllInSync bool `json:"allInSync,omitempty"` }
InventoryCollection is a single element of a DatabaseInventory, containing all information of a specific collection.
func (InventoryCollection) IndexByFieldsAndType ¶
func (i InventoryCollection) IndexByFieldsAndType(fields []string, indexType string) (InventoryIndex, bool)
IndexByFieldsAndType returns the InventoryIndex with given fields & type. Returns false if not found.
type InventoryCollectionParameters ¶
type InventoryCollectionParameters struct { // Available from 3.7 ArangoD version. CacheEnabled bool `json:"cacheEnabled,omitempty"` Deleted bool `json:"deleted,omitempty"` DistributeShardsLike string `json:"distributeShardsLike,omitempty"` // Deprecated: since 3.7 version. It is related only to MMFiles. DoCompact bool `json:"doCompact,omitempty"` // Available from 3.7 ArangoD version. GloballyUniqueId string `json:"globallyUniqueId,omitempty"` ID string `json:"id,omitempty"` // Deprecated: since 3.7 version. It is related only to MMFiles. IndexBuckets int `json:"indexBuckets,omitempty"` Indexes []InventoryIndex `json:"indexes,omitempty"` // Available from 3.9 ArangoD version. InternalValidatorType int `json:"internalValidatorType,omitempty"` // Available from 3.7 ArangoD version. IsDisjoint bool `json:"isDisjoint,omitempty"` IsSmart bool `json:"isSmart,omitempty"` // Available from 3.7 ArangoD version. IsSmartChild bool `json:"isSmartChild,omitempty"` IsSystem bool `json:"isSystem,omitempty"` // Deprecated: since 3.7 version. It is related only to MMFiles. IsVolatile bool `json:"isVolatile,omitempty"` // Deprecated: since 3.7 version. It is related only to MMFiles. JournalSize int64 `json:"journalSize,omitempty"` KeyOptions struct { AllowUserKeys bool `json:"allowUserKeys,omitempty"` // Deprecated: this field has wrong type and will be removed in the future. It is not used anymore since it can cause parsing issues. LastValue int64 `json:"-"` LastValueV2 uint64 `json:"lastValue,omitempty"` Type string `json:"type,omitempty"` } `json:"keyOptions"` // Deprecated: use 'WriteConcern' instead. MinReplicationFactor int `json:"minReplicationFactor,omitempty"` Name string `json:"name,omitempty"` NumberOfShards int `json:"numberOfShards,omitempty"` // Deprecated: since 3.7 ArangoD version. Path string `json:"path,omitempty"` PlanID string `json:"planId,omitempty"` ReplicationFactor int `json:"replicationFactor,omitempty"` // Schema for collection validation. Schema *CollectionSchemaOptions `json:"schema,omitempty"` ShadowCollections []int `json:"shadowCollections,omitempty"` ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` ShardKeys []string `json:"shardKeys,omitempty"` Shards map[ShardID][]ServerID `json:"shards,omitempty"` // Optional only for some collections. SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` // Optional only for some collections. SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` Status CollectionStatus `json:"status,omitempty"` // Available from 3.7 ArangoD version. SyncByRevision bool `json:"syncByRevision,omitempty"` Type CollectionType `json:"type,omitempty"` // Available from 3.7 ArangoD version. UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"` WaitForSync bool `json:"waitForSync,omitempty"` // Available from 3.6 ArangoD version. WriteConcern int `json:"writeConcern,omitempty"` // Available from 3.10 ArangoD version. ComputedValues []ComputedValue `json:"computedValues,omitempty"` }
InventoryCollectionParameters contains all configuration parameters of a collection in a database inventory.
func (*InventoryCollectionParameters) IsSatellite ¶
func (icp *InventoryCollectionParameters) IsSatellite() bool
IsSatellite returns true if the collection is a satellite collection
func (*InventoryCollectionParameters) MarshalJSON ¶
func (p *InventoryCollectionParameters) MarshalJSON() ([]byte, error)
MarshalJSON converts InventoryCollectionParameters into json
func (*InventoryCollectionParameters) UnmarshalJSON ¶
func (p *InventoryCollectionParameters) UnmarshalJSON(d []byte) error
UnmarshalJSON loads InventoryCollectionParameters from json
type InventoryIndex ¶
type InventoryIndex struct { ID string `json:"id,omitempty"` Type string `json:"type,omitempty"` Fields []string `json:"fields,omitempty"` Unique bool `json:"unique"` Sparse bool `json:"sparse"` Deduplicate bool `json:"deduplicate"` MinLength int `json:"minLength,omitempty"` GeoJSON bool `json:"geoJson,omitempty"` Name string `json:"name,omitempty"` ExpireAfter int `json:"expireAfter,omitempty"` Estimates bool `json:"estimates,omitempty"` FieldValueTypes string `json:"fieldValueTypes,omitempty"` CacheEnabled *bool `json:"cacheEnabled,omitempty"` }
InventoryIndex contains all configuration parameters of a single index of a collection in a database inventory.
func (InventoryIndex) FieldsEqual ¶
func (i InventoryIndex) FieldsEqual(fields []string) bool
FieldsEqual returns true when the given fields list equals the Fields list in the InventoryIndex. The order of fields is irrelevant.
type InventoryView ¶
type InventoryView struct { Name string `json:"name,omitempty"` Deleted bool `json:"deleted,omitempty"` ID string `json:"id,omitempty"` IsSystem bool `json:"isSystem,omitempty"` PlanID string `json:"planId,omitempty"` Type ViewType `json:"type,omitempty"` // Include all properties from an arangosearch view. ArangoSearchViewProperties }
InventoryView is a single element of a DatabaseInventory, containing all information of a specific view.
type InvertedIndexField ¶ added in v1.4.0
type InvertedIndexField struct { // Name An attribute path. The . character denotes sub-attributes. Name string `json:"name"` // Analyzer indicating the name of an analyzer instance // Default: the value defined by the top-level analyzer option, or if not set, the default identity Analyzer. Analyzer string `json:"analyzer,omitempty"` // IncludeAllFields This option only applies if you use the inverted index in a search-alias Views. // If set to true, then all sub-attributes of this field are indexed, excluding any sub-attributes that are configured separately by other elements in the fields array (and their sub-attributes). The analyzer and features properties apply to the sub-attributes. // If set to false, then sub-attributes are ignored. The default value is defined by the top-level includeAllFields option, or false if not set. IncludeAllFields bool `json:"includeAllFields,omitempty"` // SearchField This option only applies if you use the inverted index in a search-alias Views. // You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values for this field. If enabled, both array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option. // If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately. Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an array expansion if searchField is enabled. // Default: the value defined by the top-level searchField option, or false if not set. SearchField bool `json:"searchField,omitempty"` // TrackListPositions This option only applies if you use the inverted index in a search-alias Views. // If set to true, then track the value position in arrays for array values. For example, when querying a document like { attr: [ "valueX", "valueY", "valueZ" ] }, you need to specify the array element, e.g. doc.attr[1] == "valueY". // If set to false, all values in an array are treated as equal alternatives. You don’t specify an array element in queries, e.g. doc.attr == "valueY", and all elements are searched for a match. // Default: the value defined by the top-level trackListPositions option, or false if not set. TrackListPositions bool `json:"trackListPositions,omitempty"` // A list of Analyzer features to use for this field. They define what features are enabled for the analyzer Features []ArangoSearchAnalyzerFeature `json:"features,omitempty"` // Nested - Index the specified sub-objects that are stored in an array. // Other than with the fields property, the values get indexed in a way that lets you query for co-occurring values. // For example, you can search the sub-objects and all the conditions need to be met by a single sub-object instead of across all of them. // Enterprise-only feature Nested []InvertedIndexField `json:"nested,omitempty"` }
InvertedIndexField contains configuration for indexing of the field
type InvertedIndexOptions ¶ added in v1.4.0
type InvertedIndexOptions struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name"` // InBackground This attribute can be set to true to create the index in the background, // not write-locking the underlying collection for as long as if the index is built in the foreground. // The default value is false. InBackground bool `json:"inBackground,omitempty"` IsNewlyCreated bool `json:"isNewlyCreated,omitempty"` // The number of threads to use for indexing the fields. Default: 2 Parallelism int `json:"parallelism,omitempty"` // PrimarySort You can define a primary sort order to enable an AQL optimization. // If a query iterates over all documents of a collection, wants to sort them by attribute values, and the (left-most) fields to sort by, // as well as their sorting direction, match with the primarySort definition, then the SORT operation is optimized away. PrimarySort InvertedIndexPrimarySort `json:"primarySort,omitempty"` // StoredValues The optional storedValues attribute can contain an array of paths to additional attributes to store in the index. // These additional attributes cannot be used for index lookups or for sorting, but they can be used for projections. // This allows an index to fully cover more queries and avoid extra document lookups. StoredValues []StoredValue `json:"storedValues,omitempty"` // Analyzer The name of an Analyzer to use by default. This Analyzer is applied to the values of the indexed fields for which you don’t define Analyzers explicitly. Analyzer string `json:"analyzer,omitempty"` // Features list of analyzer features, default [] Features []ArangoSearchAnalyzerFeature `json:"features,omitempty"` // IncludeAllFields If set to true, all fields of this element will be indexed. Defaults to false. IncludeAllFields bool `json:"includeAllFields,omitempty"` // TrackListPositions If set to true, values in a list are treated as separate values. Defaults to false. TrackListPositions bool `json:"trackListPositions,omitempty"` // This option only applies if you use the inverted index in a search-alias Views. // You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values as the default. // If enabled, both array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option. // If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately. // Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. // You cannot use an array expansion if searchField is enabled. SearchField bool `json:"searchField,omitempty"` // Fields contains the properties for individual fields of the element. // The keys of the map are field names. Fields []InvertedIndexField `json:"fields,omitempty"` // ConsolidationIntervalMsec Wait at least this many milliseconds between applying ‘consolidationPolicy’ to consolidate View data store // and possibly release space on the filesystem (default: 1000, to disable use: 0). ConsolidationIntervalMsec *int64 `json:"consolidationIntervalMsec,omitempty"` // CommitIntervalMsec Wait at least this many milliseconds between committing View data store changes and making // documents visible to queries (default: 1000, to disable use: 0). CommitIntervalMsec *int64 `json:"commitIntervalMsec,omitempty"` // CleanupIntervalStep Wait at least this many commits between removing unused files in the ArangoSearch data directory // (default: 2, to disable use: 0). CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"` // ConsolidationPolicy The consolidation policy to apply for selecting which segments should be merged (default: {}). ConsolidationPolicy *ArangoSearchConsolidationPolicy `json:"consolidationPolicy,omitempty"` // WriteBufferIdle Maximum number of writers (segments) cached in the pool (default: 64, use 0 to disable) WriteBufferIdle *int64 `json:"writebufferIdle,omitempty"` // WriteBufferActive Maximum number of concurrent active writers (segments) that perform a transaction. // Other writers (segments) wait till current active writers (segments) finish (default: 0, use 0 to disable) WriteBufferActive *int64 `json:"writebufferActive,omitempty"` // WriteBufferSizeMax Maximum memory byte size per writer (segment) before a writer (segment) flush is triggered. // 0 value turns off this limit for any writer (buffer) and data will be flushed periodically based on the value defined for the flush thread (ArangoDB server startup option). // 0 value should be used carefully due to high potential memory consumption (default: 33554432, use 0 to disable) WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"` // OptimizeTopK is an array of strings defining optimized sort expressions. // Introduced in v3.11.0, Enterprise Edition only. OptimizeTopK []string `json:"optimizeTopK,omitempty"` }
InvertedIndexOptions provides specific options for creating an inverted index. Available since ArangoDB 3.10.
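A sketch of ensuring an inverted index; the EnsureInvertedIndex method name on Collection is assumed from the collection index API, and the analyzer name is illustrative:

idx, _, err := col.EnsureInvertedIndex(ctx, &driver.InvertedIndexOptions{
    Name:     "idx_inv_title",
    Analyzer: "text_en", // assumed analyzer name
    Fields: []driver.InvertedIndexField{
        {Name: "title"},
    },
})
if err != nil {
    // Handle error
}
_ = idx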
type InvertedIndexPrimarySort ¶ added in v1.4.0
type InvertedIndexPrimarySort struct { Fields []ArangoSearchPrimarySortEntry `json:"fields,omitempty"` // Compression optional Compression PrimarySortCompression `json:"compression,omitempty"` }
InvertedIndexPrimarySort defines compression and list of fields to be sorted.
type KeyGeneratorType ¶
type KeyGeneratorType string
KeyGeneratorType is a type of key generated, used in `CollectionKeyOptions`.
type License ¶ added in v1.6.1
type License struct { // Features describes properties of the license. Features LicenseFeatures `json:"features"` // License is an encrypted license key in Base64 encoding. License string `json:"license"` // Status is a status of a license. Status LicenseStatus `json:"status"` // Version is a version of a license. Version int `json:"version"` }
License describes license information.
type LicenseFeatures ¶ added in v1.6.1
type LicenseFeatures struct {
    // Expires is the expiry date as a Unix timestamp (seconds since January 1st, 1970 UTC).
    Expires int `json:"expires"`
}
LicenseFeatures describes the license's features.
type LicenseStatus ¶ added in v1.6.1
type LicenseStatus string
LicenseStatus describes the license's status.
const (
    // LicenseStatusGood - The license is valid for more than 2 weeks.
    LicenseStatusGood LicenseStatus = "good"
    // LicenseStatusExpired - The license has expired. In this situation, no new Enterprise Edition features can be utilized.
    LicenseStatusExpired LicenseStatus = "expired"
    // LicenseStatusExpiring - The license is valid for less than 2 weeks.
    LicenseStatusExpiring LicenseStatus = "expiring"
    // LicenseStatusReadOnly - The license has been expired for more than 2 weeks. The instance is now restricted to read-only mode.
    LicenseStatusReadOnly LicenseStatus = "read-only"
)
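A sketch of reacting to these states; it assumes the client exposes a GetLicense accessor for the License type described above:

lic, err := client.GetLicense(ctx)
if err != nil {
    // Handle error
}
switch lic.Status {
case driver.LicenseStatusExpiring, driver.LicenseStatusExpired:
    log.Printf("license needs renewal (expires at Unix time %d)", lic.Features.Expires)
case driver.LicenseStatusReadOnly:
    log.Println("license expired more than 2 weeks ago; the instance is read-only")
}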
type LogLevelsGetOptions ¶ added in v1.6.0
type LogLevelsGetOptions struct {
    // ServerID describes log levels for a specific server ID.
    ServerID ServerID
}
LogLevelsGetOptions describes log levels get options.
type LogLevelsSetOptions ¶ added in v1.6.0
type LogLevelsSetOptions struct {
    // ServerID describes log levels for a specific server ID.
    ServerID ServerID
}
LogLevelsSetOptions describes log levels set options.
type MemoryStats ¶
type MemoryStats struct { ContextID int64 `json:"contextId"` TMax float64 `json:"tMax"` CountOfTimes int64 `json:"countOfTimes"` HeapMax int64 `json:"heapMax"` HeapMin int64 `json:"heapMin"` Invocations int64 `json:"invocations,omitempty"` }
MemoryStats contains statistics about memory usage.
type NoMoreDocumentsError ¶
type NoMoreDocumentsError struct{}
NoMoreDocumentsError is returned by Cursors when an attempt is made to read documents and there are no more.
func (NoMoreDocumentsError) Error ¶
func (e NoMoreDocumentsError) Error() string
Error implements the error interface for NoMoreDocumentsError.
type NumberOfServersResponse ¶
type NumberOfServersResponse struct { NoCoordinators int `json:"numberOfCoordinators,omitempty"` NoDBServers int `json:"numberOfDBServers,omitempty"` CleanedServerIDs []string `json:"cleanedServers,omitempty"` }
NumberOfServersResponse holds the data returned from a NumberOfServers request.
type OverwriteMode ¶
type OverwriteMode string
const ( OverwriteModeIgnore OverwriteMode = "ignore" OverwriteModeReplace OverwriteMode = "replace" OverwriteModeUpdate OverwriteMode = "update" OverwriteModeConflict OverwriteMode = "conflict" )
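The overwrite mode is passed through the request context via WithOverwriteMode. A sketch, assuming an open driver.Collection `col` and a document value `doc`:

// Update an existing document with the same key instead of failing with a conflict.
ctx := driver.WithOverwriteMode(context.Background(), driver.OverwriteModeUpdate)
meta, err := col.CreateDocument(ctx, doc)
if err != nil {
    // Handle error
}
_ = meta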
type PregelAlgorithm ¶ added in v1.4.0
type PregelAlgorithm string
const ( PregelAlgorithmPageRank PregelAlgorithm = "pagerank" PregelAlgorithmSingleSourceShortestPath PregelAlgorithm = "sssp" PregelAlgorithmConnectedComponents PregelAlgorithm = "connectedcomponents" PregelAlgorithmWeaklyConnectedComponents PregelAlgorithm = "wcc" PregelAlgorithmStronglyConnectedComponents PregelAlgorithm = "scc" PregelAlgorithmHyperlinkInducedTopicSearch PregelAlgorithm = "hits" PregelAlgorithmEffectiveCloseness PregelAlgorithm = "effectivecloseness" PregelAlgorithmLineRank PregelAlgorithm = "linerank" PregelAlgorithmLabelPropagation PregelAlgorithm = "labelpropagation" PregelAlgorithmSpeakerListenerLabelPropagation PregelAlgorithm = "slpa" )
type PregelJob ¶ added in v1.4.0
type PregelJob struct {
    // The ID of the Pregel job, as a string.
    ID string `json:"id"`
    // The algorithm used by the job.
    Algorithm PregelAlgorithm `json:"algorithm,omitempty"`
    // The date and time when the job was created.
    Created time.Time `json:"created,omitempty"`
    // The date and time when the job results expire.
    // The expiration date is only meaningful for jobs that were completed, canceled or resulted in an error.
    // Such jobs are cleaned up by the garbage collection when they reach their expiration date/time.
    Started time.Time `json:"started,omitempty"`
    // The TTL (time to live) value for the job results, specified in seconds.
    // The TTL is used to calculate the expiration date for the job's results.
    TTL uint64 `json:"ttl,omitempty"`
    // The state of the execution.
    State PregelJobState `json:"state,omitempty"`
    // The number of global supersteps executed.
    Gss uint64 `json:"gss,omitempty"`
    // The total runtime of the execution up to now (if the execution is still ongoing).
    TotalRuntime float64 `json:"totalRuntime,omitempty"`
    // The startup runtime of the execution. The startup time includes the data loading time and can be substantial.
    StartupTime float64 `json:"startupTime,omitempty"`
    // The algorithm execution time. Is shown when the computation started.
    ComputationTime float64 `json:"computationTime,omitempty"`
    // The time for storing the results if the job includes results storage. Is shown when the storing started.
    StorageTime float64 `json:"storageTime,omitempty"`
    // Computation time of each global superstep. Is shown when the computation started.
    GSSTimes []float64 `json:"gssTimes,omitempty"`
    // This attribute is used by Programmable Pregel Algorithms (air, experimental).
    // The value is only populated once the algorithm has finished.
    Reports []map[string]interface{} `json:"reports,omitempty"`
    // The total number of vertices processed.
    VertexCount uint64 `json:"vertexCount,omitempty"`
    // The total number of edges processed.
    EdgeCount uint64 `json:"edgeCount,omitempty"`
    // UseMemoryMaps
    UseMemoryMaps *bool `json:"useMemoryMaps,omitempty"`
    // The Pregel run details.
    // Available from 3.10 arangod version.
    Detail *PregelRunDetails `json:"detail,omitempty"`
}
type PregelJobOptions ¶ added in v1.4.0
type PregelJobOptions struct {
    // Name of the algorithm.
    Algorithm PregelAlgorithm `json:"algorithm"`
    // Name of a graph. Either this or the parameters VertexCollections and EdgeCollections are required.
    // Please note that there are special sharding requirements for graphs in order to be used with Pregel.
    GraphName string `json:"graphName,omitempty"`
    // List of vertex collection names. Please note that there are special sharding requirements for collections in order to be used with Pregel.
    VertexCollections []string `json:"vertexCollections,omitempty"`
    // List of edge collection names. Please note that there are special sharding requirements for collections in order to be used with Pregel.
    EdgeCollections []string `json:"edgeCollections,omitempty"`
    // General as well as algorithm-specific options.
    Params map[string]interface{} `json:"params,omitempty"`
}
type PregelJobState ¶ added in v1.4.0
type PregelJobState string
const (
    // PregelJobStateNone - The Pregel run did not yet start.
    PregelJobStateNone PregelJobState = "none"
    // PregelJobStateLoading - The graph is loaded from the database into memory before the execution of the algorithm.
    PregelJobStateLoading PregelJobState = "loading"
    // PregelJobStateRunning - The algorithm is executing normally.
    PregelJobStateRunning PregelJobState = "running"
    // PregelJobStateStoring - The algorithm finished, but the results are still being written back into the collections.
    // Occurs only if the store parameter is set to true.
    PregelJobStateStoring PregelJobState = "storing"
    // PregelJobStateDone - The execution is done. In version 3.7.1 and later, this means that storing is also done.
    // In earlier versions, the results may not be written back into the collections yet.
    // This event is announced in the server log (requires at least info log level for the pregel log topic).
    PregelJobStateDone PregelJobState = "done"
    // PregelJobStateCanceled - The execution was permanently canceled, either by the user or by an error.
    PregelJobStateCanceled PregelJobState = "canceled"
    // PregelJobStateFatalError - The execution has failed and cannot recover.
    PregelJobStateFatalError PregelJobState = "fatal error"
    // PregelJobStateInError - The execution is in an error state. This can be caused by DB-Servers being not reachable or being non-responsive.
    // The execution might recover later, or switch to "canceled" if it was not able to recover successfully.
    PregelJobStateInError PregelJobState = "in error"
    // PregelJobStateRecovering - (currently unused): The execution is actively recovering and switches back to running if the recovery is successful.
    PregelJobStateRecovering PregelJobState = "recovering"
)
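A sketch of starting a job and polling until it leaves the active states; it assumes the database exposes StartJob and GetJob methods for the Pregel API, and the graph name is hypothetical:

id, err := db.StartJob(ctx, driver.PregelJobOptions{
    Algorithm: driver.PregelAlgorithmPageRank,
    GraphName: "myGraph", // hypothetical graph name
})
if err != nil {
    // Handle error
}
for {
    job, err := db.GetJob(ctx, id)
    if err != nil {
        // Handle error
    }
    if job.State == driver.PregelJobStateDone || job.State == driver.PregelJobStateCanceled {
        break
    }
    time.Sleep(time.Second)
}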
type PregelRunDetails ¶ added in v1.4.0
type PregelRunDetails struct {
    // The aggregated details of the full Pregel run. The values are totals of all the DB-Servers.
    AggregatedStatus *AggregatedStatus `json:"aggregatedStatus,omitempty"`
    // The details of the Pregel run for every DB-Server. Each object key is a DB-Server ID, and each value
    // is a nested object similar to the aggregatedStatus attribute.
    // In a single server deployment, there is only a single entry with an empty string as key.
    WorkerStatus map[string]*AggregatedStatus `json:"workerStatus,omitempty"`
}
PregelRunDetails - The Pregel run details. Available from 3.10 arangod version.
type PrimarySortCompression ¶ added in v1.3.0
type PrimarySortCompression string
PrimarySortCompression defines how to compress the primary sort data (introduced in v3.7.1).
const (
    // PrimarySortCompressionLz4 (default): use LZ4 fast compression.
    PrimarySortCompressionLz4 PrimarySortCompression = "lz4"
    // PrimarySortCompressionNone disables compression to trade space for speed.
    PrimarySortCompressionNone PrimarySortCompression = "none"
)
type ProtocolSet ¶
type ProtocolSet []Protocol
ProtocolSet is a set of protocols.
func (ProtocolSet) Contains ¶
func (ps ProtocolSet) Contains(p Protocol) bool
Contains returns true if the given protocol is contained in the given set, false otherwise.
func (ProtocolSet) ContainsAny ¶
func (ps ProtocolSet) ContainsAny(p ...Protocol) bool
ContainsAny returns true if any of the given protocols is contained in the given set, false otherwise.
type QueryExtra ¶
type QueryExtra interface {
    // GetStatistics returns Query statistics.
    GetStatistics() QueryStatistics
    // GetProfileRaw returns raw profile information in JSON.
    GetProfileRaw() ([]byte, bool, error)
    // GetPlanRaw returns the raw plan.
    GetPlanRaw() ([]byte, bool, error)
}
QueryExtra holds Query extra information
type QueryFlags ¶ added in v1.4.0
type QueryFlags struct { Hidden bool `json:"hidden,omitempty"` ClusterOnly bool `json:"clusterOnly,omitempty"` CanBeDisabled bool `json:"canBeDisabled,omitempty"` CanCreateAdditionalPlans bool `json:"canCreateAdditionalPlans,omitempty"` DisabledByDefault bool `json:"disabledByDefault,omitempty"` EnterpriseOnly bool `json:"enterpriseOnly,omitempty"` }
type QueryRule ¶ added in v1.4.0
type QueryRule struct { Name string `json:"name"` Flags QueryFlags `json:"flags,omitempty"` }
type QueryStatistics ¶
type QueryStatistics interface {
    // WritesExecuted returns the total number of data-modification operations successfully executed.
    WritesExecuted() int64
    // WritesIgnored returns the total number of data-modification operations that were unsuccessful.
    WritesIgnored() int64
    // ScannedFull returns the total number of documents iterated over when scanning a collection without an index.
    ScannedFull() int64
    // ScannedIndex returns the total number of documents iterated over when scanning a collection using an index.
    ScannedIndex() int64
    // Filtered returns the total number of documents that were removed after executing a filter condition in a FilterNode.
    Filtered() int64
    // FullCount returns the number of results before the last LIMIT in the query was applied.
    // A valid return value is only available when the cursor has been created with a context that was
    // prepared with `WithQueryFullCount`. Additionally, this will not return a valid value if
    // the context was prepared with `WithQueryStream`.
    FullCount() int64
    // ExecutionTime returns the execution time of the query (wall-clock time). The value will be set from the outside.
    ExecutionTime() time.Duration
}
QueryStatistics contains the statistics returned with the query cursor.
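A sketch of reading these statistics from a cursor; it assumes the Cursor type exposes a Statistics() accessor and uses WithQueryFullCount so that FullCount returns a valid value:

ctx := driver.WithQueryFullCount(context.Background())
cursor, err := db.Query(ctx, "FOR d IN docs LIMIT 10 RETURN d", nil)
if err != nil {
    // Handle error
}
defer cursor.Close()
stats := cursor.Statistics()
fmt.Println("full count:", stats.FullCount())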
type RawObject ¶
type RawObject []byte
RawObject is a raw encoded object. Connection implementations must be able to unmarshal *RawObject into Go objects.
func (*RawObject) MarshalJSON ¶
MarshalJSON returns *r as the JSON encoding of r.
func (RawObject) MarshalVPack ¶
func (r RawObject) MarshalVPack() (velocypack.Slice, error)
MarshalVPack returns r as the VelocyPack encoding of r.
func (*RawObject) UnmarshalJSON ¶
UnmarshalJSON sets *r to a copy of data.
func (*RawObject) UnmarshalVPack ¶
func (r *RawObject) UnmarshalVPack(data velocypack.Slice) error
UnmarshalVPack sets *r to a copy of data.
type RemoveGraphOptions ¶ added in v1.6.2
type RemoveGraphOptions struct {
DropCollections bool `json:"dropCollections,omitempty"`
}
type Replication ¶
type Replication interface {
    // CreateBatch creates a "batch" to prevent removal of state required for replication.
    CreateBatch(ctx context.Context, db Database, serverID int64, ttl time.Duration) (Batch, error)
    // DatabaseInventory gets the inventory of the server containing all collections (with entire details) of a database.
    // When this function is called on a coordinator in a cluster, an ID of a DBServer must be provided
    // using a context that is prepared with `WithDBServerID`.
    DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error)
    // GetRevisionTree retrieves the revision tree (Merkle tree) associated with the collection.
    GetRevisionTree(ctx context.Context, db Database, batchId, collection string) (RevisionTree, error)
    // GetRevisionsByRanges retrieves the revision IDs of documents within the requested ranges.
    GetRevisionsByRanges(ctx context.Context, db Database, batchId, collection string, minMaxRevision []RevisionMinMax, resume RevisionUInt64) (RevisionRanges, error)
    // GetRevisionDocuments retrieves documents by revision.
    GetRevisionDocuments(ctx context.Context, db Database, batchId, collection string, revisions Revisions) ([]map[string]interface{}, error)
}
Replication provides access to replication related operations.
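A sketch of fetching a database inventory for replication; it assumes the client exposes a Replication() accessor returning this interface, and the DB-Server ID is hypothetical:

rep := client.Replication()
// When talking to a cluster Coordinator, target a specific DB-Server.
rctx := driver.WithDBServerID(ctx, "PRMR-1234") // hypothetical server ID
inv, err := rep.DatabaseInventory(rctx, db)
if err != nil {
    // Handle error
}
_ = inv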
type Request ¶
type Request interface {
    // SetQuery sets a single query argument of the request.
    // Any existing query argument with the same key is overwritten.
    SetQuery(key, value string) Request
    // SetBody sets the content of the request.
    // The protocol of the connection determines what kind of marshalling is taking place.
    // When multiple bodies are given, they are merged, with fields in the first document prevailing.
    SetBody(body ...interface{}) (Request, error)
    // SetBodyArray sets the content of the request as an array.
    // If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data).
    // The merge is NOT recursive.
    // The protocol of the connection determines what kind of marshalling is taking place.
    SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (Request, error)
    // SetBodyImportArray sets the content of the request as an array formatted for importing documents.
    // The protocol of the connection determines what kind of marshalling is taking place.
    SetBodyImportArray(bodyArray interface{}) (Request, error)
    // SetHeader sets a single header argument of the request.
    // Any existing header argument with the same key is overwritten.
    SetHeader(key, value string) Request
    // Written returns true as soon as this request has been written completely to the network.
    // This does not guarantee that the server has received or processed the request.
    Written() bool
    // Clone creates a new request containing the same data as this request.
    Clone() Request
    // Path returns the Request path.
    Path() string
    // Method returns the Request method.
    Method() string
}
Request represents the input to a request on the server.
type Response ¶
type Response interface {
    // StatusCode returns an HTTP compatible status code of the response.
    StatusCode() int
    // Endpoint returns the endpoint that handled the request.
    Endpoint() string
    // CheckStatus checks if the status of the response equals one of the given status codes.
    // If so, nil is returned.
    // If not, an attempt is made to parse an error response in the body and an error is returned.
    CheckStatus(validStatusCodes ...int) error
    // Header returns the value of a response header with the given key.
    // If no such header is found, an empty string is returned.
    // On nested Responses, this function always returns an empty string.
    Header(key string) string
    // ParseBody performs protocol-specific unmarshalling of the response data into the given result.
    // If the given field is non-empty, the contents of that field will be parsed into the given result.
    // This can only be used for requests that return a single object.
    ParseBody(field string, result interface{}) error
    // ParseArrayBody performs protocol-specific unmarshalling of the response array data into individual response objects.
    // This can only be used for requests that return an array of objects.
    ParseArrayBody() ([]Response, error)
}
Response represents the response from the server on a given request.
type ResponseError ¶
type ResponseError struct {
Err error
}
A ResponseError is returned when a request was completely written to a server, but the server did not respond, or some kind of network error occurred during the response.
func (*ResponseError) Error ¶
func (e *ResponseError) Error() string
Error returns the Error() result of the underlying error.
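Callers typically detect this condition with the IsResponse helper from this package rather than a type assertion. A brief sketch, assuming an open driver.Collection `col` and a document value `doc`:

_, err := col.CreateDocument(ctx, doc)
if driver.IsResponse(err) {
    // The request was written, but no (valid) response arrived, e.g. due to a network error.
}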
type RevisionMinMax ¶
type RevisionMinMax [2]RevisionUInt64
RevisionMinMax is an array of two revisions that define a range.
type RevisionRanges ¶
type RevisionRanges struct { Ranges []Revisions `json:"ranges"` Resume RevisionUInt64 `json:"resume,string" velocypack:"resume"` }
type RevisionTree ¶
type RevisionTree struct { Version int `json:"version"` MaxDepth int `json:"maxDepth"` RangeMin RevisionUInt64 `json:"rangeMin,string" velocypack:"rangeMin"` RangeMax RevisionUInt64 `json:"rangeMax,string" velocypack:"rangeMax"` InitialRangeMin RevisionUInt64 `json:"initialRangeMin,string" velocypack:"initialRangeMin"` Count uint64 `json:"count,int"` Hash uint64 `json:"hash"` Nodes []RevisionTreeNode `json:"nodes"` }
RevisionTree is a list of Revisions in a Merkle tree
type RevisionTreeNode ¶
RevisionTreeNode is a leaf in a Merkle tree, holding hashed revisions and the count of documents in the leaf.
type RevisionUInt64 ¶
type RevisionUInt64 uint64
RevisionUInt64 is a representation of the '_rev' string value as a uint64 number.
func (*RevisionUInt64) MarshalJSON ¶
func (n *RevisionUInt64) MarshalJSON() ([]byte, error)
MarshalJSON converts RevisionUInt64 into string revision
func (*RevisionUInt64) MarshalVPack ¶
func (n *RevisionUInt64) MarshalVPack() (velocypack.Slice, error)
MarshalVPack converts RevisionUInt64 into string revision
func (*RevisionUInt64) UnmarshalJSON ¶
func (n *RevisionUInt64) UnmarshalJSON(revision []byte) (err error)
UnmarshalJSON parses string revision document into RevisionUInt64 number
func (*RevisionUInt64) UnmarshalVPack ¶
func (n *RevisionUInt64) UnmarshalVPack(slice velocypack.Slice) error
UnmarshalVPack parses string revision document into RevisionUInt64 number
type ServerHealth ¶
type ServerHealth struct {
    Endpoint            string           `json:"Endpoint"`
    LastHeartbeatAcked  time.Time        `json:"LastHeartbeatAcked"`
    LastHeartbeatSent   time.Time        `json:"LastHeartbeatSent"`
    LastHeartbeatStatus string           `json:"LastHeartbeatStatus"`
    Role                ServerRole       `json:"Role"`
    ShortName           string           `json:"ShortName"`
    Status              ServerStatus     `json:"Status"`
    CanBeDeleted        bool             `json:"CanBeDeleted"`
    HostID              string           `json:"Host,omitempty"`
    Version             Version          `json:"Version,omitempty"`
    Engine              EngineType       `json:"Engine,omitempty"`
    SyncStatus          ServerSyncStatus `json:"SyncStatus,omitempty"`

    // Only for Coordinators
    AdvertisedEndpoint *string `json:"AdvertisedEndpoint,omitempty"`

    // Only for Agents
    Leader  *string `json:"Leader,omitempty"`
    Leading *bool   `json:"Leading,omitempty"`
}
ServerHealth contains health information of a single server in a cluster.
type ServerLogMessage ¶
type ServerLogs ¶
type ServerLogs struct { Total int `json:"total"` Messages []ServerLogMessage `json:"messages,omitempty"` }
type ServerMode ¶
type ServerMode string
const (
    // ServerModeDefault is the normal mode of the database, in which read and write requests
    // are allowed.
    ServerModeDefault ServerMode = "default"
    // ServerModeReadOnly is the mode in which all modifications to the database are blocked.
    // The behavior is the same as for a user that has read-only access to all databases & collections.
    ServerModeReadOnly ServerMode = "readonly"
)
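A sketch of switching a deployment into read-only mode; it assumes the client's server-admin API exposes ServerMode and SetServerMode:

mode, err := client.ServerMode(ctx)
if err != nil {
    // Handle error
}
if mode == driver.ServerModeDefault {
    if err := client.SetServerMode(ctx, driver.ServerModeReadOnly); err != nil {
        // Handle error
    }
}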
type ServerRole ¶
type ServerRole string
ServerRole is the role of an arangod server
const (
    // ServerRoleSingle indicates that the server is a single-server instance.
    ServerRoleSingle ServerRole = "Single"
    // ServerRoleSingleActive indicates that the server is the leader of a single-server resilient pair.
    ServerRoleSingleActive ServerRole = "SingleActive"
    // ServerRoleSinglePassive indicates that the server is a follower of a single-server resilient pair.
    ServerRoleSinglePassive ServerRole = "SinglePassive"
    // ServerRoleDBServer indicates that the server is a dbserver within a cluster.
    ServerRoleDBServer ServerRole = "DBServer"
    // ServerRoleCoordinator indicates that the server is a coordinator within a cluster.
    ServerRoleCoordinator ServerRole = "Coordinator"
    // ServerRoleAgent indicates that the server is an agent within a cluster.
    ServerRoleAgent ServerRole = "Agent"
    // ServerRoleUndefined indicates that the role of the server cannot be determined.
    ServerRoleUndefined ServerRole = "Undefined"
)
type ServerStatistics ¶
type ServerStatistics struct { Time float64 `json:"time"` Enabled bool `json:"enabled"` System SystemStats `json:"system"` Client ClientStats `json:"client"` ClientUser ClientStats `json:"clientUser,omitempty"` HTTP HTTPStats `json:"http"` Server ServerStats `json:"server"` ArangoError }
ServerStatistics contains statistical data about the server as a whole.
type ServerStats ¶
type ServerStats struct { Uptime float64 `json:"uptime"` PhysicalMemory int64 `json:"physicalMemory"` Transactions TransactionStats `json:"transactions"` V8Context V8ContextStats `json:"v8Context"` Threads ThreadStats `json:"threads"` }
ServerStats contains statistics about the server.
type ServerStatus ¶
type ServerStatus string
ServerStatus describes the health status of a server
const (
    // ServerStatusGood indicates the server is in a good state.
    ServerStatusGood ServerStatus = "GOOD"
    // ServerStatusBad indicates the server has missed 1 heartbeat.
    ServerStatusBad ServerStatus = "BAD"
    // ServerStatusFailed indicates the server has been declared failed by the supervision; this happens after about 15s of being bad.
    ServerStatusFailed ServerStatus = "FAILED"
)
type ServerSyncStatus ¶
type ServerSyncStatus string
ServerSyncStatus describes the server's sync status.
const ( ServerSyncStatusUnknown ServerSyncStatus = "UNKNOWN" ServerSyncStatusUndefined ServerSyncStatus = "UNDEFINED" ServerSyncStatusStartup ServerSyncStatus = "STARTUP" ServerSyncStatusStopping ServerSyncStatus = "STOPPING" ServerSyncStatusStopped ServerSyncStatus = "STOPPED" ServerSyncStatusServing ServerSyncStatus = "SERVING" ServerSyncStatusShutdown ServerSyncStatus = "SHUTDOWN" )
type SetCollectionPropertiesOptions ¶
type SetCollectionPropertiesOptions struct {
    // WaitForSync: if true, creating or changing a document waits until the data has been synchronized to disk.
    WaitForSync *bool `json:"waitForSync,omitempty"`
    // JournalSize is the maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB).
    // Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles
    // that are created. Already existing journals or datafiles will not be affected.
    JournalSize int64 `json:"journalSize,omitempty"`
    // ReplicationFactor contains how many copies of each shard are kept on different DBServers.
    // Only available in a cluster setup.
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Deprecated: use 'WriteConcern' instead.
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // WriteConcern contains how many copies must be available before a collection can be written.
    // Available from 3.6 arangod version.
    WriteConcern int `json:"writeConcern,omitempty"`
    // CacheEnabled sets the cacheEnabled option in collection properties.
    CacheEnabled *bool `json:"cacheEnabled,omitempty"`
    // Schema for collection validation.
    Schema *CollectionSchemaOptions `json:"schema,omitempty"`
    // ComputedValues lets you configure collections to generate document attributes when documents are
    // created or modified, using an AQL expression.
    ComputedValues []ComputedValue `json:"computedValues,omitempty"`
}
SetCollectionPropertiesOptions contains data for Collection.SetProperties.
func (*SetCollectionPropertiesOptions) MarshalJSON ¶
func (p *SetCollectionPropertiesOptions) MarshalJSON() ([]byte, error)
MarshalJSON converts SetCollectionPropertiesOptions into json
func (*SetCollectionPropertiesOptions) UnmarshalJSON ¶
func (p *SetCollectionPropertiesOptions) UnmarshalJSON(d []byte) error
UnmarshalJSON loads SetCollectionPropertiesOptions from json
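A sketch of applying these options through Collection.SetProperties; the boolean is taken by pointer because the fields are optional:

waitForSync := true
err := col.SetProperties(ctx, driver.SetCollectionPropertiesOptions{
    WaitForSync:  &waitForSync,
    WriteConcern: 2, // cluster only
})
if err != nil {
    // Handle error
}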
type ShardingStrategy ¶
type ShardingStrategy string
ShardingStrategy describes the sharding strategy of a collection
const ( ShardingStrategyCommunityCompat ShardingStrategy = "community-compat" ShardingStrategyEnterpriseCompat ShardingStrategy = "enterprise-compat" ShardingStrategyEnterpriseSmartEdgeCompat ShardingStrategy = "enterprise-smart-edge-compat" ShardingStrategyHash ShardingStrategy = "hash" ShardingStrategyEnterpriseHashSmartEdge ShardingStrategy = "enterprise-hash-smart-edge" )
type ShutdownInfo ¶
type ShutdownInfo struct {
    // AQLCursors stores the number of AQL cursors that are still active.
    AQLCursors int `json:"AQLcursors"`
    // Transactions stores the number of ongoing transactions.
    Transactions int `json:"transactions"`
    // PendingJobs stores the number of ongoing asynchronous requests.
    PendingJobs int `json:"pendingJobs"`
    // DoneJobs stores the number of finished asynchronous requests whose result has not yet been collected.
    DoneJobs int `json:"doneJobs"`
    // PregelConductors stores the number of ongoing Pregel jobs.
    PregelConductors int `json:"pregelConductors"`
    // LowPrioOngoingRequests stores the number of ongoing low priority requests.
    LowPrioOngoingRequests int `json:"lowPrioOngoingRequests"`
    // LowPrioQueuedRequests stores the number of queued low priority requests.
    LowPrioQueuedRequests int `json:"lowPrioQueuedRequests"`
    // AllClear is set if all operations are closed.
    AllClear bool `json:"allClear"`
    // SoftShutdownOngoing describes whether a soft shutdown of the Coordinator is in progress.
    SoftShutdownOngoing bool `json:"softShutdownOngoing"`
}
ShutdownInfo stores information about shutdown of the coordinator.
type State ¶ added in v1.3.0
type State struct { Running bool `json:"running,omitempty"` LastLogTick string `json:"lastLogTick,omitempty"` LastUncommittedLogTick string `json:"lastUncommittedLogTick,omitempty"` TotalEvents int64 `json:"totalEvents,omitempty"` Time time.Time `json:"time,omitempty"` }
func (*State) UnmarshalJSON ¶ added in v1.3.0
UnmarshalJSON loads State from its ArangoDB JSON representation.
type Stats ¶
type Stats struct { Sum float64 `json:"sum"` Count int64 `json:"count"` Counts []int64 `json:"counts"` }
Stats is used for various time-related statistics.
type StoredValue ¶ added in v1.3.0
type StoredValue struct {
    Fields []string `json:"fields,omitempty"`
    Compression PrimarySortCompression `json:"compression,omitempty"`
    // Cache attribute allows you to always cache stored values in memory.
    // Introduced in v3.9.5, Enterprise Edition only.
    Cache *bool `json:"cache,omitempty"`
}
type SystemStats ¶
type SystemStats struct { MinorPageFaults int64 `json:"minorPageFaults"` MajorPageFaults int64 `json:"majorPageFaults"` UserTime float64 `json:"userTime"` SystemTime float64 `json:"systemTime"` NumberOfThreads int64 `json:"numberOfThreads"` ResidentSize int64 `json:"residentSize"` ResidentSizePercent float64 `json:"residentSizePercent"` VirtualSize int64 `json:"virtualSize"` }
SystemStats contains statistical data about the system, this is part of ServerStatistics.
type ThreadStats ¶
type ThreadStats struct { SchedulerThreads int64 `json:"scheduler-threads"` Blocked int64 `json:"blocked"` Queued int64 `json:"queued"` InProgress int64 `json:"in-progress"` DirectExec int64 `json:"direct-exec"` }
ThreadStats contains statistics about threads.
type Tick ¶
type Tick string
Tick represents a place in either the Write-Ahead Log or in the journals and datafiles, as reported by the server.
type TransactionCollections ¶
type TransactionCollections struct { Read []string `json:"read,omitempty"` Write []string `json:"write,omitempty"` Exclusive []string `json:"exclusive,omitempty"` }
TransactionCollections is used to specify which collections are accessed by a transaction, and how.
type TransactionID ¶
type TransactionID string
TransactionID identifies a transaction
func HasTransactionID ¶ added in v1.6.1
func HasTransactionID(ctx context.Context) (TransactionID, bool)
HasTransactionID returns the transaction ID from the given context, and a boolean indicating whether it was set.
type TransactionOptions ¶
type TransactionOptions struct {
    // Transaction size limit in bytes. Honored by the RocksDB storage engine only.
    MaxTransactionSize int
    // An optional numeric value that can be used to set a timeout for waiting on collection
    // locks. If not specified, a default value will be used.
    // Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
    LockTimeout *int
    // An optional boolean flag that, if set, will force the transaction to write
    // all data to disk before returning.
    WaitForSync bool
    // Deprecated: Maximum number of operations after which an intermediate commit is performed
    // automatically. Honored by the RocksDB storage engine only.
    IntermediateCommitCount *int
    // Optional arguments passed to action.
    Params []interface{}
    // Deprecated: Maximum total size of operations after which an intermediate commit is
    // performed automatically. Honored by the RocksDB storage engine only.
    IntermediateCommitSize *int
    // ReadCollections contains the collections that the transaction reads from.
    ReadCollections []string
    // WriteCollections contains the collections that the transaction writes to.
    WriteCollections []string
    // ExclusiveCollections contains the collections that the transaction writes exclusively to.
    ExclusiveCollections []string
}
TransactionOptions contains options that customize the transaction.
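These options accompany the JavaScript transaction API. A sketch, assuming Database exposes a Transaction(ctx, action, options) method and a collection named "docs" exists:

action := `function () { return "hello"; }` // executed server-side
result, err := db.Transaction(ctx, action, &driver.TransactionOptions{
    ReadCollections:  []string{"docs"},
    WriteCollections: []string{"docs"},
})
if err != nil {
    // Handle error
}
_ = result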
type TransactionStats ¶
type TransactionStats struct { Started int64 `json:"started"` Aborted int64 `json:"aborted"` Committed int64 `json:"committed"` IntermediateCommits int64 `json:"intermediateCommits"` ReadOnly int64 `json:"readOnly,omitempty"` DirtyReadOnly int64 `json:"dirtyReadOnly,omitempty"` }
TransactionStats contains statistics about transactions.
type TransactionStatus ¶
type TransactionStatus string
TransactionStatus describes the status of a transaction.
const ( TransactionRunning TransactionStatus = "running" TransactionCommitted TransactionStatus = "committed" TransactionAborted TransactionStatus = "aborted" )
type TransactionStatusRecord ¶
type TransactionStatusRecord struct {
Status TransactionStatus
}
TransactionStatusRecord provides insight into the status of a transaction.
type User ¶
type User interface {
    // Name returns the name of the user.
    Name() string
    // IsActive returns whether this is an active user.
    IsActive() bool
    // IsPasswordChangeNeeded returns whether a password change for this user is needed.
    IsPasswordChangeNeeded() bool
    // Extra gets extra information about this user that was passed during its creation/update/replacement.
    Extra(result interface{}) error
    // Remove removes the user.
    // If the user does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error
    // Update updates individual properties of the user.
    // If the user does not exist, a NotFoundError is returned.
    Update(ctx context.Context, options UserOptions) error
    // Replace replaces all properties of the user.
    // If the user does not exist, a NotFoundError is returned.
    Replace(ctx context.Context, options UserOptions) error
    // AccessibleDatabases returns a list of all databases that can be accessed (read/write or read-only) by this user.
    AccessibleDatabases(ctx context.Context) ([]Database, error)
    // SetDatabaseAccess sets the access this user has to the given database.
    // Pass a `nil` database to set the default access this user has to any new database.
    // This function requires ArangoDB 3.2 and up for access value `GrantReadOnly`.
    SetDatabaseAccess(ctx context.Context, db Database, access Grant) error
    // GetDatabaseAccess gets the access rights for this user to the given database.
    // Pass a `nil` database to get the default access this user has to any new database.
    // This function requires ArangoDB 3.2 and up.
    // By default this function returns the "effective" grant.
    // To return the "configured" grant, pass a context configured with `WithConfigured`.
    // This distinction is only relevant in ArangoDB 3.3 in the context of a read-only database.
    GetDatabaseAccess(ctx context.Context, db Database) (Grant, error)
    // RemoveDatabaseAccess removes the access this user has to the given database.
    // As a result the user's access falls back to its default access.
    // If you remove default access (db==`nil`) for a user (and there are no specific access
    // rules for a database), the user's access falls back to no-access.
    // Pass a `nil` database to set the default access this user has to any new database.
    // This function requires ArangoDB 3.2 and up.
    RemoveDatabaseAccess(ctx context.Context, db Database) error
    // SetCollectionAccess sets the access this user has to a collection.
    // If you pass a `Collection`, it will set access for that collection.
    // If you pass a `Database`, it will set the default collection access for that database.
    // If you pass `nil`, it will set the default collection access for the default database.
    // This function requires ArangoDB 3.2 and up.
    SetCollectionAccess(ctx context.Context, col AccessTarget, access Grant) error
    // GetCollectionAccess gets the access rights for this user to the given collection.
    // If you pass a `Collection`, it will get access for that collection.
    // If you pass a `Database`, it will get the default collection access for that database.
    // If you pass `nil`, it will get the default collection access for the default database.
    // By default this function returns the "effective" grant.
    // To return the "configured" grant, pass a context configured with `WithConfigured`.
    // This distinction is only relevant in ArangoDB 3.3 in the context of a read-only database.
    GetCollectionAccess(ctx context.Context, col AccessTarget) (Grant, error)
    // RemoveCollectionAccess removes the access this user has to a collection.
    // If you pass a `Collection`, it will remove access for that collection.
    // If you pass a `Database`, it will remove the default collection access for that database.
    // If you pass `nil`, it will remove the default collection access for the default database.
    // This function requires ArangoDB 3.2 and up.
    RemoveCollectionAccess(ctx context.Context, col AccessTarget) error
    // GrantReadWriteAccess grants this user read/write access to the given database.
    //
    // Deprecated: use GrantDatabaseReadWriteAccess instead.
    GrantReadWriteAccess(ctx context.Context, db Database) error
    // RevokeAccess revokes this user access to the given database.
    //
    // Deprecated: use `SetDatabaseAccess(ctx, db, GrantNone)` instead.
    RevokeAccess(ctx context.Context, db Database) error
}
User provides access to a single user of a single server / cluster of servers.
type UserOptions ¶
type UserOptions struct {
    // The user password as a string. If not specified, it will default to an empty string.
    Password string `json:"passwd,omitempty"`
    // A flag indicating whether the user account should be activated or not. The default value is true.
    // If set to false, the user won't be able to log into the database.
    Active *bool `json:"active,omitempty"`
    // A JSON object with extra user information. The data contained in extra will be stored for the user
    // but not be interpreted further by ArangoDB.
    Extra interface{} `json:"extra,omitempty"`
}
UserOptions contains options for creating a new user, updating or replacing a user.
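A sketch of creating a user and granting it access to a database; it assumes the client's user API exposes CreateUser and uses the package's Grant constants (the user name and password are illustrative):

u, err := client.CreateUser(ctx, "alice", &driver.UserOptions{
    Password: "secret",
})
if err != nil {
    // Handle error
}
if err := u.SetDatabaseAccess(ctx, db, driver.GrantReadWrite); err != nil {
    // Handle error
}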
type V8ContextStats ¶
type V8ContextStats struct { Available int64 `json:"available"` Busy int64 `json:"busy"` Dirty int64 `json:"dirty"` Free int64 `json:"free"` Min int64 `json:"min,omitempty"` Max int64 `json:"max"` Memory []MemoryStats `json:"memory"` }
V8ContextStats contains statistics about V8 contexts.
type Version ¶
type Version string
Version holds a server version string. The string has the format "major.minor.sub". Major and minor will be numeric, and sub may contain a number or a textual version.
func (Version) CompareTo ¶
CompareTo returns an integer comparing two versions. The result will be 0 if v==other, -1 if v < other, and +1 if v > other. If the major & minor parts are equal and the sub part is not a number, the sub parts are compared using lexicographical string comparison.
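For example, CompareTo can gate version-dependent features:

v := driver.Version("3.11.2")
if v.CompareTo("3.10") >= 0 {
    // Safe to use features introduced in 3.10.
}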
type VersionInfo ¶
type VersionInfo struct {
    // Server will always contain "arango".
    Server string `json:"server,omitempty"`
    // The server version string. The string has the format "major.minor.sub".
    // Major and minor will be numeric, and sub may contain a number or a textual version.
    Version Version `json:"version,omitempty"`
    // Type of license of the server.
    License string `json:"license,omitempty"`
    // Optional additional details. This is returned only if the context is configured using WithDetails.
    Details map[string]interface{} `json:"details,omitempty"`
}
VersionInfo describes the version of a database server.
func (*VersionInfo) IsEnterprise ¶
func (v *VersionInfo) IsEnterprise() bool
func (VersionInfo) String ¶
func (v VersionInfo) String() string
String creates a string representation of the given VersionInfo.
type VertexConstraints ¶
type VertexConstraints struct {
    // From contains names of vertex collections that are allowed to be used in the From part of an edge.
    From []string
    // To contains names of vertex collections that are allowed to be used in the To part of an edge.
    To []string
}
VertexConstraints limits the vertex collections you can use in an edge.
type View ¶
type View interface {
    // Name returns the name of the view.
    Name() string
    // Type returns the type of this view.
    Type() ViewType
    // ArangoSearchView returns this view as an ArangoSearch view.
    // When the type of the view is not ArangoSearch, an error is returned.
    ArangoSearchView() (ArangoSearchView, error)
    // ArangoSearchViewAlias returns this view as an ArangoSearch view alias.
    // When the type of the view is not ArangoSearch alias, an error is returned.
    ArangoSearchViewAlias() (ArangoSearchViewAlias, error)
    // Database returns the database containing the view.
    Database() Database
    // Rename renames the view (SINGLE server only).
    Rename(ctx context.Context, newName string) error
    // Remove removes the entire view.
    // If the view does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error
}
View provides access to the information of a view. Views are only available in ArangoDB 3.4 and higher.
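A sketch of looking up a view and narrowing it to its concrete type; it assumes Database exposes a View(ctx, name) accessor, and the view name is hypothetical:

v, err := db.View(ctx, "myView")
if err != nil {
    // Handle error
}
if v.Type() == driver.ViewTypeArangoSearch {
    asView, err := v.ArangoSearchView()
    if err != nil {
        // Handle error
    }
    _ = asView
}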
Source Files ¶
- asyncjob.go
- asyncjob_impl.go
- authentication.go
- client.go
- client_admin_backup.go
- client_admin_backup_impl.go
- client_cluster.go
- client_cluster_impl.go
- client_databases.go
- client_databases_impl.go
- client_foxx.go
- client_foxx_impl.go
- client_impl.go
- client_replication.go
- client_replication_impl.go
- client_server_admin.go
- client_server_admin_impl.go
- client_server_info.go
- client_server_info_impl.go
- client_users.go
- client_users_impl.go
- cluster.go
- cluster_impl.go
- collection.go
- collection_document_impl.go
- collection_documents.go
- collection_impl.go
- collection_indexes.go
- collection_indexes_impl.go
- connection.go
- content_type.go
- context.go
- context_read.go
- cursor.go
- cursor_impl.go
- database.go
- database_arangosearch_analyzers.go
- database_arangosearch_analyzers_impl.go
- database_collections.go
- database_collections_impl.go
- database_collections_schema.go
- database_graphs.go
- database_graphs_impl.go
- database_impl.go
- database_pregel.go
- database_pregel_impl.go
- database_transactions.go
- database_transactions_impl.go
- database_views.go
- database_views_impl.go
- doc.go
- edge.go
- edge_collection_documents_impl.go
- edge_collection_impl.go
- edge_collection_indexes_impl.go
- encode-go_1_8.go
- error.go
- foxx.go
- graph.go
- graph_edge_collections.go
- graph_edge_collections_impl.go
- graph_impl.go
- graph_vertex_collections.go
- graph_vertex_collections_impl.go
- id.go
- index.go
- index_impl.go
- meta.go
- protocol.go
- query.go
- query_explain.go
- replication.go
- replication_impl.go
- revision.go
- transaction.go
- user.go
- user_impl.go
- version-driver.go
- version.go
- vertex_collection_documents_impl.go
- vertex_collection_impl.go
- vertex_collection_indexes_impl.go
- view.go
- view_arangosearch.go
- view_arangosearch_alias.go
- view_arangosearch_alias_impl.go
- view_arangosearch_impl.go
- view_impl.go
Directories ¶
Path | Synopsis |
---|---|
agency | Package agency provides an API to access the ArangoDB agency (it is unlikely that you need this package directly). |
cluster | Package cluster implements a driver.Connection that provides cluster failover support (it is not intended to be used directly). |
http | Package http implements driver.Connection using an HTTP connection. |
jwt | Package jwt provides a helper function used to access ArangoDB servers using a JWT secret. |
tools | |
util | Package util provides some helper methods for the go-driver (it is unlikely that you need this package directly). |
vst | Package vst implements driver.Connection using a VelocyStream connection. |
vst/protocol | Package protocol implements the VelocyStream protocol (it is not intended to be used directly). |