Documentation ¶
Index ¶
- Variables
- func AddRawFilters(c context.Context, filts ...RawFilter) context.Context
- func AllocateIDs(c context.Context, ent ...interface{}) error
- func Count(c context.Context, q *Query) (int64, error)
- func CountBatch(c context.Context, batchSize int32, q *Query) (int64, error)
- func Delete(c context.Context, ent ...interface{}) error
- func Get(c context.Context, dst ...interface{}) error
- func GetAll(c context.Context, q *Query, dst interface{}) error
- func GetMetaDefault(getter MetaGetter, key string, dflt interface{}) interface{}
- func GetPLS(obj interface{}) interface{ ... }
- func IntToTime(v int64) time.Time
- func IsErrInvalidKey(err error) bool
- func IsErrNoSuchEntity(err error) (found bool)
- func MakeErrInvalidKey(reason string, args ...interface{}) *errors.Annotator
- func PopulateKey(obj interface{}, key *Key) bool
- func Put(c context.Context, src ...interface{}) error
- func RoundTime(t time.Time) time.Time
- func Run(c context.Context, q *Query, cb interface{}) error
- func RunBatch(c context.Context, batchSize int32, q *Query, cb interface{}) error
- func RunInTransaction(c context.Context, f func(c context.Context) error, opts *TransactionOptions) error
- func SetRaw(c context.Context, rds RawInterface) context.Context
- func SetRawFactory(c context.Context, rdsf RawFactory) context.Context
- func TimeToInt(t time.Time) int64
- func UpconvertUnderlyingType(o interface{}) interface{}
- func WithBatching(c context.Context, enabled bool) context.Context
- func WithoutTransaction(c context.Context) context.Context
- type BoolList
- type Constraints
- type Cursor
- type CursorCB
- type DeleteMultiCB
- type DroppedArgLookup
- type DroppedArgTracker
- func (dat DroppedArgTracker) DropKeys(keys []*Key) ([]*Key, DroppedArgLookup)
- func (dat DroppedArgTracker) DropKeysAndMeta(keys []*Key, meta MultiMetaGetter) ([]*Key, MultiMetaGetter, DroppedArgLookup)
- func (dat DroppedArgTracker) DropKeysAndVals(keys []*Key, vals []PropertyMap) ([]*Key, []PropertyMap, DroppedArgLookup)
- func (dat *DroppedArgTracker) MarkForRemoval(originalIndex, N int)
- func (dat *DroppedArgTracker) MarkNilKeys(keys []*Key)
- func (dat *DroppedArgTracker) MarkNilKeysMeta(keys []*Key, meta MultiMetaGetter)
- func (dat *DroppedArgTracker) MarkNilKeysVals(keys []*Key, vals []PropertyMap)
- type ErrFieldMismatch
- type ExistsResult
- type FinalizedQuery
- func (q *FinalizedQuery) Ancestor() *Key
- func (q *FinalizedQuery) Bounds() (start, end Cursor)
- func (q *FinalizedQuery) Distinct() bool
- func (q *FinalizedQuery) EqFilters() map[string]PropertySlice
- func (q *FinalizedQuery) EventuallyConsistent() bool
- func (q *FinalizedQuery) GQL() string
- func (q *FinalizedQuery) IneqFilterHigh() (field, op string, val Property)
- func (q *FinalizedQuery) IneqFilterLow() (field, op string, val Property)
- func (q *FinalizedQuery) IneqFilterProp() string
- func (q *FinalizedQuery) KeysOnly() bool
- func (q *FinalizedQuery) Kind() string
- func (q *FinalizedQuery) Limit() (int32, bool)
- func (q *FinalizedQuery) Offset() (int32, bool)
- func (q *FinalizedQuery) Orders() []IndexColumn
- func (q *FinalizedQuery) Original() *Query
- func (q *FinalizedQuery) Project() []string
- func (q *FinalizedQuery) String() string
- func (q *FinalizedQuery) Valid(kc KeyContext) error
- type GeoPoint
- type GetMultiCB
- type IndexColumn
- type IndexDefinition
- func (id *IndexDefinition) Builtin() bool
- func (id *IndexDefinition) Compound() bool
- func (id *IndexDefinition) Equal(o *IndexDefinition) bool
- func (id *IndexDefinition) Flip() *IndexDefinition
- func (id *IndexDefinition) GetFullSortOrder() []IndexColumn
- func (id *IndexDefinition) Less(o *IndexDefinition) bool
- func (id *IndexDefinition) MarshalYAML() (interface{}, error)
- func (id *IndexDefinition) Normalize() *IndexDefinition
- func (id *IndexDefinition) PrepForIdxTable() *IndexDefinition
- func (id *IndexDefinition) String() string
- func (id *IndexDefinition) YAMLString() (string, error)
- type IndexSetting
- type Key
- func KeyForObj(c context.Context, src interface{}) *Key
- func KeyForObjErr(c context.Context, src interface{}) (*Key, error)
- func MakeKey(c context.Context, elems ...interface{}) *Key
- func NewIncompleteKeys(c context.Context, count int, kind string, parent *Key) (keys []*Key)
- func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key
- func NewKeyEncoded(encoded string) (ret *Key, err error)
- func NewKeyToks(c context.Context, toks []KeyTok) *Key
- func (k *Key) AppID() string
- func (k *Key) Encode() string
- func (k *Key) Equal(other *Key) bool
- func (k *Key) EstimateSize() int64
- func (k *Key) GQL() string
- func (k *Key) GobDecode(buf []byte) error
- func (k *Key) GobEncode() ([]byte, error)
- func (k *Key) HasAncestor(other *Key) bool
- func (k *Key) Incomplete() *Key
- func (k *Key) IncompleteEqual(other *Key) (ret bool)
- func (k *Key) IntID() int64
- func (k *Key) IsIncomplete() bool
- func (k *Key) KeyContext() *KeyContext
- func (k *Key) Kind() string
- func (k *Key) LastTok() KeyTok
- func (k *Key) Less(other *Key) bool
- func (k *Key) MarshalJSON() ([]byte, error)
- func (k *Key) Namespace() string
- func (k *Key) Parent() *Key
- func (k *Key) PartialValid(kc KeyContext) bool
- func (k *Key) Root() *Key
- func (k *Key) Split() (appID, namespace string, toks []KeyTok)
- func (k *Key) String() string
- func (k *Key) StringID() string
- func (k *Key) UnmarshalJSON(buf []byte) error
- func (k *Key) Valid(allowSpecial bool, kc KeyContext) bool
- func (k *Key) WithID(stringID string, intID int64) *Key
- type KeyContext
- type KeyTok
- type MetaGetter
- type MetaGetterSetter
- type MultiMetaGetter
- type NewKeyCB
- type Property
- func (p Property) Clone() PropertyData
- func (p *Property) Compare(other *Property) int
- func (p *Property) Equal(other *Property) bool
- func (p *Property) EstimateSize() int64
- func (p *Property) GQL() string
- func (p *Property) IndexSetting() IndexSetting
- func (p Property) IndexTypeAndValue() (PropertyType, interface{})
- func (p *Property) Less(other *Property) bool
- func (p *Property) Project(to PropertyType) (interface{}, error)
- func (p *Property) SetValue(value interface{}, is IndexSetting) (err error)
- func (p Property) Slice() PropertySlice
- func (p Property) String() string
- func (p *Property) Type() PropertyType
- func (p *Property) Value() interface{}
- type PropertyConverter
- type PropertyData
- type PropertyLoadSaver
- type PropertyMap
- func (pm PropertyMap) Clone() PropertyMap
- func (pm PropertyMap) EstimateSize() int64
- func (pm PropertyMap) GetAllMeta() PropertyMap
- func (pm PropertyMap) GetMeta(key string) (interface{}, bool)
- func (pm PropertyMap) Load(props PropertyMap) error
- func (pm PropertyMap) Problem() error
- func (pm PropertyMap) Save(withMeta bool) (PropertyMap, error)
- func (pm PropertyMap) SetMeta(key string, val interface{}) bool
- func (pm PropertyMap) Slice(key string) PropertySlice
- type PropertySlice
- type PropertyType
- type Query
- func (q *Query) Ancestor(ancestor *Key) *Query
- func (q *Query) ClearFilters() *Query
- func (q *Query) ClearOrder() *Query
- func (q *Query) ClearProject() *Query
- func (q *Query) Distinct(on bool) *Query
- func (q *Query) End(c Cursor) *Query
- func (q *Query) Eq(field string, values ...interface{}) *Query
- func (q *Query) EventualConsistency(on bool) *Query
- func (q *Query) Finalize() (*FinalizedQuery, error)
- func (q *Query) FirestoreMode(on bool) *Query
- func (q *Query) GetFirestoreMode() bool
- func (q *Query) Gt(field string, value interface{}) *Query
- func (q *Query) Gte(field string, value interface{}) *Query
- func (q *Query) KeysOnly(on bool) *Query
- func (q *Query) Kind(kind string) *Query
- func (q *Query) Limit(limit int32) *Query
- func (q *Query) Lt(field string, value interface{}) *Query
- func (q *Query) Lte(field string, value interface{}) *Query
- func (q *Query) Offset(offset int32) *Query
- func (q *Query) Order(fieldNames ...string) *Query
- func (q *Query) Project(fieldNames ...string) *Query
- func (q *Query) Start(c Cursor) *Query
- func (q *Query) String() string
- type RawFactory
- type RawFilter
- type RawInterface
- type RawRunCB
- type Testable
- type TestingSnapshot
- type Toggle
- type Transaction
- type TransactionOptions
Constants ¶
This section is empty.
Variables ¶
var ( ErrNoSuchEntity = datastore.ErrNoSuchEntity ErrConcurrentTransaction = datastore.ErrConcurrentTransaction // Stop is understood by various services to stop iterative processes. Examples // include datastore.Interface.Run's callback. Stop = stopErr{} )
These errors are returned by various datastore.Interface methods.
var ( // ErrMultipleInequalityFilter is returned from Query.Finalize if you build a // query which has inequality filters on multiple fields. ErrMultipleInequalityFilter = errors.New( "inequality filters on multiple properties in the same Query is not allowed") // ErrNullQuery is returned from Query.Finalize if you build a query for which // there cannot possibly be any results. ErrNullQuery = errors.New( "the query is overconstrained and can never have results") )
Functions ¶
func AddRawFilters ¶
AddRawFilters adds RawInterface filters to the context.
func AllocateIDs ¶
AllocateIDs allows you to allocate IDs from the datastore without putting any data.
A partial valid key will be constructed from each entity's kind and parent, if present. An allocation will then be performed against the datastore for each key, and the partial key will be populated with a unique integer ID. The resulting keys will be applied to their objects using PopulateKey. If successful, any existing ID will be destroyed.
If an object is supplied that cannot accept an integer key, this method will panic.
ent must be one of:
- *S where S is a struct
- *P where *P is a concrete type implementing PropertyLoadSaver
- []S or []*S where S is a struct
- []P or []*P where *P is a concrete type implementing PropertyLoadSaver
- []I where I is some interface type. Each element of the slice must be non-nil, and its underlying type must be either *S or *P.
- []*Key, to populate a slice of partial-valid keys.
If an error is encountered, the returned error value will depend on the input arguments. If one argument is supplied, the result will be the encountered error type. If multiple arguments are supplied, the result will be a MultiError whose error index corresponds to the argument in which the error was encountered.
If an ent argument is a slice, its error type will be a MultiError. Note that in the scenario where multiple slices are provided, this will return a MultiError containing a nested MultiError for each slice argument.
func Count ¶
Count executes the given query and returns the number of entries which match it.
By default, datastore applies a short (~5s) timeout to queries. This can be increased, usually to around several minutes, by explicitly setting a deadline on the supplied Context.
func CountBatch ¶
CountBatch is a batching version of Count. See RunBatch for more information about batching, and Count for more information about the parameters.
If the Context supplied to CountBatch is cancelled or reaches its deadline, CountBatch will terminate with the Context's error.
By default, datastore applies a short (~5s) timeout to queries. This can be increased, usually to around several minutes, by explicitly setting a deadline on the supplied Context.
If the specified `batchSize` is <= 0, no batching will be performed.
func Delete ¶
Delete removes the supplied entities from the datastore.
ent must be one of:
- *S, where S is a struct
- *P, where *P is a concrete type implementing PropertyLoadSaver
- []S or []*S, where S is a struct
- []P or []*P, where *P is a concrete type implementing PropertyLoadSaver
- []I, where I is some interface type. Each element of the slice must be non-nil, and its underlying type must be either *S or *P.
- *Key, to remove a specific key from the datastore.
- []*Key, to remove a slice of keys from the datastore.
If an error is encountered, the returned error value will depend on the input arguments. If one argument is supplied, the result will be the encountered error type. If multiple arguments are supplied, the result will be a MultiError whose error index corresponds to the argument in which the error was encountered.
If an ent argument is a slice, its error type will be a MultiError. Note that in the scenario where multiple slices are provided, this will return a MultiError containing a nested MultiError for each slice argument.
func Get ¶
Get retrieves objects from the datastore.
Each element in dst must be one of:
- *S, where S is a struct
- *P, where *P is a concrete type implementing PropertyLoadSaver
- []S or []*S, where S is a struct
- []P or []*P, where *P is a concrete type implementing PropertyLoadSaver
- []I, where I is some interface type. Each element of the slice must be non-nil, and its underlying type must be either *S or *P.
If an error is encountered, the returned error value will depend on the input arguments. If one argument is supplied, the result will be the encountered error type. If multiple arguments are supplied, the result will be a MultiError whose error index corresponds to the argument in which the error was encountered.
If a dst argument is a slice, its error type will be a MultiError. Note that in the scenario where multiple slices are provided, this will return a MultiError containing a nested MultiError for each slice argument.
If there was an issue retrieving the entity, the input `dst` objects will not be affected. This means that you can populate an object for dst with some values, do a Get, and on an ErrNoSuchEntity, do a Put (inside a transaction, of course :)).
func GetAll ¶
GetAll retrieves all of the Query results into dst.
By default, datastore applies a short (~5s) timeout to queries. This can be increased, usually to around several minutes, by explicitly setting a deadline on the supplied Context.
dst must be one of:
- *[]S or *[]*S, where S is a struct
- *[]P or *[]*P, where *P is a concrete type implementing PropertyLoadSaver
- *[]*Key implies a keys-only query.
func GetMetaDefault ¶
func GetMetaDefault(getter MetaGetter, key string, dflt interface{}) interface{}
GetMetaDefault is a helper for GetMeta, allowing a default value.
If the metadata key is not available, or its type doesn't equal the homogenized type of dflt, then dflt will be returned.
Type homogenization:
signed integer types -> int64 bool -> Toggle fields (bool)
Example:
pls.GetMetaDefault("foo", 100).(int64)
func GetPLS ¶
func GetPLS(obj interface{}) interface { PropertyLoadSaver MetaGetterSetter }
GetPLS resolves obj into default struct PropertyLoadSaver and MetaGetterSetter implementation.
obj must be a non-nil pointer to a struct of some sort.
By default, exported fields will be serialized to/from the datastore. If the field is not exported, it will be skipped by the serialization routines.
If a field is of a non-supported type (see Property for the list of supported property types), this function will panic. Other problems include duplicate field names (due to tagging), recursively defined structs, nested structures with multiple slices (e.g. slices of slices, either directly `[][]type` or indirectly `[]Embedded` where Embedded contains a slice.)
The following field types are supported:
- int64, int32, int16, int8, int
- uint32, uint16, uint8, byte
- float64, float32
- string
- []byte
- bool
- time.Time
- GeoPoint
- *Key
- any Type whose underlying type is one of the above types
- Types which implement PropertyConverter on (*Type)
- A struct composed of the above types (except for nested slices)
- A slice of any of the above types
GetPLS supports the following struct tag syntax:
`gae:"fieldName[,noindex]"` -- an alternate fieldname for an exportable field. When the struct is serialized or deserialized, fieldName will be associated with the struct field instead of the field's Go name. This is useful when writing Go code which interfaces with appengine code written in other languages (like python) which use lowercase as their default datastore field names. A fieldName of "-" means that gae will ignore the field for all serialization/deserialization. If noindex is specified, then this field will not be indexed in the datastore, even if it was an otherwise indexable type. If fieldName is blank, and noindex is specified, then fieldName will default to the field's actual name. Note that by default, all fields (with indexable types) are indexed. `gae:"$metaKey[,<value>]"` -- indicates a field is metadata. Metadata can be used to control filter behavior, or to store key data when using the Interface.KeyForObj* methods. The supported field types are: - *Key - int64, int32, int16, int8, uint32, uint16, uint8, byte - string - Toggle (GetMeta and SetMeta treat the field as if it were bool) - Any type which implements PropertyConverter Additionally, numeric, string and Toggle types allow setting a default value in the struct field tag (the "<value>" portion). Only exported fields allow SetMeta, but all fields of appropriate type allow tagged defaults for use with GetMeta. See Examples. `gae:"[-],extra"` -- indicates that any extra, unrecognized or mismatched property types (type in datastore doesn't match your struct's field type) should be loaded into and saved from this field. The precise type of the field must be PropertyMap. This form allows you to control the behavior of reads and writes when your schema changes, or to implement something like ndb.Expando with a mix of structured and unstructured fields. If the `-` is present, then datastore write operations will not put elements of this map into the datastore.
If the field is non-exported, then read operations from the datastore will not populate the members of this map, but extra fields or structural differences encountered when reading into this struct will be silently ignored. This is useful if you want to just ignore old fields. If there is a conflict between a field in the struct and a same-named Property in the extra field, the field in the struct takes precedence. Recursive structs are supported, but all extra properties go to the topmost structure's Extra field. This is a bit non-intuitive, but the implementation complexity was deemed not worth it, since that sort of thing is generally only useful on schema changes, which should be transient. Examples: // "black hole": ignore mismatches, ignore on write _ PropertyMap `gae:"-,extra" // "expando": full content is read/written Expando PropertyMap `gae:",extra" // "convert": content is read from datastore, but lost on writes. This // is useful for doing conversions from an old schema to a new one, // since you can retrieve the old data and populate it into new fields, // for example. Probably should be used in conjunction with an // implementation of the PropertyLoadSaver interface so that you can // transparently upconvert to the new schema on load. Convert PropertyMap `gae:"-,extra"
Example "special" structure. This is supposed to be some sort of datastore singleton object.
struct secretFoo { // _id and _kind are not exported, so setting their values will not be // reflected by GetMeta. _id int64 `gae:"$id,1"` _kind string `gae:"$kind,InternalFooSingleton"` // Value is exported, so can be read and written by the PropertyLoadSaver, // but secretFoo is shared with a python appengine module which has // stored this field as 'value' instead of 'Value'. Value int64 `gae:"value"` }
Example "normal" structure that you might use in a go-only appengine app.
struct User { ID string `gae:"$id"` // "kind" is automatically implied by the struct name: "User" // "parent" is nil... Users are root entities // 'Name' will be serialized to the datastore in the field 'Name' Name string } struct Comment { ID int64 `gae:"$id"` // "kind" is automatically implied by the struct name: "Comment" // Parent will be enforced by the application to be a User key. Parent *Key `gae:"$parent"` // 'Lines' will be serialized to the datastore in the field 'Lines' Lines []string }
A pointer-to-struct may also implement MetaGetterSetter to provide more sophisticated metadata values. Explicitly defined fields (as shown above) always take precedence over fields manipulated by the MetaGetterSetter methods. So if your GetMeta handles "kind", but you explicitly have a $kind field, the $kind field will take precedence and your GetMeta implementation will not be called for "kind".
A struct overloading any of the PropertyLoadSaver or MetaGetterSetter interfaces may evoke the default struct behavior by using GetPLS on itself. For example:
struct Special { Name string foo string } func (s *Special) Load(props PropertyMap) error { if foo, ok := props["foo"]; ok && len(foo) == 1 { s.foo = foo delete(props, "foo") } return GetPLS(s).Load(props) } func (s *Special) Save(withMeta bool) (PropertyMap, error) { props, err := GetPLS(s).Save(withMeta) if err != nil { return nil, err } props["foo"] = []Property{MkProperty(s.foo)} return props, nil } func (s *Special) Problem() error { return GetPLS(s).Problem() }
Additionally, any field ptr-to-type may implement the PropertyConverter interface to allow a single field to, for example, implement some alternate encoding (json, gzip), or even just serialize to/from a simple string field. This applies to normal fields, as well as metadata fields. It can be useful for storing struct '$id's which have multi-field meanings. For example, the Person struct below could be initialized in go as `&Person{Name{"Jane", "Doe"}}`, retaining Jane's name as manipulable Go fields. However, in the datastore, it would have a key of `/Person,"Jane|Doe"`, and loading the struct from the datastore as part of a Query, for example, would correctly populate Person.Name.First and Person.Name.Last.
type Name struct { First string Last string } func (n *Name) ToProperty() (Property, error) { return fmt.Sprintf("%s|%s", n.First, n.Last) } func (n *Name) FromProperty(p Property) error { // check p to be a PTString // split on "|" // assign to n.First, n.Last } type Person struct { ID Name `gae:"$id"` }
func IsErrInvalidKey ¶
IsErrInvalidKey tests if a given error is a wrapped datastore.ErrInvalidKey error.
func IsErrNoSuchEntity ¶
IsErrNoSuchEntity tests if an error is ErrNoSuchEntity, or is a MultiError that contains ErrNoSuchEntity and no other errors.
func MakeErrInvalidKey ¶
MakeErrInvalidKey returns an errors.Annotator instance that wraps an invalid key error. Calling IsErrInvalidKey on this Annotator or its derivatives will return true.
func PopulateKey ¶
PopulateKey loads key into obj.
obj is any object that Interface.Get is able to accept.
Upon successful application, this method will return true. If the key could not be applied to the object, this method will return false and obj will be left unmodified. It will panic if obj is an invalid datastore model.
func Put ¶
Put writes objects into the datastore.
src must be one of:
- *S, where S is a struct
- *P, where *P is a concrete type implementing PropertyLoadSaver
- []S or []*S, where S is a struct
- []P or []*P, where *P is a concrete type implementing PropertyLoadSaver
- []I, where I is some interface type. Each element of the slice must be non-nil, and its underlying type must be either *S or *P.
A *Key will be extracted from src via KeyForObj. If extractedKey.IsIncomplete() is true, and the object is put to the datastore successfully, then Put will write the resolved (datastore-generated) *Key back to src.
NOTE: The datastore only autogenerates *Keys with integer IDs. Only models which use a raw `$key` or integer-typed `$id` field are eligible for this. A model with a string-typed `$id` field will not accept an integer id'd *Key and will cause the Put to fail.
If an error is encountered, the returned error value will depend on the input arguments. If one argument is supplied, the result will be the encountered error type. If multiple arguments are supplied, the result will be a MultiError whose error index corresponds to the argument in which the error was encountered.
If a src argument is a slice, its error type will be a MultiError. Note that in the scenario where multiple slices are provided, this will return a MultiError containing a nested MultiError for each slice argument.
func RoundTime ¶
RoundTime rounds a time.Time to microseconds, which is the (undocumented) way that the AppEngine SDK stores it.
func Run ¶
Run executes the given query, and calls `cb` for each successfully retrieved item.
By default, datastore applies a short (~5s) timeout to queries. This can be increased, usually to around several minutes, by explicitly setting a deadline on the supplied Context.
cb is a callback function whose signature is
func(obj TYPE[, getCursor CursorCB]) [error]
Where TYPE is one of:
- S or *S, where S is a struct
- P or *P, where *P is a concrete type implementing PropertyLoadSaver
- *Key (implies a keys-only query)
If the error is omitted from the signature, this will run until the query returns all its results, or has an error/times out.
If error is in the signature, the query will continue as long as the callback returns nil. If it returns `Stop`, the query will stop and Run will return nil. Otherwise, the query will stop and Run will return the user's error.
Run may also stop on the first datastore error encountered, which can occur due to flakiness, timeout, etc. If it encounters such an error, it will be returned.
func RunBatch ¶
RunBatch is a batching version of Run. Like Run, executes a query and invokes the supplied callback for each returned result. RunBatch differs from Run in that it performs the query in batches, using a cursor to continue the query in between batches.
See Run for more information about the parameters.
Batching processes the supplied query in batches, buffering the full batch set locally before sending its results to the user. It will then proceed to the next batch until finished or cancelled. This is useful:
- For efficiency, decoupling the processing of query data from the underlying datastore operation.
- For very long-running queries, where the duration of the query would normally exceed datastore's maximum query timeout.
- The caller may count return callbacks and perform processing at each `batchSize` interval with confidence that the underlying query will not timeout during that processing.
If the Context supplied to RunBatch is cancelled or reaches its deadline, RunBatch will terminate with the Context's error.
By default, datastore applies a short (~5s) timeout to queries. This can be increased, usually to around several minutes, by explicitly setting a deadline on the supplied Context.
If the specified `batchSize` is <= 0, no batching will be performed.
func RunInTransaction ¶
func RunInTransaction(c context.Context, f func(c context.Context) error, opts *TransactionOptions) error
RunInTransaction runs f inside of a transaction. See the appengine SDK's documentation for full details on the behavior of transactions in the datastore.
Note that the behavior of transactions may change depending on what filters have been installed. It's possible that we'll end up implementing things like nested/buffered transactions as filters.
func SetRaw ¶
func SetRaw(c context.Context, rds RawInterface) context.Context
SetRaw sets the current Datastore object in the context. Useful for testing with a quick mock. This is just a shorthand SetRawFactory invocation to set a factory which always returns the same object.
func SetRawFactory ¶
func SetRawFactory(c context.Context, rdsf RawFactory) context.Context
SetRawFactory sets the function to produce Datastore instances, as returned by the Raw method.
func TimeToInt ¶
TimeToInt converts a time value to a datastore-appropriate integer value.
This method truncates the time to microseconds and drops the timezone, because that's the (undocumented) way that the appengine SDK does it.
func UpconvertUnderlyingType ¶
func UpconvertUnderlyingType(o interface{}) interface{}
UpconvertUnderlyingType takes an object o, and attempts to convert it to its native datastore-compatible type. e.g. int16 will convert to int64, and `type Foo string` will convert to `string`.
func WithBatching ¶
WithBatching enables or disables automatic operation batching. Batching is enabled by default, and batch sizes are defined by the datastore's Constraints.
Datastore has built-in constraints that it applies to some operations:
- For Get, there is a maximum number of elements that can be processed in a single RPC (see Constraints.MaxGetSize).
- For Put, there is a maximum number of elements that can be processed in a single RPC (see Constraints.MaxPutSize).
- For Delete, there is a maximum number of elements that can be processed in a single RPC (see Constraints.MaxDeleteSize).
Batching masks these limitations, providing an interface that meets user expectations. Behind the scenes, it splits large operations into a series of parallel smaller operations that fit within the datastore's constraints.
func WithoutTransaction ¶
WithoutTransaction returns a Context that isn't bound to a transaction. This may be called even when outside of a transaction, in which case the input Context is a valid return value.
This can be useful to perform non-transactional tasks given only a Context that is bound to a transaction.
Types ¶
type BoolList ¶
type BoolList []bool
BoolList is a convenience wrapper for []bool that provides summary methods for working with the list in aggregate.
type Constraints ¶
type Constraints struct { // MaxGetSize is the maximum number of entities that can be referenced in a // single GetMulti call. If <= 0, no constraint is applied. MaxGetSize int // MaxPutSize is the maximum number of entities that can be referenced in a // single PutMulti call. If <= 0, no constraint is applied. MaxPutSize int // MaxDeleteSize is the maximum number of entities that can be referenced in a // single DeleteMulti call. If <= 0, no constraint is applied. MaxDeleteSize int }
Constraints represent implementation constraints.
A zero-value Constraints is valid, and indicates that no constraints are present.
type CursorCB ¶
CursorCB is used to obtain a Cursor while Run'ing a query on either Interface or RawInterface.
It can be invoked to obtain the current cursor.
type DeleteMultiCB ¶
DeleteMultiCB is the callback signature provided to RawInterface.DeleteMulti
- idx is the index of the entity, ranging from 0 through len-1.
- err is an error associated with deleting this entity.
The callback is called once per element. It may be called concurrently, and may be called out of order. The "idx" variable describes which element is being processed. If any callbacks are invoked, exactly one callback will be invoked for each supplied element.
type DroppedArgLookup ¶
type DroppedArgLookup []idxPair
DroppedArgLookup is returned from using a DroppedArgTracker.
It can be used to recover the index from the original slice by providing the reduced slice index.
func (DroppedArgLookup) OriginalIndex ¶
func (dal DroppedArgLookup) OriginalIndex(reducedIndex int) int
OriginalIndex maps from an index into the array(s) returned from MustDrop back to the corresponding index in the original arrays.
type DroppedArgTracker ¶
type DroppedArgTracker []int
DroppedArgTracker is used to track dropping items from Keys as well as meta and/or PropertyMap arrays from one layer of the RawInterface to the next.
If you're not writing a datastore backend implementation (like "go.chromium.org/gae/impl/*"), then you can ignore this type.
For example, say your GetMulti method was passed 4 arguments, but one of them was bad. DroppedArgTracker would allow you to "drop" the bad entry, and then synthesize new keys/meta/values arrays excluding the bad entry. You could then map from the new arrays back to the indexes of the original arrays.
This DroppedArgTracker will do no allocations if you don't end up dropping any arguments (so in the 'good' case, there are zero allocations).
Example:
Say we're given a list of arguments which look like ("_" means a bad value that we drop): input: A B _ C D _ _ E Idxs: 0 1 2 3 4 5 6 7 dropped: 2 5 6 DropKeys(input): A B C D E 0 1 2 3 4 OriginalIndex(0) -> 0 OriginalIndex(1) -> 1 OriginalIndex(2) -> 3 OriginalIndex(3) -> 4 OriginalIndex(4) -> 7
Methods on this type are NOT goroutine safe.
func (DroppedArgTracker) DropKeys ¶
func (dat DroppedArgTracker) DropKeys(keys []*Key) ([]*Key, DroppedArgLookup)
DropKeys returns a compressed version of `keys`, dropping all elements which were marked with MarkForRemoval.
func (DroppedArgTracker) DropKeysAndMeta ¶
func (dat DroppedArgTracker) DropKeysAndMeta(keys []*Key, meta MultiMetaGetter) ([]*Key, MultiMetaGetter, DroppedArgLookup)
DropKeysAndMeta returns a compressed version of `keys` and `meta`, dropping all elements which were marked with MarkForRemoval.
`keys` and `meta` must have the same lengths.
func (DroppedArgTracker) DropKeysAndVals ¶
func (dat DroppedArgTracker) DropKeysAndVals(keys []*Key, vals []PropertyMap) ([]*Key, []PropertyMap, DroppedArgLookup)
DropKeysAndVals returns a compressed version of `keys` and `vals`, dropping all elements which were marked with MarkForRemoval.
`keys` and `vals` must have the same lengths.
func (*DroppedArgTracker) MarkForRemoval ¶
func (dat *DroppedArgTracker) MarkForRemoval(originalIndex, N int)
MarkForRemoval tracks `originalIndex` for removal when `Drop*` methods are called.
N is a size hint for the maximum number of entries that `dat` could have. If `dat` has a capacity of < N, it will be allocated to N.
If called with N == len(args) and originalIndex is always increasing, then this will only do one allocation for the life of this DroppedArgTracker, and each MarkForRemoval will only cost a single slice append. If called out of order, or with a bad value of N, this will do more allocations and will do a binary search on each call.
func (*DroppedArgTracker) MarkNilKeys ¶
func (dat *DroppedArgTracker) MarkNilKeys(keys []*Key)
MarkNilKeys is a helper method which calls MarkForRemoval for each nil key.
func (*DroppedArgTracker) MarkNilKeysMeta ¶
func (dat *DroppedArgTracker) MarkNilKeysMeta(keys []*Key, meta MultiMetaGetter)
MarkNilKeysMeta is a helper method which calls MarkForRemoval for each nil key or meta.
func (*DroppedArgTracker) MarkNilKeysVals ¶
func (dat *DroppedArgTracker) MarkNilKeysVals(keys []*Key, vals []PropertyMap)
MarkNilKeysVals is a helper method which calls MarkForRemoval for each nil key or value.
type ErrFieldMismatch ¶
ErrFieldMismatch is returned when a field is to be loaded into a different type than the one it was stored from, or when a field is missing or unexported in the destination struct. StructType is the type of the struct pointed to by the destination argument passed to Get or to Iterator.Next.
func (*ErrFieldMismatch) Error ¶
func (e *ErrFieldMismatch) Error() string
type ExistsResult ¶
type ExistsResult struct {
// contains filtered or unexported fields
}
ExistsResult is a 2-dimensional boolean array that represents the existence of entries in the datastore. It is returned by the datastore Exists method. It is designed to accommodate the potentially-nested variadic arguments that can be passed to Exists.
The first dimension contains one entry for each Exists input index. If the argument is a single entry, the boolean value at this index will be true if that argument was present in the datastore and false otherwise. If the argument is a slice, it will contain an aggregate value that is true iff no values in that slice were missing from the datastore.
The second dimension presents a boolean slice for each input argument. Single arguments will have a slice of size 1 whose value corresponds to the first dimension value for that argument. Slice arguments have a slice of the same size. A given index in the second dimension slice is true iff the element at that index was present.
func Exists ¶
func Exists(c context.Context, ent ...interface{}) (*ExistsResult, error)
Exists tests if the supplied objects are present in the datastore.
ent must be one of:
- *S, where S is a struct
- *P, where *P is a concrete type implementing PropertyLoadSaver
- []S or []*S, where S is a struct
- []P or []*P, where *P is a concrete type implementing PropertyLoadSaver
- []I, where I is some interface type. Each element of the slice must be non-nil, and its underlying type must be either *S or *P.
- *Key, to check a specific key from the datastore.
- []*Key, to check a slice of keys from the datastore.
If an error is encountered, the returned error value will depend on the input arguments. If one argument is supplied, the result will be the encountered error type. If multiple arguments are supplied, the result will be a MultiError whose error index corresponds to the argument in which the error was encountered.
If an ent argument is a slice, its error type will be a MultiError. Note that in the scenario where multiple slices are provided, this will return a MultiError containing a nested MultiError for each slice argument.
func (*ExistsResult) All ¶
func (r *ExistsResult) All() bool
All returns true if all of the available boolean slots are true.
func (*ExistsResult) Any ¶
func (r *ExistsResult) Any() bool
Any returns true if any of the boolean slots are true.
func (*ExistsResult) Get ¶
func (r *ExistsResult) Get(i int, j ...int) bool
Get returns the boolean value at the specified index.
The one-argument form returns the first-dimension boolean. If i is a slice argument, this will be true iff all of the slice's booleans are true.
An optional second argument can be passed to access a specific boolean value in slice i. If the argument at i is a single argument, the only valid index, 0, will be the same as calling the single-argument Get.
Passing more than one additional argument will result in a panic.
func (*ExistsResult) Len ¶
func (r *ExistsResult) Len(i ...int) int
Len returns the number of boolean results available.
The zero-argument form returns the first-dimension size, which will equal the total number of arguments passed to Exists.
The one-argument form returns the number of booleans in the slice for argument i.
Passing more than one argument will result in a panic.
func (*ExistsResult) List ¶
func (r *ExistsResult) List(i ...int) BoolList
List returns the BoolList for the given argument index.
The zero-argument form returns the first-dimension boolean list.
An optional argument can be passed to access a specific argument's boolean slice. If the argument at i is a non-slice argument, the list will be a slice of size 1 containing i's first-dimension value.
Passing more than one argument will result in a panic.
type FinalizedQuery ¶
type FinalizedQuery struct {
// contains filtered or unexported fields
}
FinalizedQuery is the representation of a Query which has been normalized.
It contains only fully-specified, non-redundant, non-conflicting information pertaining to the Query to run. It can only represent a valid query.
func (*FinalizedQuery) Ancestor ¶
func (q *FinalizedQuery) Ancestor() *Key
Ancestor returns the ancestor filter key, if any. This is a convenience function for getting the value from EqFilters()["__ancestor__"].
func (*FinalizedQuery) Bounds ¶
func (q *FinalizedQuery) Bounds() (start, end Cursor)
Bounds returns the start and end Cursors. One or both may be nil. The Cursors returned are implementation-specific depending on the actual RawInterface implementation and the filters installed (if the filters interfere with Cursor production).
func (*FinalizedQuery) Distinct ¶
func (q *FinalizedQuery) Distinct() bool
Distinct returns true iff this is a distinct projection query. It will never be true for non-projection queries.
func (*FinalizedQuery) EqFilters ¶
func (q *FinalizedQuery) EqFilters() map[string]PropertySlice
EqFilters returns all the equality filters. The map key is the field name and the PropertySlice is the values that field should equal.
This includes a special equality filter on "__ancestor__". If "__ancestor__" is present in the result, it's guaranteed to have 1 value in the PropertySlice which is of type *Key.
func (*FinalizedQuery) EventuallyConsistent ¶
func (q *FinalizedQuery) EventuallyConsistent() bool
EventuallyConsistent returns true iff this query will be eventually consistent. This is true when the query is a non-ancestor query, or when it's an ancestor query with the 'EventualConsistency(true)' option set.
func (*FinalizedQuery) GQL ¶
func (q *FinalizedQuery) GQL() string
GQL returns a correctly formatted Cloud Datastore GQL expression which is equivalent to this query.
The flavor of GQL that this emits is defined here:
https://cloud.google.com/datastore/docs/apis/gql/gql_reference
NOTE: Cursors are omitted because there's currently no syntax for literal cursors.
NOTE: GeoPoint values are emitted with speculated future syntax. There is currently no syntax for literal GeoPoint values.
func (*FinalizedQuery) IneqFilterHigh ¶
func (q *FinalizedQuery) IneqFilterHigh() (field, op string, val Property)
IneqFilterHigh returns the field name, operator and value for the high-side inequality filter. If the returned field name is "", it means that there's no upper inequality bound on this query.
If field is non-empty, op may have the values "<" or "<=".
func (*FinalizedQuery) IneqFilterLow ¶
func (q *FinalizedQuery) IneqFilterLow() (field, op string, val Property)
IneqFilterLow returns the field name, operator and value for the low-side inequality filter. If the returned field name is "", it means that there's no lower inequality bound on this query.
If field is non-empty, op may have the values ">" or ">=".
func (*FinalizedQuery) IneqFilterProp ¶
func (q *FinalizedQuery) IneqFilterProp() string
IneqFilterProp returns the inequality filter property name, if one is used for this filter. An empty return value means that this query does not contain any inequality filters.
func (*FinalizedQuery) KeysOnly ¶
func (q *FinalizedQuery) KeysOnly() bool
KeysOnly returns true iff this query will only return keys (as opposed to a normal or projection query).
func (*FinalizedQuery) Kind ¶
func (q *FinalizedQuery) Kind() string
Kind returns the datastore 'Kind' over which this query operates. It may be empty for a kindless query.
func (*FinalizedQuery) Limit ¶
func (q *FinalizedQuery) Limit() (int32, bool)
Limit returns the maximum number of responses this query will retrieve, and a boolean indicating if the limit is set.
func (*FinalizedQuery) Offset ¶
func (q *FinalizedQuery) Offset() (int32, bool)
Offset returns the number of responses this query will skip before returning data, and a boolean indicating if the offset is set.
func (*FinalizedQuery) Orders ¶
func (q *FinalizedQuery) Orders() []IndexColumn
Orders returns the sort orders that this query will use, including all orders implied by the projections, and the implicit __key__ order at the end.
func (*FinalizedQuery) Original ¶
func (q *FinalizedQuery) Original() *Query
Original returns the original Query object from which this FinalizedQuery was derived.
func (*FinalizedQuery) Project ¶
func (q *FinalizedQuery) Project() []string
Project is the list of fields that this query projects on, or empty if this is not a projection query.
func (*FinalizedQuery) String ¶
func (q *FinalizedQuery) String() string
func (*FinalizedQuery) Valid ¶
func (q *FinalizedQuery) Valid(kc KeyContext) error
Valid returns true iff this FinalizedQuery is valid in the provided KeyContext's App ID and Namespace.
This checks the ancestor filter (if any), as well as the inequality filters if they filter on '__key__'.
In particular, it does NOT validate equality filters which happen to have values of type PTKey, nor does it validate inequality filters that happen to have values of type PTKey (but don't filter on the magic '__key__' field).
type GeoPoint ¶
type GeoPoint struct {
Lat, Lng float64
}
GeoPoint represents a location as latitude/longitude in degrees.
You probably shouldn't use these, but their inclusion here is so that the datastore service can interact (and round-trip) correctly with other datastore API implementations.
type GetMultiCB ¶
type GetMultiCB func(idx int, val PropertyMap, err error)
GetMultiCB is the callback signature provided to RawInterface.GetMulti
- idx is the index of the entity, ranging from 0 through len-1.
- val is the data of the entity
- It may be nil if some of the keys to the GetMulti were bad, since all keys are validated before the RPC occurs!
- err is an error associated with this entity (e.g. ErrNoSuchEntity).
The callback is called once per element. It may be called concurrently, and may be called out of order. The "idx" variable describes which element is being processed. If any callbacks are invoked, exactly one callback will be invoked for each supplied element.
type IndexColumn ¶
IndexColumn represents a sort order for a single entity field.
func ParseIndexColumn ¶
func ParseIndexColumn(spec string) (IndexColumn, error)
ParseIndexColumn takes a spec in the form of /\s*-?\s*.+\s*/, and returns an IndexColumn. Examples are:
`- Field `: IndexColumn{Property: "Field", Descending: true} `Something`: IndexColumn{Property: "Something", Descending: false}
`+Field` is invalid. An empty spec is invalid.
func (IndexColumn) GQL ¶
func (i IndexColumn) GQL() string
GQL returns a correctly formatted Cloud Datastore GQL literal which is valid for the `ORDER BY` clause.
The flavor of GQL that this emits is defined here:
https://cloud.google.com/datastore/docs/apis/gql/gql_reference
func (*IndexColumn) MarshalYAML ¶
func (i *IndexColumn) MarshalYAML() (interface{}, error)
MarshalYAML serializes an IndexColumn into an index.yml `property`.
func (IndexColumn) String ¶
func (i IndexColumn) String() string
String returns a human-readable version of this IndexColumn which is compatible with ParseIndexColumn.
func (*IndexColumn) UnmarshalYAML ¶
func (i *IndexColumn) UnmarshalYAML(unmarshal func(interface{}) error) error
UnmarshalYAML deserializes an index.yml `property` into an IndexColumn.
type IndexDefinition ¶
type IndexDefinition struct { Kind string `yaml:"kind"` Ancestor bool `yaml:"ancestor"` SortBy []IndexColumn `yaml:"properties"` }
IndexDefinition holds the parsed definition of a datastore index definition.
func FindAndParseIndexYAML ¶
func FindAndParseIndexYAML(path string) ([]*IndexDefinition, error)
FindAndParseIndexYAML walks up from the directory specified by path until it finds an `index.yaml` or `index.yml` file. If an index YAML file is found, it opens and parses the file, and returns all the indexes found. If path is a relative path, it is converted into an absolute path relative to the calling test file. To determine the path of the calling test file, FindAndParseIndexYAML walks up to a maximum of 100 call stack frames looking for a file ending with `_test.go`.
FindAndParseIndexYAML returns a non-nil error if the root of the drive is reached without finding an index YAML file, if there was an error reading the found index YAML file, or if the calling test file could not be located in the case of a relative path argument.
func ParseIndexYAML ¶
func ParseIndexYAML(content io.Reader) ([]*IndexDefinition, error)
ParseIndexYAML parses the contents of an index YAML file into a list of IndexDefinitions.
func (*IndexDefinition) Builtin ¶
func (id *IndexDefinition) Builtin() bool
Builtin returns true iff the IndexDefinition is one of the automatic built-in indexes.
func (*IndexDefinition) Compound ¶
func (id *IndexDefinition) Compound() bool
Compound returns true iff this IndexDefinition is a valid compound index definition.
NOTE: !Builtin() does not imply Compound().
func (*IndexDefinition) Equal ¶
func (id *IndexDefinition) Equal(o *IndexDefinition) bool
Equal returns true if the two IndexDefinitions are equivalent.
func (*IndexDefinition) Flip ¶
func (id *IndexDefinition) Flip() *IndexDefinition
Flip returns an IndexDefinition with its SortBy field in reverse order.
func (*IndexDefinition) GetFullSortOrder ¶
func (id *IndexDefinition) GetFullSortOrder() []IndexColumn
GetFullSortOrder gets the full sort order for this IndexDefinition, including an extra "__ancestor__" column at the front if this index has Ancestor set to true.
func (*IndexDefinition) Less ¶
func (id *IndexDefinition) Less(o *IndexDefinition) bool
Less returns true iff id is ordered before o.
func (*IndexDefinition) MarshalYAML ¶
func (id *IndexDefinition) MarshalYAML() (interface{}, error)
MarshalYAML serializes an IndexDefinition into an index.yml `index`.
func (*IndexDefinition) Normalize ¶
func (id *IndexDefinition) Normalize() *IndexDefinition
Normalize returns an IndexDefinition which has a normalized SortBy field.
This is just appending __key__ if it's not explicitly the last field in this IndexDefinition.
func (*IndexDefinition) PrepForIdxTable ¶
func (id *IndexDefinition) PrepForIdxTable() *IndexDefinition
PrepForIdxTable normalizes and then flips the IndexDefinition.
func (*IndexDefinition) String ¶
func (id *IndexDefinition) String() string
func (*IndexDefinition) YAMLString ¶
func (id *IndexDefinition) YAMLString() (string, error)
YAMLString returns the YAML representation of this IndexDefinition.
If the index definition is Builtin() or not Compound(), this will return an error.
type IndexSetting ¶
type IndexSetting bool
IndexSetting indicates whether or not a Property should be indexed by the datastore.
const ( ShouldIndex IndexSetting = false NoIndex IndexSetting = true )
ShouldIndex is the default, which is why it must assume the zero value, even though it's weird :(.
func (IndexSetting) String ¶
func (i IndexSetting) String() string
type Key ¶
type Key struct {
// contains filtered or unexported fields
}
Key is the type used for all datastore operations.
func KeyForObj ¶
KeyForObj extracts a key from src.
It is the same as KeyForObjErr, except that if KeyForObjErr would have returned an error, this method panics. It's safe to use if you know that src statically meets the metadata constraints described by KeyForObjErr.
func KeyForObjErr ¶
KeyForObjErr extracts a key from src.
src must be one of:
- *S, where S is a struct
- a PropertyLoadSaver
It is expected that the struct exposes the following metadata (as retrieved by MetaGetter.GetMeta):
- "key" (type: Key) - The full datastore key to use. Must not be nil. OR
- "id" (type: int64 or string) - The id of the Key to create.
- "kind" (optional, type: string) - The kind of the Key to create. If blank or not present, KeyForObjErr will extract the name of the src object's type.
- "parent" (optional, type: Key) - The parent key to use.
By default, the metadata will be extracted from the struct and its tagged properties. However, if the struct implements MetaGetterSetter it is wholly responsible for exporting the required fields. A struct that implements GetMeta to make some minor tweaks can invoke the default behavior by using GetPLS(s).GetMeta.
If a required metadata item is missing or of the wrong type, then this will return an error.
func MakeKey ¶
MakeKey is a convenience method for manufacturing a *Key. It should only be used when elems... is known statically (e.g. in the code) to be correct.
elems is pairs of (string, string|int|int32|int64) pairs, which correspond to Kind/id pairs. Example:
dstore.MakeKey("Parent", 1, "Child", "id")
Would create the key:
<current appID>:<current Namespace>:/Parent,1/Child,id
If elems is not parsable (e.g. wrong length, wrong types, etc.) this method will panic.
func NewIncompleteKeys ¶
NewIncompleteKeys allocates count incomplete keys sharing the same kind and parent. It is useful as input to AllocateIDs.
func NewKey ¶
NewKey constructs a new key in the current appID/Namespace, using the specified parameters.
func NewKeyEncoded ¶
NewKeyEncoded decodes and returns a *Key
func NewKeyToks ¶
NewKeyToks constructs a new key in the current appID/Namespace, using the specified key tokens.
func (*Key) Encode ¶
Encode encodes the provided key as a base64-encoded protobuf.
This encoding is compatible with the SDK-provided encoding and is agnostic to the underlying implementation of the Key.
It's encoded with the urlsafe base64 table without padding.
func (*Key) EstimateSize ¶
EstimateSize estimates the size of a Key.
It uses https://cloud.google.com/appengine/articles/storage_breakdown?csw=1 as a guide for these values.
func (*Key) GQL ¶
GQL returns a correctly formatted Cloud Datastore GQL key literal.
The flavor of GQL that this emits is defined here:
https://cloud.google.com/datastore/docs/apis/gql/gql_reference
func (*Key) HasAncestor ¶
HasAncestor returns true iff other is an ancestor of k (or if other == k).
func (*Key) Incomplete ¶
Incomplete returns an incomplete version of the key. The ID fields of the last token will be set to zero/empty.
func (*Key) IncompleteEqual ¶
IncompleteEqual asserts that, were the two keys incomplete, they would be equal.
This asserts equality for the full lineage of the key, except for its last token ID.
func (*Key) IsIncomplete ¶
IsIncomplete returns true iff the last token of this Key doesn't define either a StringID or an IntID.
func (*Key) KeyContext ¶
func (k *Key) KeyContext() *KeyContext
KeyContext returns the KeyContext that this Key is using.
func (*Key) LastTok ¶
LastTok returns the last KeyTok in this Key. Non-nil Keys are always guaranteed to have at least one token.
func (*Key) MarshalJSON ¶
MarshalJSON allows this key to be automatically marshaled by encoding/json.
func (*Key) Parent ¶
Parent returns the parent Key of this *Key, or nil. The parent will always have the concrete type of *Key.
func (*Key) PartialValid ¶
func (k *Key) PartialValid(kc KeyContext) bool
PartialValid returns true iff this key is suitable for use in a Put operation. This is the same as Valid(k, false, ...), but also allowing k to be IsIncomplete().
func (*Key) Split ¶
Split componentizes the key into pieces (AppID, Namespace and tokens)
Each token represents one piece of the key's 'path'.
toks is guaranteed to be empty if and only if k is nil. If k is non-nil then it contains at least one token.
func (*Key) String ¶
String returns a human-readable representation of the key in the form of
AID:NS:/Kind,id/Kind,id/...
func (*Key) UnmarshalJSON ¶
UnmarshalJSON allows this key to be automatically unmarshaled by encoding/json.
func (*Key) Valid ¶
func (k *Key) Valid(allowSpecial bool, kc KeyContext) bool
Valid determines if a key is valid, according to a couple of rules:
- k is not nil
- every token of k:
- (if !allowSpecial) token's kind doesn't start with '__'
- token's kind and appid are non-blank
- token is not incomplete
- all tokens have the same namespace and appid
type KeyContext ¶
KeyContext is the context in which a key is generated.
func GetKeyContext ¶
func GetKeyContext(c context.Context) KeyContext
GetKeyContext returns the KeyContext whose AppID and Namespace match those installed in the supplied Context.
func MkKeyContext ¶
func MkKeyContext(appID, namespace string) KeyContext
MkKeyContext is a helper function to create a new KeyContext.
It is preferable to field-based struct initialization because, as a function, it has the ability to enforce an exact number of parameters.
func (KeyContext) MakeKey ¶
func (kc KeyContext) MakeKey(elems ...interface{}) *Key
MakeKey is a convenience function for manufacturing a *Key. It should only be used when elems... is known statically (e.g. in the code) to be correct.
elems is pairs of (string, string|int|int32|int64) pairs, which correspond to Kind/id pairs. Example:
MkKeyContext("aid", "namespace").MakeKey("Parent", 1, "Child", "id")
Would create the key:
aid:namespace:/Parent,1/Child,id
If elems is not parsable (e.g. wrong length, wrong types, etc.) this method will panic.
See MakeKey for a version of this function which automatically provides aid and ns.
func (KeyContext) Matches ¶
func (kc KeyContext) Matches(o KeyContext) bool
Matches returns true iff the AppID and Namespace parameters are the same for the two KeyContext instances.
func (KeyContext) NewKey ¶
func (kc KeyContext) NewKey(kind, stringID string, intID int64, parent *Key) *Key
NewKey is a wrapper around NewToks which has an interface similar to NewKey in the SDK.
See NewKey for a version of this function which automatically provides aid and ns.
func (KeyContext) NewKeyToks ¶
func (kc KeyContext) NewKeyToks(toks []KeyTok) *Key
NewKeyToks creates a new Key. It is the Key implementation returned from the various PropertyMap serialization routines, as well as the native key implementation for the in-memory implementation of gae.
See NewKeyToks for a version of this function which automatically provides aid and ns.
type KeyTok ¶
KeyTok is a single token from a multi-part Key.
func (KeyTok) IsIncomplete ¶
IsIncomplete returns true iff this token doesn't define either a StringID or an IntID.
type MetaGetter ¶
type MetaGetter interface { // GetMeta will get information about the field which has the struct tag in // the form of `gae:"$<key>[,<default>]?"`. // // It returns the value, if any, and true iff the value was retrieved. // // Supported metadata types are: // int64 - may have default (ascii encoded base-10) // string - may have default // Toggle - MUST have default ("true" or "false") // *Key - NO default allowed // // Struct fields of type Toggle (which is an Auto/On/Off) require you to // specify a value of 'true' or 'false' for the default value of the struct // tag, and GetMeta will return the combined value as a regular boolean true // or false value. // Example: // type MyStruct struct { // CoolField int64 `gae:"$id,1"` // } // val, err := helper.GetPLS(&MyStruct{}).GetMeta("id") // // val == 1 // // err == nil // // val, err := helper.GetPLS(&MyStruct{10}).GetMeta("id") // // val == 10 // // err == nil // // type MyStruct struct { // TFlag Toggle `gae:"$flag1,true"` // defaults to true // FFlag Toggle `gae:"$flag2,false"` // defaults to false // // BadFlag Toggle `gae:"$flag3"` // ILLEGAL // } GetMeta(key string) (interface{}, bool) }
MetaGetter is a subinterface of PropertyLoadSaver, but is also used to abstract the meta argument for RawInterface.GetMulti.
type MetaGetterSetter ¶
type MetaGetterSetter interface { MetaGetter // GetAllMeta returns a PropertyMap with all of the metadata in this // MetaGetterSetter. If a metadata field has an error during serialization, // it is skipped. // // If a *struct is implementing this, then it only needs to return the // metadata fields which would be returned by its GetMeta implementation, and // the `GetPLS` implementation will add any statically-defined metadata // fields. So if GetMeta provides $id, but there's a simple tagged field for // $kind, this method is only expected to return a PropertyMap with "$id". GetAllMeta() PropertyMap // SetMeta allows you to set the current value of the meta-keyed field. // It returns true iff the field was set. SetMeta(key string, val interface{}) bool }
MetaGetterSetter is the subset of PropertyLoadSaver which pertains to getting and saving metadata.
A *struct may implement this interface to provide metadata which is supplemental to the variety described by GetPLS. For example, this could be used to implement a parsed-out $kind or $id.
type MultiMetaGetter ¶
type MultiMetaGetter []MetaGetter
MultiMetaGetter is a carrier for metadata, used with RawInterface.GetMulti
It's OK to default-construct this. GetMeta will just return (nil, ErrMetaFieldUnset) for every index.
func NewMultiMetaGetter ¶
func NewMultiMetaGetter(data []PropertyMap) MultiMetaGetter
NewMultiMetaGetter returns a new MultiMetaGetter object. data may be nil.
func (MultiMetaGetter) GetMeta ¶
func (m MultiMetaGetter) GetMeta(idx int, key string) (interface{}, bool)
GetMeta is like PropertyLoadSaver.GetMeta, but it also takes an index indicating which slot you want metadata for. If idx isn't there, this returns (nil, ErrMetaFieldUnset).
func (MultiMetaGetter) GetSingle ¶
func (m MultiMetaGetter) GetSingle(idx int) MetaGetter
GetSingle gets a single MetaGetter at the specified index.
type NewKeyCB ¶
NewKeyCB is the callback signature provided to RawInterface.PutMulti and RawInterface.AllocateIDs. It is invoked once for each positional key that was generated as the result of a call.
- idx is the index of the entity, ranging from 0 through len-1.
- key is the new key for the entity (if the original was incomplete)
- It may be nil if some of the keys/vals to the PutMulti were bad, since all keys are validated before the RPC occurs!
- err is an error associated with putting this entity.
The callback is called once per element. It may be called concurrently, and may be called out of order. The "idx" variable describes which element is being processed. If any callbacks are invoked, exactly one callback will be invoked for each supplied element.
type Property ¶
type Property struct {
// contains filtered or unexported fields
}
Property is a value plus an indicator of whether the value should be indexed. Name and Multiple are stored in the PropertyMap object.
func MkProperty ¶
func MkProperty(val interface{}) Property
MkProperty makes a new indexed* Property and returns it. If val is an invalid value, this panics (so don't do it). If you want to handle the error normally, use SetValue(..., ShouldIndex) instead.
*indexed if val is not an unindexable type like []byte.
func MkPropertyNI ¶
func MkPropertyNI(val interface{}) Property
MkPropertyNI makes a new Property (with noindex set to true), and returns it. If val is an invalid value, this panics (so don't do it). If you want to handle the error normally, use SetValue(..., NoIndex) instead.
func (Property) Clone ¶
func (p Property) Clone() PropertyData
Clone implements the PropertyData interface.
func (*Property) Compare ¶
Compare compares this Property to another, returning a trinary value indicating where it would sort relative to the other in datastore.
It returns:
<0 if the Property would sort before `other`. >0 if the Property would sort after `other`. 0 if the Property equals `other`.
This uses datastore's index rules for sorting (see GetIndexTypeAndValue).
func (*Property) Equal ¶
Equal returns true iff p and other have identical index representations.
This uses datastore's index rules for sorting (see GetIndexTypeAndValue).
func (*Property) EstimateSize ¶
EstimateSize estimates the amount of space that this Property would consume if it were committed as part of an entity in the real production datastore.
It uses https://cloud.google.com/appengine/articles/storage_breakdown?csw=1 as a guide for these values.
func (*Property) GQL ¶
GQL returns a correctly formatted Cloud Datastore GQL literal which is valid for a comparison value in the `WHERE` clause.
The flavor of GQL that this emits is defined here:
https://cloud.google.com/datastore/docs/apis/gql/gql_reference
NOTE: GeoPoint values are emitted with speculated future syntax. There is currently no syntax for literal GeoPoint values.
func (*Property) IndexSetting ¶
func (p *Property) IndexSetting() IndexSetting
IndexSetting says whether or not the datastore should create indices for this value.
func (Property) IndexTypeAndValue ¶
func (p Property) IndexTypeAndValue() (PropertyType, interface{})
IndexTypeAndValue returns the type and value of the Property as it would show up in a datastore index.
This is used to operate on the Property as it would be stored in a datastore index, specifically for serialization and comparison.
The returned type will be the PropertyType used in the index. The returned value will be one of:
- bool
- int64
- float64
- string
- []byte
- GeoPoint
- PropertyMap
- *Key
func (*Property) Less ¶
Less returns true iff p would sort before other.
This uses datastore's index rules for sorting (see GetIndexTypeAndValue).
func (*Property) Project ¶
func (p *Property) Project(to PropertyType) (interface{}, error)
Project can be used to project a Property retrieved from a Projection query into a different datatype. For example, if you have a PTInt property, you could Project(PTTime) to convert it to a time.Time. The following conversions are supported:
PTString <-> PTBlobKey PTString <-> PTBytes PTXXX <-> PTXXX (i.e. identity) PTInt <-> PTTime PTNull <-> Anything
func (*Property) SetValue ¶
func (p *Property) SetValue(value interface{}, is IndexSetting) (err error)
SetValue sets the Value field of a Property, and ensures that its value conforms to the permissible types. That way, you're guaranteed that if you have a Property, its value is valid.
value is the property value. The valid types are:
- int64
- time.Time
- bool
- string (only the first 1500 bytes is indexable)
- []byte (only the first 1500 bytes is indexable)
- blobstore.Key (only the first 1500 bytes is indexable)
- float64
- *Key
- GeoPoint
This set is smaller than the set of valid struct field types that the datastore can load and save. A Property Value cannot be a slice (apart from []byte); use multiple Properties instead. Also, a Value's type must be explicitly on the list above; it is not sufficient for the underlying type to be on that list. For example, a Value of "type myInt64 int64" is invalid. Smaller-width integers and floats are also invalid. Again, this is more restrictive than the set of valid struct field types.
A value may also be the nil interface value; this is equivalent to Python's None but not directly representable by a Go struct. Loading a nil-valued property into a struct will set that field to the zero value.
func (Property) Slice ¶
func (p Property) Slice() PropertySlice
Slice implements the PropertyData interface.
func (*Property) Type ¶
func (p *Property) Type() PropertyType
Type is the PT* type of the data contained in Value().
type PropertyConverter ¶
PropertyConverter may be implemented by the pointer-to a struct field which is serialized by the struct PropertyLoadSaver from GetPLS. Its ToProperty will be called on save, and its FromProperty will be called on load (from datastore). The method may do arbitrary computation, and if it encounters an error, may return it. This error will be a fatal error (as defined by PropertyLoadSaver) for the struct conversion.
Example:
type Complex complex func (c *Complex) ToProperty() (ret Property, err error) { // something like: err = ret.SetValue(fmt.Sprint(*c), true) return } func (c *Complex) FromProperty(p Property) (err error) { ... load *c from p ... } type MyStruct struct { Complexity []Complex // acts like []complex, but can be serialized to DS }
type PropertyData ¶
type PropertyData interface { // Slice returns a PropertySlice representation of this PropertyData. // // The returned PropertySlice is a clone of the original data. Consequently, // Property-modifying methods such as SetValue should NOT be // called on the results. Slice() PropertySlice // Clone creates a duplicate copy of this PropertyData. Clone() PropertyData // contains filtered or unexported methods }
PropertyData is an interface implemented by Property and PropertySlice to identify themselves as valid PropertyMap values.
type PropertyLoadSaver ¶
type PropertyLoadSaver interface { // Load takes the values from the given map and attempts to save them into // the underlying object (usually a struct or a PropertyMap). If a fatal // error occurs, it's returned via error. If non-fatal conversion errors // occur, error will be a MultiError containing one or more ErrFieldMismatch // objects. Load(PropertyMap) error // Save returns the current property as a PropertyMap. If withMeta is true, // then the PropertyMap contains all the metadata (e.g. '$meta' fields) // which was held by this PropertyLoadSaver. Save(withMeta bool) (PropertyMap, error) }
PropertyLoadSaver may be implemented by a user type, and Interface will use this interface to serialize the type instead of trying to automatically create a serialization codec for it with helper.GetPLS.
type PropertyMap ¶
type PropertyMap map[string]PropertyData
PropertyMap represents the contents of a datastore entity in a generic way. It maps from property name to a list of property values which correspond to that property name. It is the spiritual successor to PropertyList from the original SDK.
PropertyMap may contain "meta" values, which are keyed with a '$' prefix. Technically the datastore allows arbitrary property names, but all of the SDKs go out of their way to try to make all property names valid programming language tokens. Special values must correspond to a single Property... corresponding to 0 is equivalent to unset, and corresponding to >1 is an error. So:
{ "$id": {MkProperty(1)}, // GetProperty("id") -> 1, nil "$foo": {}, // GetProperty("foo") -> nil, ErrMetaFieldUnset // GetProperty("bar") -> nil, ErrMetaFieldUnset "$meep": { MkProperty("hi"), MkProperty("there")}, // GetProperty("meep") -> nil, error! }
Additionally, Save returns a copy of the map with the meta keys omitted (e.g. these keys are not going to be serialized to the datastore).
func (PropertyMap) Clone ¶
func (pm PropertyMap) Clone() PropertyMap
Clone returns a copy of this PropertyMap.
func (PropertyMap) EstimateSize ¶
func (pm PropertyMap) EstimateSize() int64
EstimateSize estimates the size that it would take to encode this PropertyMap in the production Appengine datastore. The calculation excludes metadata fields in the map.
It uses https://cloud.google.com/appengine/articles/storage_breakdown?csw=1 as a guide for sizes.
func (PropertyMap) GetAllMeta ¶
func (pm PropertyMap) GetAllMeta() PropertyMap
GetAllMeta implements PropertyLoadSaver.GetAllMeta.
func (PropertyMap) GetMeta ¶
func (pm PropertyMap) GetMeta(key string) (interface{}, bool)
GetMeta implements PropertyLoadSaver.GetMeta, and returns the current value associated with the metadata key.
func (PropertyMap) Load ¶
func (pm PropertyMap) Load(props PropertyMap) error
Load implements PropertyLoadSaver.Load
func (PropertyMap) Problem ¶
func (pm PropertyMap) Problem() error
Problem implements PropertyLoadSaver.Problem. It ALWAYS returns nil.
func (PropertyMap) Save ¶
func (pm PropertyMap) Save(withMeta bool) (PropertyMap, error)
Save implements PropertyLoadSaver.Save by returning a copy of the current map data.
func (PropertyMap) SetMeta ¶
func (pm PropertyMap) SetMeta(key string, val interface{}) bool
SetMeta implements PropertyLoadSaver.SetMeta. It will only return an error if `val` has an invalid type (e.g. not one supported by Property).
func (PropertyMap) Slice ¶
func (pm PropertyMap) Slice(key string) PropertySlice
Slice returns a PropertySlice for the given key
If the value associated with that key is nil, an empty slice will be returned. If the value is a single Property, a slice of size 1 with that Property in it will be returned.
type PropertySlice ¶
type PropertySlice []Property
PropertySlice is a slice of Properties. It implements sort.Interface.
PropertySlice holds multiple Properties. Writing a PropertySlice to datastore implicitly marks the property as "multiple", even if it only has one element.
func (PropertySlice) Clone ¶
func (s PropertySlice) Clone() PropertyData
Clone implements the PropertyData interface.
func (PropertySlice) Len ¶
func (s PropertySlice) Len() int
func (PropertySlice) Less ¶
func (s PropertySlice) Less(i, j int) bool
func (PropertySlice) Slice ¶
func (s PropertySlice) Slice() PropertySlice
Slice implements the PropertyData interface.
func (PropertySlice) Swap ¶
func (s PropertySlice) Swap(i, j int)
type PropertyType ¶
type PropertyType byte
PropertyType is a single-byte representation of the type of data contained in a Property. The specific values of this type information are chosen so that the types sort according to the order of types as sorted by the datastore.
Note that indexes may only contain values of the following types:
PTNull PTInt PTBool PTFloat PTString PTGeoPoint PTKey
The biggest impact of this is that if you do a Projection query, you'll only get back Properties with the above types (e.g. if you store a PTTime value, then Project on it, you'll get back a PTInt value). For convenience, Property has a Project(PropertyType) method which will side-cast to your intended type. If you project into a structure with the high-level Interface implementation, or use StructPLS, this conversion will be done for you automatically, using the type of the destination field to cast.
const ( // PTNull represents the 'nil' value. This is only directly visible when // reading/writing a PropertyMap. If a PTNull value is loaded into a struct // field, the field will be initialized with its zero value. If a struct with // a zero value is saved from a struct, it will still retain the field's type, // not the 'nil' type. This is in contrast to other GAE languages such as // python where 'None' is a distinct value from the 'zero' value (e.g. a // StringProperty can have the value "" OR None). // // PTNull is a Projection-query type PTNull PropertyType = iota // PTInt is always an int64. // // This is a Projection-query type, and may be projected to PTTime. PTInt PTTime // PTBool represents true or false // // This is a Projection-query type. PTBool // PTBytes represents []byte PTBytes // PTString is used to represent all strings (text). // // PTString is a Projection-query type and may be projected to PTBytes or // PTBlobKey. PTString // PTFloat is always a float64. // // This is a Projection-query type. PTFloat // PTGeoPoint is a Projection-query type. PTGeoPoint // PTKey represents a *Key object. // // PTKey is a Projection-query type. PTKey // PTBlobKey represents a blobstore.Key PTBlobKey // PTPropertyMap represents a PropertyMap object. // // This is typically used to represent GAE *datastore.Entity objects. PTPropertyMap // PTUnknown is a placeholder value which should never show up in reality. // // NOTE: THIS MUST BE LAST VALUE FOR THE init() ASSERTION BELOW TO WORK. PTUnknown )
These constants are in the order described by
https://cloud.google.com/appengine/docs/go/datastore/entities#Go_Value_type_ordering
with a slight divergence for the Int/Time split.
NOTE: this enum can only occupy 7 bits, because we use the high bit to encode indexed/non-indexed, and we additionally require that all valid values and all INVERTED valid values must never equal 0xFF or 0x00. The reason for this constraint is that we must always be able to create a byte that sorts before and after it.
See "./serialize".WriteProperty and "impl/memory".increment for more info.
func PropertyTypeOf ¶
func PropertyTypeOf(v interface{}, checkValid bool) (PropertyType, error)
PropertyTypeOf returns the PT* type of the given Property-compatible value v. If checkValid is true, this method will also ensure that time.Time and GeoPoint have valid values.
func (PropertyType) String ¶
func (i PropertyType) String() string
type Query ¶
type Query struct {
// contains filtered or unexported fields
}
Query is a builder-object for building a datastore query. It may represent an invalid query, but the error will only be observable when you call Finalize.
A Query is, for the most part, not goroutine-safe. However, it is
func NewQuery ¶
NewQuery returns a new Query for the given kind. The kind may be empty to begin a kindless query.
func (*Query) Ancestor ¶
Ancestor sets the ancestor filter for this query.
If ancestor is nil, then this removes the Ancestor restriction from the query.
func (*Query) ClearFilters ¶
ClearFilters clears all equality and inequality filters from the Query. It does not clear the Ancestor filter if one is defined.
func (*Query) ClearOrder ¶
ClearOrder removes all orders from this Query.
func (*Query) ClearProject ¶
ClearProject removes all projected fields from this Query.
func (*Query) Distinct ¶
Distinct makes a projection query only return distinct values. This has no effect on non-projection queries.
func (*Query) End ¶
End sets the ending cursor. The cursor is implementation-defined by the particular 'impl' you have installed.
func (*Query) Eq ¶
Eq adds one or more equality restrictions to the query.
Equality filters interact with multiply-defined properties by ensuring that the given field has /at least one/ value which is equal to the specified constraint.
So a query with `.Eq("thing", 1, 2)` will only return entities where the field "thing" is multiply defined and contains both a value of 1 and a value of 2.
`Eq("thing", 1).Eq("thing", 2)` and `.Eq("thing", 1, 2)` have identical meaning.
func (*Query) EventualConsistency ¶
EventualConsistency changes the EventualConsistency setting for this query.
It only has an effect on Ancestor queries and is otherwise ignored.
func (*Query) Finalize ¶
func (q *Query) Finalize() (*FinalizedQuery, error)
Finalize converts this Query to a FinalizedQuery. If the Query has any inconsistencies or violates any of the query rules, that will be returned here.
func (*Query) FirestoreMode ¶
FirestoreMode sets the firestore mode. It removes internal checks for this Query which don't apply when using Firestore-in-Datastore mode. All Datastore queries become strongly consistent if firestoreMode is set to true. The eventualConsistency flag will be ignored.
In particular, it allows non-ancestor queries within a transaction.
func (*Query) GetFirestoreMode ¶
GetFirestoreMode returns the firestore mode.
func (*Query) Gt ¶
Gt imposes a 'greater-than' inequality restriction on the Query.
Inequality filters interact with multiply-defined properties by ensuring that the given field has /exactly one/ value which matches /all/ of the inequality constraints.
So a query with `.Gt("thing", 5).Lt("thing", 10)` will only return entities where the field "thing" has a single value where `5 < val < 10`.
func (*Query) Gte ¶
Gte imposes a 'greater-than-or-equal' inequality restriction on the Query.
Inequality filters interact with multiply-defined properties by ensuring that the given field has /exactly one/ value which matches /all/ of the inequality constraints.
So a query with `.Gt("thing", 5).Lt("thing", 10)` will only return entities where the field "thing" has a single value where `5 < val < 10`.
func (*Query) KeysOnly ¶
KeysOnly makes this into a query which only returns keys (but doesn't fetch values). It's incompatible with projection queries.
func (*Query) Limit ¶
Limit sets the limit (max items to return) for this query. If limit < 0, this removes the limit from the query entirely.
func (*Query) Lt ¶
Lt imposes a 'less-than' inequality restriction on the Query.
Inequality filters interact with multiply-defined properties by ensuring that the given field has /exactly one/ value which matches /all/ of the inequality constraints.
So a query with `.Gt("thing", 5).Lt("thing", 10)` will only return entities where the field "thing" has a single value where `5 < val < 10`.
func (*Query) Lte ¶
Lte imposes a 'less-than-or-equal' inequality restriction on the Query.
Inequality filters interact with multiply-defined properties by ensuring that the given field has /exactly one/ value which matches /all/ of the inequality constraints.
So a query with `.Gt("thing", 5).Lt("thing", 10)` will only return entities where the field "thing" has a single value where `5 < val < 10`.
func (*Query) Offset ¶
Offset sets the offset (number of items to skip) for this query. If offset < 0, this removes the offset from the query entirely.
type RawFactory ¶
type RawFactory func(c context.Context) RawInterface
RawFactory is the function signature for factory methods compatible with SetRawFactory.
type RawFilter ¶
type RawFilter func(context.Context, RawInterface) RawInterface
RawFilter is the function signature for a RawFilter implementation. It gets the current RDS implementation, and returns a new RDS implementation backed by the one passed in.
type RawInterface ¶
type RawInterface interface { // AllocateIDs allows you to allocate IDs from the datastore without putting // any data. The supplied keys must be PartialValid and share the same entity // type. // // If there's no error, the keys in the slice will be replaced with keys // containing integer IDs assigned to them. AllocateIDs(keys []*Key, cb NewKeyCB) error // RunInTransaction runs f in a transaction. // // opts may be nil. // // NOTE: Implementations and filters are guaranteed that: // - f is not nil RunInTransaction(f func(c context.Context) error, opts *TransactionOptions) error // DecodeCursor converts a string returned by a Cursor into a Cursor instance. // It will return an error if the supplied string is not valid, or could not // be decoded by the implementation. DecodeCursor(s string) (Cursor, error) // Run executes the given query, and calls `cb` for each successfully retrieved item. // // NOTE: Implementations and filters are guaranteed that: // - query is not nil // - cb is not nil Run(q *FinalizedQuery, cb RawRunCB) error // Count executes the given query and returns the number of entries which // match it. Count(q *FinalizedQuery) (int64, error) // GetMulti retrieves items from the datastore. // // If there was a server error, it will be returned directly. Otherwise, // callback will execute once per key/value pair, returning either the // operation result or individual error for each position. If the callback // receives an error, it will immediately forward that error and stop // subsequent callbacks. // // meta is used to propagate metadata from higher levels. // // NOTE: Implementations and filters are guaranteed that: // - len(keys) > 0 // - all keys are Valid, !Incomplete, and in the current namespace // - cb is not nil GetMulti(keys []*Key, meta MultiMetaGetter, cb GetMultiCB) error // PutMulti writes items to the datastore. // // If there was a server error, it will be returned directly. 
Otherwise, // callback will execute once per key/value pair, returning either the // operation result or individual error for each position. If the callback // receives an error, it will immediately forward that error and stop // subsequent callbacks. // // NOTE: Implementations and filters are guaranteed that: // - len(keys) > 0 // - len(keys) == len(vals) // - all keys are Valid and in the current namespace // - cb is not nil PutMulti(keys []*Key, vals []PropertyMap, cb NewKeyCB) error // DeleteMulti removes items from the datastore. // // If there was a server error, it will be returned directly. Otherwise, // callback will execute once per key/value pair, returning either the // operation result or individual error for each position. If the callback // receives an error, it will immediately forward that error and stop // subsequent callbacks. // // NOTE: Implementations and filters are guaranteed that // - len(keys) > 0 // - all keys are Valid, !Incomplete, and in the current namespace // - none of the keys are 'special' (use a kind prefixed with '__') // - cb is not nil DeleteMulti(keys []*Key, cb DeleteMultiCB) error // WithoutTransaction returns a derived Context without a transaction applied. // This may be called even when outside of a transaction, in which case the // input Context is a valid return value. WithoutTransaction() context.Context // CurrentTransaction returns a reference to the current Transaction, or nil // if the Context does not have a current Transaction. CurrentTransaction() Transaction // Constraints returns this implementation's constraints. Constraints() Constraints // GetTestable returns the Testable interface for the implementation, or nil // if there is none. GetTestable() Testable }
RawInterface implements the datastore functionality without any of the fancy reflection stuff. This is so that Filters can avoid doing lots of redundant reflection work. See datastore.Interface for a more user-friendly interface.
func Raw ¶
func Raw(c context.Context) RawInterface
Raw gets the RawInterface implementation from context.
type RawRunCB ¶
type RawRunCB func(key *Key, val PropertyMap, getCursor CursorCB) error
RawRunCB is the callback signature provided to RawInterface.Run
- key is the Key of the entity
- val is the data of the entity (or nil, if the query was keys-only)
Return nil to continue iterating through the query results, or an error to stop. If you return the error `Stop`, then Run will stop the query and return nil.
type Testable ¶
type Testable interface { // AddIndex adds the provided index. // Blocks all datastore access while the index is built. // Panics if any of the IndexDefinition objects are not Compound() AddIndexes(...*IndexDefinition) // TakeIndexSnapshot allows you to take a snapshot of the current index // tables, which can be used later with SetIndexSnapshot. TakeIndexSnapshot() TestingSnapshot // SetIndexSnapshot allows you to set the state of the current index tables. // Note that this would allow you to create 'non-linearities' in the perceived // index results (e.g. you could force the indexes to go back in time). // // SetIndexSnapshot takes a reference of the given TestingSnapshot. You're // still responsible for closing the snapshot after this call. SetIndexSnapshot(TestingSnapshot) // CatchupIndexes catches the index table up to the current state of the // datastore. This is equivalent to: // idxSnap := TakeIndexSnapshot() // SetIndexSnapshot(idxSnap) // // But depending on the implementation it may be implemented with an atomic // operation. CatchupIndexes() // SetTransactionRetryCount sets how many times RunInTransaction will retry // transaction body pretending transaction conflicts happen. 0 (default) // means commit succeeds on the first attempt (no retries). SetTransactionRetryCount(int) // Consistent controls the eventual consistency behavior of the testing // implementation. If it is called with true, then this datastore // implementation will be always-consistent, instead of eventually-consistent. // // By default the datastore is eventually consistent, and you must call // CatchupIndexes or use Take/SetIndexSnapshot to manipulate the index state. Consistent(always bool) // AutoIndex controls the index creation behavior. If it is set to true, then // any time the datastore encounters a missing index, it will silently create // one and allow the query to succeed. 
If it's false, then the query will // return an error describing the index which could be added with AddIndexes. // // By default this is false. AutoIndex(bool) // DisableSpecialEntities turns off maintenance of special __entity_group__ // type entities. By default this maintenance is enabled, but it can be // disabled by calling this with true. // // If it's true: // - AllocateIDs returns an error. // - Put'ing incomplete Keys returns an error. // - Transactions are disabled and will return an error. // // This is mainly only useful when using an embedded in-memory datastore as // a fully-consistent 'datastore-lite'. In particular, this is useful for the // txnBuf filter which uses it to fulfil queries in a buffered transaction, // but never wants the in-memory versions of these entities to bleed through // to the user code. DisableSpecialEntities(bool) // ShowSpecialProperties disables stripping of special properties added by // the datastore internally (like __scatter__) from result of Get calls. // // Normally such properties are used internally by the datastore or only for // queries. Returning them explicitly is useful for assertions in tests that // rely on queries over special properties. ShowSpecialProperties(bool) // SetConstraints sets this instance's constraints. If the supplied // constraints are invalid, an error will be returned. // // If c is nil, default constraints will be set. SetConstraints(c *Constraints) error }
Testable is the testable interface for fake datastore implementations.
func GetTestable ¶
GetTestable returns the Testable interface for the implementation, or nil if there is none.
type TestingSnapshot ¶
type TestingSnapshot interface {
ImATestingSnapshot()
}
TestingSnapshot is an opaque implementation-defined snapshot type.
type Toggle ¶
type Toggle byte
Toggle is a tri-state boolean (Auto/True/False), which allows structs to control boolean flags for metadata in a non-ambiguous way.
These are the allowed values for Toggle. Any other values are invalid.
type Transaction ¶
type Transaction interface{}
Transaction is a generic interface used to describe a Datastore transaction.
The nil Transaction represents no transaction context.
TODO: Add some functionality here. Ideas include:
- Active() bool: is the transaction currently active?
- AffectedGroups() []*ds.Key: list the groups that have been referenced in this Transaction so far.
func CurrentTransaction ¶
func CurrentTransaction(c context.Context) Transaction
CurrentTransaction returns a reference to the current Transaction, or nil if the Context does not have a current Transaction.
type TransactionOptions ¶
type TransactionOptions struct { // Attempts controls the number of retries to perform when commits fail // due to a conflicting transaction. If omitted, it defaults to 3. Attempts int // ReadOnly controls whether the transaction is a read only transaction. // Read only transactions are potentially more efficient. ReadOnly bool }
TransactionOptions are the options for running a transaction.
Source Files ¶
Directories ¶
Path | Synopsis |
---|---|
Package dumper implements a very VERY dumb datastore-dumping debugging aid.
|
Package dumper implements a very VERY dumb datastore-dumping debugging aid. |
internal
|
|
protos/datastore
Package datastore is a generated protocol buffer package.
|
Package datastore is a generated protocol buffer package. |
Package meta contains some methods for interacting with GAE's metadata APIs.
|
Package meta contains some methods for interacting with GAE's metadata APIs. |
Package serialize provides methods for reading and writing concatenable, bytewise-sortable forms of the datatypes defined in the datastore package.
|
Package serialize provides methods for reading and writing concatenable, bytewise-sortable forms of the datatypes defined in the datastore package. |