Documentation ¶
Overview ¶
Copyright 2020 Zhizhesihai (Beijing) Technology Limited.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index ¶
- Constants
- Variables
- func BoolProto(b bool) *tspb.Value
- func BoolType() *tspb.Type
- func BytesProto(b []byte) *tspb.Value
- func BytesType() *tspb.Type
- func DateKind(d civil.Date) *tspb.Value_TimestampValue
- func DateProto(d civil.Date) *tspb.Value
- func DateType() *tspb.Type
- func ErrCode(err error) codes.Code
- func ErrDesc(err error) string
- func FloatProto(n float64) *tspb.Value
- func FloatType() *tspb.Type
- func IntProto(n int64) *tspb.Value
- func IntType() *tspb.Type
- func ListType(t *tspb.Type) *tspb.Type
- func NullProto() *tspb.Value
- func ParseTarget(target string) (ret resolver.Target)
- func StringProto(s string) *tspb.Value
- func StringType() *tspb.Type
- func TimeProto(t time.Time) *tspb.Value
- func TimeType() *tspb.Type
- type AdminClient
- func (ac *AdminClient) AddColumn(ctx context.Context, table string, columnFamilies []*tspb.ColumnFamilyMeta, ...) error
- func (ac *AdminClient) AlterTable(ctx context.Context, db string, newTableMeta *tspb.TableMeta) error
- func (ac *AdminClient) CreateDatabase(ctx context.Context, dbName string, attributes map[string]string) (*tspb.DatabaseMeta, error)
- func (ac *AdminClient) CreateIndex(ctx context.Context, db, table string, indexMeta *tspb.IndexMeta) error
- func (ac *AdminClient) CreateTable(ctx context.Context, db string, tableMeta *tspb.TableMeta, ...) error
- func (ac *AdminClient) DeleteColumn(ctx context.Context, db, table string, columnFamilies, columns []string) error
- func (ac *AdminClient) DeleteDatabase(ctx context.Context, dbID int64, dbName string) error
- func (ac *AdminClient) DropIndex(ctx context.Context, db, table, index string) error
- func (ac *AdminClient) DropTable(ctx context.Context, db, table string) error
- func (ac *AdminClient) GetDatabase(ctx context.Context, dbID int64, dbName string) (*tspb.DatabaseMeta, error)
- func (ac *AdminClient) GetIndex(ctx context.Context, db, table, index string) (*tspb.IndexMeta, error)
- func (ac *AdminClient) ListDatabase(ctx context.Context, parent, pageToken string, pageSize int32) ([]*tspb.DatabaseMeta, string, error)
- func (ac *AdminClient) ListIndex(ctx context.Context, db, table string) ([]*tspb.IndexMeta, error)
- func (ac *AdminClient) ListTables(ctx context.Context, database, pageToken string, pageSize int32) ([]*tspb.TableMeta, string, error)
- func (ac *AdminClient) TruncateTable(ctx context.Context, db, table string) error
- func (ac *AdminClient) UpdateColumn(ctx context.Context, db, table string, columnFamilies *tspb.ColumnFamilyMeta, ...) error
- func (ac *AdminClient) UpdateDatabase(ctx context.Context, dbName string, newAttributes map[string]string) (*tspb.DatabaseMeta, error)
- type ApplyOption
- type DataClient
- func (dc *DataClient) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error)
- func (dc *DataClient) Close()
- func (dc *DataClient) Mutate(ctx context.Context, rawMS ...*Mutation) error
- func (dc *DataClient) Read(ctx context.Context, table string, keys KeySet, index string, columns []string, ...) (*tspb.ResultSet, error)
- func (dc *DataClient) ReadOnlyTransaction() *ReadOnlyTransaction
- func (dc *DataClient) ReadWriteTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error)
- func (dc *DataClient) Single() *ReadOnlyTransaction
- func (dc *DataClient) SparseRead(ctx context.Context, table, family string, rows []*SparseRow, limit int64) (*SparseResultSet, error)
- type DataClientConfig
- type Error
- type GenericColumnValue
- type Key
- type KeyRange
- type KeyRangeKind
- type KeySet
- type Mutation
- func Delete(table string, ks KeySet, columns ...string) *Mutation
- func DeleteKeyRange(table string, r KeyRange, columns ...string) *Mutation
- func Insert(table string, cols []string, vals []interface{}) *Mutation
- func InsertMap(table string, in map[string]interface{}) *Mutation
- func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation
- func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation
- func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error)
- func InsertStruct(table string, in interface{}) (*Mutation, error)
- func Replace(table string, cols []string, vals []interface{}) *Mutation
- func ReplaceMap(table string, in map[string]interface{}) *Mutation
- func ReplaceStruct(table string, in interface{}) (*Mutation, error)
- func Update(table string, cols []string, vals []interface{}) *Mutation
- func UpdateMap(table string, in map[string]interface{}) *Mutation
- func UpdateStruct(table string, in interface{}) (*Mutation, error)
- type NullBool
- type NullDate
- type NullFloat64
- type NullInt64
- type NullRow
- type NullString
- type NullTime
- type ReadOnlyTransaction
- func (t *ReadOnlyTransaction) Close()
- func (t *ReadOnlyTransaction) Query(ctx context.Context, statement Statement) *RowIterator
- func (t *ReadOnlyTransaction) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator
- func (t *ReadOnlyTransaction) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error)
- func (t *ReadOnlyTransaction) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator
- func (t *ReadOnlyTransaction) Timestamp() (time.Time, error)
- func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction
- type ReadWriteTransaction
- func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error
- func (t *ReadWriteTransaction) Query(ctx context.Context, statement Statement) *RowIterator
- func (t *ReadWriteTransaction) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator
- func (t *ReadWriteTransaction) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error)
- func (t *ReadWriteTransaction) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator
- type Row
- func (r *Row) Column(i int, ptr interface{}) error
- func (r *Row) ColumnByName(name string, ptr interface{}) error
- func (r *Row) ColumnIndex(name string) (int, error)
- func (r *Row) ColumnName(i int) string
- func (r *Row) ColumnNames() []string
- func (r *Row) Columns(ptrs ...interface{}) error
- func (r *Row) ConvertToStruct(p interface{}) error
- func (r *Row) Size() int
- func (r *Row) ToStruct(p interface{}) error
- type RowIterator
- type SessionPoolConfig
- type SparseResultSet
- type SparseRow
- type Statement
- type TimestampBound
Constants ¶
const (
    SPARSE_READ = "sparseread"
)
Variables ¶
var (
    // mutations
    ERR_MUTATION_EMPTY      = errors.New("empty mutations")
    ERR_MUTATION_DIFF_TABLE = errors.New("mutations not in same table")

    // sessions
    ERR_SESSION_POOL_INVALID = errors.New("current session pool is invalid")
    ERR_SESSION_POOL_TIMEOUT = errors.New("get next session timeout")
    ErrInvalidSessionPool    = zettaErrorf(codes.InvalidArgument, "invalid session pool")
    ErrGetSessionTimeout     = zettaErrorf(codes.Canceled, "timeout / context canceled during getting session")
)
var DefaultRetryBackoff = gax.Backoff{
    Initial:    20 * time.Millisecond,
    Max:        32 * time.Second,
    Multiplier: 1.3,
}
DefaultRetryBackoff is used for retryers as a fallback value when the server did not return any retry information.
var DefaultSessionPoolConfig = SessionPoolConfig{
    MinOpened:           10,
    MaxOpened:           numChannels * 100,
    MaxBurst:            10,
    WriteSessions:       0.2,
    HealthCheckWorkers:  10,
    HealthCheckInterval: 5 * time.Minute,
}
DefaultSessionPoolConfig is the default configuration for the session pool that will be used for a Spanner client, unless the user supplies a specific session pool config.
Functions ¶
func BytesProto ¶
func FloatProto ¶
func ParseTarget ¶
ParseTarget splits target into a resolver.Target struct containing scheme, authority and endpoint.
If target is not a valid scheme://authority/endpoint, it returns {Endpoint: target}.
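A minimal usage sketch (the scheme, authority, and endpoint below are hypothetical):

    // Target with scheme and authority.
    t := ParseTarget("zetta://pd-cluster/127.0.0.1:2379")
    // t.Scheme == "zetta", t.Authority == "pd-cluster", t.Endpoint == "127.0.0.1:2379"

    // A target that is not scheme://authority/endpoint falls back to {Endpoint: target}.
    t = ParseTarget("127.0.0.1:2379")
    // t.Endpoint == "127.0.0.1:2379"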
func StringProto ¶
Helpers to generate protobuf values and Cloud Spanner types.
func StringType ¶
Types ¶
type AdminClient ¶
type AdminClient struct {
// contains filtered or unexported fields
}
func NewAdminClient ¶
func NewAdminClient(addr string) (*AdminClient, error)
func (*AdminClient) AddColumn ¶
func (ac *AdminClient) AddColumn(ctx context.Context, table string, columnFamilies []*tspb.ColumnFamilyMeta, columns []*tspb.ColumnMeta) error
Adds a column to the specified table.
func (*AdminClient) AlterTable ¶
func (ac *AdminClient) AlterTable(ctx context.Context, db string, newTableMeta *tspb.TableMeta) error
AlterTable modifies a table's metadata.
func (*AdminClient) CreateDatabase ¶
func (ac *AdminClient) CreateDatabase(ctx context.Context, dbName string, attributes map[string]string) (*tspb.DatabaseMeta, error)
CreateDatabase creates a database.
func (*AdminClient) CreateIndex ¶
func (ac *AdminClient) CreateIndex(ctx context.Context, db, table string, indexMeta *tspb.IndexMeta) error
CreateIndex creates an index on a table.
func (*AdminClient) CreateTable ¶
func (ac *AdminClient) CreateTable(ctx context.Context, db string, tableMeta *tspb.TableMeta, indexMetas []*tspb.IndexMeta) error
Create a table
func (*AdminClient) DeleteColumn ¶
func (ac *AdminClient) DeleteColumn(ctx context.Context, db, table string, columnFamilies, columns []string) error
Deletes a column from the specified table. Table must be disabled.
func (*AdminClient) DeleteDatabase ¶
DeleteDatabase deletes a database.
func (*AdminClient) DropIndex ¶
func (ac *AdminClient) DropIndex(ctx context.Context, db, table, index string) error
Drop table index
func (*AdminClient) DropTable ¶
func (ac *AdminClient) DropTable(ctx context.Context, db, table string) error
Drop a table
func (*AdminClient) GetDatabase ¶
func (ac *AdminClient) GetDatabase(ctx context.Context, dbID int64, dbName string) (*tspb.DatabaseMeta, error)
Get a Database descriptor by name
func (*AdminClient) GetIndex ¶
func (ac *AdminClient) GetIndex(ctx context.Context, db, table, index string) (*tspb.IndexMeta, error)
Get table index
func (*AdminClient) ListDatabase ¶
func (ac *AdminClient) ListDatabase(ctx context.Context, parent, pageToken string, pageSize int32) ([]*tspb.DatabaseMeta, string, error)
ListDatabase returns a list of databases.
func (*AdminClient) ListTables ¶
func (ac *AdminClient) ListTables(ctx context.Context, database, pageToken string, pageSize int32) ([]*tspb.TableMeta, string, error)
ListTables returns a list of tables.
func (*AdminClient) TruncateTable ¶
func (ac *AdminClient) TruncateTable(ctx context.Context, db, table string) error
Truncate a table
func (*AdminClient) UpdateColumn ¶
func (ac *AdminClient) UpdateColumn(ctx context.Context, db, table string, columnFamilies *tspb.ColumnFamilyMeta, columns *tspb.ColumnMeta) error
Modifies an existing column on the specified table.
func (*AdminClient) UpdateDatabase ¶
func (ac *AdminClient) UpdateDatabase(ctx context.Context, dbName string, newAttributes map[string]string) (*tspb.DatabaseMeta, error)
Modify a Database's metadata
type ApplyOption ¶
type ApplyOption func(*applyOption)
An ApplyOption is an optional argument to Apply.
func ApplyAtLeastOnce ¶
func ApplyAtLeastOnce() ApplyOption
ApplyAtLeastOnce returns an ApplyOption that removes replay protection.
With this option, Apply may attempt to apply mutations more than once; if the mutations are not idempotent, this may lead to a failure being reported when the mutation was applied more than once. For example, an insert may fail with ALREADY_EXISTS even though the row did not exist before Apply was called. For this reason, most users of the library will prefer not to use this option. However, ApplyAtLeastOnce requires only a single RPC, whereas Apply's default replay protection may require an additional RPC. So this option may be appropriate for latency sensitive and/or high throughput blind writing.
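A minimal sketch of opting in to at-least-once semantics, following the naming used in the Mutation examples below (package imported as spanner, client a *DataClient, ctx a context.Context; the table and values are hypothetical):

    m := spanner.InsertOrUpdate("User",
        []string{"user_id", "profile"},
        []interface{}{UserID, profile})
    // Trade replay protection for a single RPC; reasonable here because
    // InsertOrUpdate with the same values is idempotent.
    _, err := client.Apply(ctx, []*spanner.Mutation{m}, spanner.ApplyAtLeastOnce())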
type DataClient ¶
type DataClient struct {
// contains filtered or unexported fields
}
func NewDataClient ¶
func NewDataClient(ctx context.Context, serverAddr, dbName string, conf DataClientConfig) (*DataClient, error)
func (*DataClient) Apply ¶
func (dc *DataClient) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error)
Apply applies a list of mutations atomically to the database.
func (*DataClient) Mutate ¶
func (dc *DataClient) Mutate(ctx context.Context, rawMS ...*Mutation) error
Mutate applies raw mutations; all mutations in a single call must target the same table.
func (*DataClient) ReadOnlyTransaction ¶
func (dc *DataClient) ReadOnlyTransaction() *ReadOnlyTransaction
ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for multiple reads from the database. You must call Close() when the ReadOnlyTransaction is no longer needed to release resources on the server.
ReadOnlyTransaction will use a strong TimestampBound by default. Use ReadOnlyTransaction.WithTimestampBound to specify a different TimestampBound. A non-strong bound can be used to reduce latency, or "time-travel" to prior versions of the database, see the documentation of TimestampBound for details.
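A usage sketch (assuming client is a *DataClient and ctx a context.Context; the tables, keys, and column names are hypothetical):

    ro := client.ReadOnlyTransaction()
    defer ro.Close() // release server-side resources

    // Both operations observe the same snapshot of the database.
    row, err := ro.ReadRow(ctx, "User", spanner.Key{userID}, []string{"profile"})
    if err != nil {
        // handle error
    }
    iter := ro.Read(ctx, "Order", spanner.AllKeys(), []string{"order_id", "amount"})
    defer iter.Stop()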
func (*DataClient) ReadWriteTransaction ¶
func (dc *DataClient) ReadWriteTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error)
ReadWriteTransaction executes a read-write transaction, with retries as necessary.
The function f will be called one or more times. It must not maintain any state between calls.
If the transaction cannot be committed or if f returns an IsAborted error, ReadWriteTransaction will call f again. It will continue to call f until the transaction can be committed or the Context times out or is cancelled. If f returns an error other than IsAborted, ReadWriteTransaction will abort the transaction and return the error.
To limit the number of retries, set a deadline on the Context rather than using a fixed limit on the number of attempts. ReadWriteTransaction will retry as needed until that deadline is met.
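A retry-safe sketch (hypothetical table and columns; note that the callback receives only the transaction, so it closes over ctx):

    _, err := client.ReadWriteTransaction(ctx, func(txn *spanner.ReadWriteTransaction) error {
        row, err := txn.ReadRow(ctx, "Account", spanner.Key{accountID}, []string{"balance"})
        if err != nil {
            return err
        }
        var balance int64
        if err := row.Column(0, &balance); err != nil {
            return err
        }
        // Buffer the write; it is applied only if the whole transaction commits.
        return txn.BufferWrite([]*spanner.Mutation{
            spanner.Update("Account",
                []string{"account_id", "balance"},
                []interface{}{accountID, balance + 100}),
        })
    })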
func (*DataClient) Single ¶
func (dc *DataClient) Single() *ReadOnlyTransaction
Single provides a read-only snapshot transaction optimized for the case where only a single read or query is needed. This is more efficient than using ReadOnlyTransaction() for a single read or query.
Single will use a strong TimestampBound by default. Use ReadOnlyTransaction.WithTimestampBound to specify a different TimestampBound. A non-strong bound can be used to reduce latency, or "time-travel" to prior versions of the database, see the documentation of TimestampBound for details.
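A sketch of a single-use read (the table and column names are hypothetical):

    row, err := client.Single().ReadRow(ctx, "User", spanner.Key{userID}, []string{"profile"})
    if err != nil {
        // handle error
    }
    var profile string
    if err := row.ColumnByName("profile", &profile); err != nil {
        // handle error
    }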
func (*DataClient) SparseRead ¶
func (dc *DataClient) SparseRead(ctx context.Context, table, family string, rows []*SparseRow, limit int64) (*SparseResultSet, error)
type DataClientConfig ¶
type DataClientConfig struct {
    // NumChannels is the gRPC channel concurrency; it corresponds to the session pool.
    NumChannels int
    SessionPoolConfig
}
type Error ¶
type Error struct {
    // gRPC status error code
    Code codes.Code
    // more detailed information
    Desc string
    // contains filtered or unexported fields
}
Error is the SDK's error wrapper.
type GenericColumnValue ¶
GenericColumnValue represents the generic encoded value and type of a column. See the google.spanner.v1.ResultSet proto for details. This can be useful for proxying query results when the result types are not known in advance.
func NewGenericColumnValue ¶
func NewGenericColumnValue(v interface{}) (*GenericColumnValue, error)
NewGenericColumnValue creates a GenericColumnValue from Go value that is valid for Cloud Spanner.
func (GenericColumnValue) Decode ¶
func (v GenericColumnValue) Decode(ptr interface{}) error
Decode decodes a GenericColumnValue. The ptr argument should be a pointer to a Go value that can accept v.
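A round-trip sketch (the value is arbitrary):

    cv, err := spanner.NewGenericColumnValue(int64(42))
    if err != nil {
        // handle error
    }
    var n int64
    if err := cv.Decode(&n); err != nil {
        // handle error
    }
    // n == 42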
type Key ¶
type Key []interface{}
A Key is a primary key or secondary index key, represented as an []interface{}. It is used:
- as a primary key, to identify a unique row;
- as a secondary index key, to match a set of rows;
- as a boundary in a KeyRange.
Keys identify the target rows of Read operations and Delete mutations. Note that the column list for Insert and Update mutations must include the primary key columns.
    Go Type                                    =>  Key Type
    int, int8, int16, int32, int64, NullInt64  ->  INT64
    float32, float64, NullFloat64              ->  FLOAT64
    bool, NullBool                             ->  BOOL
    []byte                                     ->  BYTES
    string, NullString                         ->  STRING
    time.Time, NullTime                        ->  TIMESTAMP
    civil.Date, NullDate                       ->  DATE
type KeyRange ¶
type KeyRange struct {
// left and right boundary
Start, End Key
// boundaries are included or not
Kind KeyRangeKind
}
A KeyRange is a range of rows in a table or index. Start and End identify its boundaries; each boundary can be closed or open, as indicated by Kind. The values in a key follow the same order as the key's declared fields.
type KeyRangeKind ¶
type KeyRangeKind int
KeyRangeKind marks whether the left and right boundaries of a KeyRange are open or closed.
const (
    ClosedOpen   KeyRangeKind = iota // [l, r)
    ClosedClosed                     // [l, r]
    OpenClosed                       // (l, r]
    OpenOpen                         // (l, r)
)
type KeySet ¶
type KeySet struct {
    // All is true if the KeySet covers all rows of a table or an index.
    All bool
    // Keys is a list of keys covered by the KeySet.
    Keys []Key
    // Ranges is a list of key ranges covered by the KeySet.
    Ranges []KeyRange
}
A KeySet is a collection of keys and/or key ranges. All keys must belong to the same table or index, and the keys need not be sorted.
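A construction sketch (the table and key values are hypothetical):

    ks := spanner.KeySet{
        Keys: []spanner.Key{
            {"user-1"},
            {"user-2"},
        },
        Ranges: []spanner.KeyRange{
            {Start: spanner.Key{"user-a"}, End: spanner.Key{"user-z"}, Kind: spanner.ClosedOpen},
        },
    }
    iter := client.Single().Read(ctx, "User", ks, []string{"user_id", "profile"})
    defer iter.Stop()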
func AllKeys ¶
func AllKeys() KeySet
AllKeys returns a KeySet that represents all Keys of a table or an index.
func PrefixRange ¶
PrefixRange returns a KeySet for all keys with the given prefix, which is a key itself
type Mutation ¶
type Mutation struct {
// contains filtered or unexported fields
}
A Mutation describes a modification to one or more Cloud Spanner rows. The mutation represents an insert, update, delete, etc. on a table.
Many mutations can be applied in a single atomic commit. For purposes of constraint checking (such as foreign key constraints), the operations can be viewed as applying in the same order as the mutations are supplied (so that, e.g., a row and its logical "child" can be inserted in the same commit).
- The Apply function applies series of mutations.
- A ReadWriteTransaction applies a series of mutations as part of an atomic read-modify-write operation.
Example:
    m := spanner.Insert("User",
        []string{"user_id", "profile"},
        []interface{}{UserID, profile})
    _, err := client.Apply(ctx, []*spanner.Mutation{m})
In this example, we insert a new row into the User table. The primary key for the new row is UserID (presuming that "user_id" has been declared as the primary key of the "User" table).
Updating a row ¶
Changing the values of columns in an existing row is very similar to inserting a new row:
    m := spanner.Update("User",
        []string{"user_id", "profile"},
        []interface{}{UserID, profile})
    _, err := client.Apply(ctx, []*spanner.Mutation{m})
Deleting a row ¶
To delete a row, use spanner.Delete:
    m := spanner.Delete("User", spanner.Key{UserId})
    _, err := client.Apply(ctx, []*spanner.Mutation{m})
Note that deleting a row in a table may also delete rows from other tables if cascading deletes are specified in those tables' schemas. Delete does nothing if the named row does not exist (does not yield an error).
Deleting a field ¶
To delete/clear a field within a row, use spanner.Update with the value nil:
    m := spanner.Update("User",
        []string{"user_id", "profile"},
        []interface{}{UserID, nil})
    _, err := client.Apply(ctx, []*spanner.Mutation{m})
The valid Go types and their corresponding Cloud Spanner types that can be used in the Insert/Update/InsertOrUpdate functions are:
    string, NullString            - STRING
    []string, []NullString        - STRING ARRAY
    []byte                        - BYTES
    [][]byte                      - BYTES ARRAY
    int, int64, NullInt64         - INT64
    []int, []int64, []NullInt64   - INT64 ARRAY
    bool, NullBool                - BOOL
    []bool, []NullBool            - BOOL ARRAY
    float64, NullFloat64          - FLOAT64
    []float64, []NullFloat64      - FLOAT64 ARRAY
    time.Time, NullTime           - TIMESTAMP
    []time.Time, []NullTime       - TIMESTAMP ARRAY
    Date, NullDate                - DATE
    []Date, []NullDate            - DATE ARRAY
To compare two Mutations for testing purposes, use reflect.DeepEqual.
func DeleteKeyRange ¶
DeleteKeyRange removes a range of keys from a table. Succeeds whether or not the keys were present.
func Insert ¶
Insert returns a Mutation to insert a row into a table. If the row already exists, the write or transaction fails.
func InsertMap ¶
InsertMap returns a Mutation to insert a row into a table, specified by a map of column name to value. If the row already exists, the write or transaction fails.
func InsertOrUpdate ¶
InsertOrUpdate returns a Mutation to insert a row into a table. If the row already exists, it updates it instead. Any column values not explicitly written are preserved.
func InsertOrUpdateMap ¶
InsertOrUpdateMap returns a Mutation to insert a row into a table, specified by a map of column to value. If the row already exists, it updates it instead. Any column values not explicitly written are preserved.
func InsertOrUpdateStruct ¶
InsertOrUpdateStruct returns a Mutation to insert a row into a table, specified by a Go struct. If the row already exists, it updates it instead. Any column values not explicitly written are preserved.
The in argument must be a struct or a pointer to a struct. Its exported fields specify the column names and values. Use a field tag like "spanner:name" to provide an alternative column name, or use "spanner:-" to ignore the field.
func InsertStruct ¶
InsertStruct returns a Mutation to insert a row into a table, specified by a Go struct. If the row already exists, the write or transaction fails.
The in argument must be a struct or a pointer to a struct. Its exported fields specify the column names and values. Use a field tag like "spanner:name" to provide an alternative column name, or use "spanner:-" to ignore the field.
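A sketch of the struct form (the struct type and column names are hypothetical):

    type User struct {
        UserID  string `spanner:"user_id"`
        Profile string `spanner:"profile"`
        Secret  string `spanner:"-"` // ignored, never written
    }

    m, err := spanner.InsertStruct("User", &User{UserID: "u1", Profile: "hello"})
    if err != nil {
        // handle error
    }
    _, err = client.Apply(ctx, []*spanner.Mutation{m})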
func Replace ¶
Replace returns a Mutation to insert a row into a table, deleting any existing row. Unlike InsertOrUpdate, this means any values not explicitly written become NULL.
func ReplaceMap ¶
ReplaceMap returns a Mutation to insert a row into a table, deleting any existing row. Unlike InsertOrUpdateMap, this means any values not explicitly written become NULL. The row is specified by a map of column to value.
func ReplaceStruct ¶
ReplaceStruct returns a Mutation to insert a row into a table, deleting any existing row. Unlike InsertOrUpdateStruct, this means any values not explicitly written become NULL. The row is specified by a Go struct.
The in argument must be a struct or a pointer to a struct. Its exported fields specify the column names and values. Use a field tag like "spanner:name" to provide an alternative column name, or use "spanner:-" to ignore the field.
func Update ¶
Update returns a Mutation to update a row in a table. If the row does not already exist, the write or transaction fails.
func UpdateMap ¶
UpdateMap returns a Mutation to update a row in a table, specified by a map of column to value. If the row does not already exist, the write or transaction fails.
func UpdateStruct ¶
UpdateStruct returns a Mutation to update a row in a table, specified by a Go struct. If the row does not already exist, the write or transaction fails.
type NullFloat64 ¶
NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL.
func (NullFloat64) String ¶
func (n NullFloat64) String() string
String implements Stringer.String for NullFloat64
type NullRow ¶
NullRow represents a Cloud Spanner STRUCT that may be NULL. See also the document for Row. Note that NullRow is not a valid Cloud Spanner column Type.
type NullString ¶
NullString represents a Cloud Spanner STRING that may be NULL.
func (NullString) String ¶
func (n NullString) String() string
String implements Stringer.String for NullString
type ReadOnlyTransaction ¶
type ReadOnlyTransaction struct {
// contains filtered or unexported fields
}
ReadOnlyTransaction provides a snapshot transaction with guaranteed consistency across reads, but does not allow writes. Read-only transactions can be configured to read at timestamps in the past.
Read-only transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions.
Unlike locking read-write transactions, read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. See the documentation of TimestampBound for more details.
A ReadOnlyTransaction consumes resources on the server until Close() is called.
func (*ReadOnlyTransaction) Close ¶
func (t *ReadOnlyTransaction) Close()
Close closes a ReadOnlyTransaction, the transaction cannot perform any reads after being closed.
func (*ReadOnlyTransaction) Query ¶
func (t *ReadOnlyTransaction) Query(ctx context.Context, statement Statement) *RowIterator
Query executes a query against the database.
The provided function is called once in serial for each row read. If the function returns a non-nil error, Query immediately returns that value.
If no rows are read, Query will return nil without calling the provided function.
func (*ReadOnlyTransaction) Read ¶
func (t *ReadOnlyTransaction) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator
Read reads multiple rows from the database.
The provided function is called once in serial for each row read. If the function returns a non-nil error, Read immediately returns that value.
If no rows are read, Read will return nil without calling the provided function.
func (*ReadOnlyTransaction) ReadRow ¶
func (t *ReadOnlyTransaction) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error)
ReadRow reads a single row from the database.
If no row is present with the given key, then ReadRow returns an error where IsRowNotFound(err) is true.
func (*ReadOnlyTransaction) ReadUsingIndex ¶
func (t *ReadOnlyTransaction) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator
ReadUsingIndex reads multiple rows from the database using an index.
Currently, this function can only read columns that are part of the index key, part of the primary key, or stored in the index due to a STORING clause in the index definition.
The provided function is called once in serial for each row read. If the function returns a non-nil error, ReadUsingIndex immediately returns that value.
If no rows are read, ReadUsingIndex will return nil without calling the provided function.
func (*ReadOnlyTransaction) Timestamp ¶
func (t *ReadOnlyTransaction) Timestamp() (time.Time, error)
Timestamp returns the timestamp chosen to perform reads and queries in this transaction. The value can only be read after some read or query has either returned some data or completed without returning any data.
func (*ReadOnlyTransaction) WithTimestampBound ¶
func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction
WithTimestampBound specifies the TimestampBound to use for read or query. This can only be used before the first read or query is invoked. Note: bounded staleness is not available with general ReadOnlyTransactions; use a single-use ReadOnlyTransaction instead.
The returned value is the ReadOnlyTransaction so calls can be chained.
type ReadWriteTransaction ¶
type ReadWriteTransaction struct {
// contains filtered or unexported fields
}
ReadWriteTransaction provides a locking read-write transaction.
This type of transaction is the only way to write data into Cloud Spanner; (*DataClient).Apply and the ApplyAtLeastOnce option use transactions internally. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. However, the interface exposed by (*DataClient).ReadWriteTransaction eliminates the need for applications to write retry loops explicitly.
Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent.
Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it.
Reads performed within a transaction acquire locks on the data being read. Writes can only be done at commit time, after all reads have been completed. Conceptually, a read-write transaction consists of zero or more reads or SQL queries followed by a commit.
See (*DataClient).ReadWriteTransaction for an example.
Semantics ¶
Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner.
Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves.
Aborted transactions ¶
Application code does not need to retry explicitly; (*DataClient).ReadWriteTransaction will automatically retry a transaction if an attempt results in an abort. The lock priority of a transaction increases after each prior aborted transaction, meaning that the next attempt has a slightly better chance of success than before.
Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying.
Idle transactions ¶
A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error ABORTED.
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., SELECT 1) prevents the transaction from becoming idle.
func (*ReadWriteTransaction) BufferWrite ¶
func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error
BufferWrite adds a list of mutations to the set of updates that will be applied when the transaction is committed. It does not actually apply the write until the transaction is committed, so the operation does not block. The effects of the write won't be visible to any reads (including reads done in the same transaction) until the transaction commits.
See the example for DataClient.ReadWriteTransaction.
func (*ReadWriteTransaction) Query ¶
func (t *ReadWriteTransaction) Query(ctx context.Context, statement Statement) *RowIterator
Query executes a query against the database.
The provided function is called once in serial for each row read. If the function returns a non-nil error, Query immediately returns that value.
If no rows are read, Query will return nil without calling the provided function.
func (*ReadWriteTransaction) Read ¶
func (t *ReadWriteTransaction) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator
Read reads multiple rows from the database.
The provided function is called once in serial for each row read. If the function returns a non-nil error, Read immediately returns that value.
If no rows are read, Read will return nil without calling the provided function.
func (*ReadWriteTransaction) ReadRow ¶
func (t *ReadWriteTransaction) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error)
ReadRow reads a single row from the database.
If no row is present with the given key, then ReadRow returns an error where IsRowNotFound(err) is true.
func (*ReadWriteTransaction) ReadUsingIndex ¶
func (t *ReadWriteTransaction) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator
ReadUsingIndex reads multiple rows from the database using an index.
Currently, this function can only read columns that are part of the index key, part of the primary key, or stored in the index due to a STORING clause in the index definition.
The provided function is called once in serial for each row read. If the function returns a non-nil error, ReadUsingIndex immediately returns that value.
If no rows are read, ReadUsingIndex will return nil without calling the provided function.
type Row ¶
type Row struct {
// contains filtered or unexported fields
}
A Row is a view of a row of data produced by a Cloud Spanner read.
A row consists of a number of columns; the number depends on the columns used to construct the read.
The column values can be accessed by index, where the indices are with respect to the columns. For instance, if the read specified []string{"photo_id", "caption", "metadata"}, then each row will contain three columns: the 0th column corresponds to "photo_id", the 1st column corresponds to "caption", etc.
Column values are decoded by using one of the Column, ColumnByName, or Columns methods. The valid values passed to these methods depend on the column type. For example:
    var photoID int64
    err := row.Column(0, &photoID) // Decode column 0 as an integer.

    var caption string
    err = row.Column(1, &caption) // Decode column 1 as a string.

    // The above two operations at once.
    err = row.Columns(&photoID, &caption)
Supported types and their corresponding Cloud Spanner column type(s) are:
    *string (not NULL), *NullString     - STRING
    *[]NullString                       - STRING ARRAY
    *[]byte                             - BYTES
    *[][]byte                           - BYTES ARRAY
    *int64 (not NULL), *NullInt64       - INT64
    *[]NullInt64                        - INT64 ARRAY
    *bool (not NULL), *NullBool         - BOOL
    *[]NullBool                         - BOOL ARRAY
    *float64 (not NULL), *NullFloat64   - FLOAT64
    *[]NullFloat64                      - FLOAT64 ARRAY
    *time.Time (not NULL), *NullTime    - TIMESTAMP
    *[]NullTime                         - TIMESTAMP ARRAY
    *Date (not NULL), *NullDate         - DATE
    *[]NullDate                         - DATE ARRAY
    *[]*some_go_struct, *[]NullRow      - STRUCT ARRAY
    *GenericColumnValue                 - any Cloud Spanner type
For TIMESTAMP columns, returned time.Time object will be in UTC.
To fetch an array of BYTES, pass a *[][]byte. To fetch an array of (sub)rows, pass a *[]spanner.NullRow or a *[]*some_go_struct where some_go_struct holds all information of the subrow; see spanner.Row.ToStruct for the mapping between a Cloud Spanner row and a Go struct. To fetch an array of other types, pass a *[]spanner.Null* type of the appropriate type. Use *GenericColumnValue when you don't know in advance what column type to expect.
Row decodes the row contents lazily; as a result, each call to a getter has a chance of returning an error.
A column value may be NULL if the corresponding value is not present in Cloud Spanner. The spanner.Null* types (spanner.NullInt64 et al.) allow fetching values that may be null. A NULL BYTES can be fetched into a *[]byte as nil. It is an error to fetch a NULL value into any other type.
func NewRow ¶
NewRow returns a Row containing the supplied data. This can be useful for mocking Cloud Spanner Read and Query responses for unit testing.
func (*Row) Column ¶
Column fetches the value from the ith column, decoding it into ptr.
func (*Row) ColumnByName ¶
ColumnByName fetches the value from the named column, decoding it into ptr.
func (*Row) ColumnIndex ¶
ColumnIndex returns the index of the column with the given name. The comparison is case-sensitive.
func (*Row) ColumnName ¶
ColumnName returns the name of column i, or the empty string for an invalid column.
func (*Row) ColumnNames ¶
ColumnNames returns all column names of the row.
func (*Row) Columns ¶
Columns fetches all the columns in the row at once.
The value of the kth column will be decoded into the kth argument to Columns. See above for the list of acceptable argument types. The number of arguments must be equal to the number of columns. Pass nil to specify that a column should be ignored.
func (*Row) ConvertToStruct ¶
func (*Row) ToStruct ¶
ToStruct fetches the columns in a row into the fields of a struct. The rules for mapping a row's columns into a struct's exported fields are as follows:
- If a field has a `spanner: "column_name"` tag, then decode column 'column_name' into the field. A special case is the `spanner: "-"` tag, which instructs ToStruct to ignore the field during decoding.
- Otherwise, if the name of a field matches the name of a column (ignoring case), decode the column into the field.
The fields of the destination struct can be of any type that is acceptable to (*spanner.Row).Column.
Slice and pointer fields will be set to nil if the source column is NULL, and a non-nil value if the column is not NULL. To decode NULL values of other types, use one of the spanner.Null* as the type of the destination field.
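A decoding sketch (the struct type and columns are hypothetical; the untagged Profile field matches a "profile" column case-insensitively):

    type User struct {
        UserID  string `spanner:"user_id"`
        Profile string
        Skipped string `spanner:"-"` // ignored during decoding
    }

    var u User
    if err := row.ToStruct(&u); err != nil {
        // handle error
    }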
type RowIterator ¶
type RowIterator struct {
// contains filtered or unexported fields
}
RowIterator is an iterator over Rows.
func (*RowIterator) Do ¶
func (r *RowIterator) Do(f func(r *Row) error) error
Do calls the provided function once in sequence for each row in the iteration. If the function returns a non-nil error, Do immediately returns that value.
If there are no rows in the iterator, Do will return nil without calling the provided function.
Do always calls Stop on the iterator.
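A sketch using Do (hypothetical table and column; no explicit Stop is needed because Do always calls it):

    iter := client.Single().Read(ctx, "User", spanner.AllKeys(), []string{"user_id"})
    err := iter.Do(func(r *spanner.Row) error {
        var id string
        if err := r.Column(0, &id); err != nil {
            return err
        }
        // process id ...
        return nil
    })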
func (*RowIterator) Next ¶
func (r *RowIterator) Next() (*Row, error)
Next returns the next result. Its second return value is iterator.Done if there are no more results. Once Next returns Done, all subsequent calls will return Done.
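A sketch of the explicit iteration loop (assuming iterator.Done comes from google.golang.org/api/iterator, as in the Cloud Spanner client; the statement is hypothetical):

    iter := client.Single().Query(ctx, stmt)
    defer iter.Stop()
    for {
        row, err := iter.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // handle error
            break
        }
        // use row ...
    }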
func (*RowIterator) Stop ¶
func (r *RowIterator) Stop()
Stop terminates the iteration and cancels its context. It should be called after every iteration.
type SessionPoolConfig ¶
type SessionPoolConfig struct {
    // MaxOpened is the maximum number of opened sessions that is allowed by the
    // session pool; zero means unlimited.
    MaxOpened uint64
    // MinOpened is the minimum number of opened sessions that the session pool
    // tries to maintain. The session pool won't continue to expire sessions if the
    // number of opened connections drops below MinOpened. However, if a session is
    // found to be broken, it will still be evicted from the session pool, so it is
    // possible that the number of opened sessions drops below MinOpened.
    MinOpened uint64
    // MaxSessionAge is the maximum duration that a session can be reused; zero
    // means the session pool will never expire sessions.
    MaxSessionAge time.Duration
    // MaxBurst is the maximum number of concurrent session creation requests.
    MaxBurst uint64
    // WriteSessions is the fraction of sessions we try to keep prepared for write.
    WriteSessions float64
    // HealthCheckWorkers is the number of workers used by the health checker for this pool.
    HealthCheckWorkers int
    // HealthCheckInterval is how often the health checker pings a session.
    HealthCheckInterval time.Duration
    // contains filtered or unexported fields
}
SessionPoolConfig is the configuration of the session pool.
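A configuration sketch passed to NewDataClient (the numbers, serverAddr, and dbName are hypothetical):

    cfg := spanner.DataClientConfig{
        NumChannels: 4,
        SessionPoolConfig: spanner.SessionPoolConfig{
            MinOpened:           10,
            MaxOpened:           400,
            WriteSessions:       0.2,
            HealthCheckWorkers:  10,
            HealthCheckInterval: 5 * time.Minute,
        },
    }
    client, err := spanner.NewDataClient(ctx, serverAddr, dbName, cfg)
    if err != nil {
        // handle error
    }
    defer client.Close()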
type SparseResultSet ¶
type SparseResultSet struct {
Rows []*Row
}
func BuildSparseResultSet ¶
func BuildSparseResultSet(r *tspb.ResultSet) *SparseResultSet
type Statement ¶
A Statement is a SQL query with named parameters.
A parameter placeholder consists of '@' followed by the parameter name. Parameter names consist of any combination of letters, numbers, and underscores. Names may be entirely numeric (e.g., "WHERE m.id = @5"). Parameters may appear anywhere that a literal value is expected. The same parameter name may be used more than once. It is an error to execute a statement with unbound parameters. On the other hand, it is allowable to bind parameter names that are not used.
See the documentation of the Row type for how Go types are mapped to Cloud Spanner types.
func NewStatement ¶
NewStatement returns a Statement with the given SQL and an empty Params map.
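A sketch of binding named parameters (the SQL, table, and parameter value are hypothetical; Params is the parameter map mentioned above):

    stmt := spanner.NewStatement("SELECT user_id, profile FROM User WHERE user_id = @id")
    stmt.Params["id"] = "u1"

    iter := client.Single().Query(ctx, stmt)
    defer iter.Stop()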
type TimestampBound ¶
type TimestampBound struct {
// contains filtered or unexported fields
}
TimestampBound defines how Cloud Spanner will choose a timestamp for a single read/query or read-only transaction.
The types of timestamp bound are:
- Strong (the default).
- Bounded staleness.
- Exact staleness.
If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica.
Each type of timestamp bound is discussed in detail below. A TimestampBound can be specified when creating transactions, see the documentation of spanner.Client for an example.
Strong reads ¶
Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other - if any part of the read observes a transaction, all parts of the read see the transaction.
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp.
Use StrongRead() to create a bound of this type.
Exact staleness ¶
These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps less than or equal to the read timestamp have finished.
The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time.
These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results.
Use ReadTimestamp() and ExactStaleness() to create a bound of this type.
Bounded staleness ¶
Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking.
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results.
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp.
As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica.
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use reads and single-use read-only transactions.
Use MinReadTimestamp() and MaxStaleness() to create a bound of this type.
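A sketch of a bounded-staleness single-use read (the staleness bound, table, and columns are hypothetical):

    // Read at a server-chosen timestamp that is at most 10 seconds stale.
    ro := client.Single().WithTimestampBound(spanner.MaxStaleness(10 * time.Second))
    row, err := ro.ReadRow(ctx, "User", spanner.Key{userID}, []string{"profile"})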
Old read timestamps and garbage collection ¶
Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are four hours old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than four hours in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error FAILED_PRECONDITION.
func ExactStaleness ¶
func ExactStaleness(d time.Duration) TimestampBound
ExactStaleness returns a TimestampBound that will perform reads and queries at an exact staleness.
func MaxStaleness ¶
func MaxStaleness(d time.Duration) TimestampBound
MaxStaleness returns a TimestampBound that will perform reads and queries at a time chosen to be at most "d" stale.
func MinReadTimestamp ¶
func MinReadTimestamp(t time.Time) TimestampBound
MinReadTimestamp returns a TimestampBound that will perform reads and queries at a time chosen to be at least "t".
func ReadTimestamp ¶
func ReadTimestamp(t time.Time) TimestampBound
ReadTimestamp returns a TimestampBound that will perform reads and queries at the given time.
func StrongRead ¶
func StrongRead() TimestampBound
StrongRead returns a TimestampBound that will perform reads and queries at a timestamp where all previously committed transactions are visible.
func (TimestampBound) String ¶
func (tb TimestampBound) String() string
String implements fmt.Stringer.
Source Files ¶
- admin_client.go
- admin_ddl.go
- data_client.go
- errors.go
- extension.go
- key.go
- keyset.go
- metric.go
- mutation.go
- protoutils.go
- read_stream.go
- retry.go
- row.go
- session.go
- session_hc.go
- session_pool.go
- sparse.go
- statement.go
- timestampbound.go
- transaction.go
- tx_read_only.go
- tx_read_write.go
- tx_write_only.go
- value.go
Directories ¶
Path | Synopsis
---|---
examples |
internal |
fields | Package fields provides a view of the fields of a struct that follows the Go rules, amended to consider tags and case insensitivity.
utils |