Documentation ¶
Overview ¶
Package schema implements core Relational Algebra schema objects such as Table, Schema, DataSource, Fields, Headers, Index.
Package schema is a generated protocol buffer package.
It is generated from these files:
schema.proto
It has these top-level messages:
TablePartition Partition TablePb FieldPb Index
Index ¶
- Constants
- Variables
- func CreateDefaultRegistry(applyer Applyer)
- func RegisterSchema(schema *Schema) error
- func RegisterSourceAsSchema(name string, source Source) error
- func RegisterSourceType(sourceType string, source Source)
- type Alter
- type Applyer
- type ConfigNode
- type ConfigSchema
- type ConfigSource
- type Conn
- type ConnAll
- type ConnColumns
- type ConnDeletion
- type ConnMutation
- type ConnMutator
- type ConnPatchWhere
- type ConnScanner
- type ConnSeeker
- type ConnUpsert
- type DialectWriter
- type Field
- type FieldData
- type FieldPb
- func (*FieldPb) Descriptor() ([]byte, []int)
- func (m *FieldPb) GetCollation() string
- func (m *FieldPb) GetContextJson() []byte
- func (m *FieldPb) GetData() string
- func (m *FieldPb) GetDefLength() uint64
- func (m *FieldPb) GetDefVal() []byte
- func (m *FieldPb) GetDescription() string
- func (m *FieldPb) GetExtra() string
- func (m *FieldPb) GetIndexed() bool
- func (m *FieldPb) GetIndexes() []*Index
- func (m *FieldPb) GetKey() string
- func (m *FieldPb) GetLength() uint32
- func (m *FieldPb) GetName() string
- func (m *FieldPb) GetNativeType() uint32
- func (m *FieldPb) GetNoNulls() bool
- func (m *FieldPb) GetRoles() []string
- func (m *FieldPb) GetType() uint32
- func (*FieldPb) ProtoMessage()
- func (m *FieldPb) Reset()
- func (m *FieldPb) String() string
- type InMemApplyer
- type Index
- func (*Index) Descriptor() ([]byte, []int)
- func (m *Index) GetFields() []string
- func (m *Index) GetHashPartition() []string
- func (m *Index) GetName() string
- func (m *Index) GetPartitionSize() int32
- func (m *Index) GetPrimaryKey() bool
- func (*Index) ProtoMessage()
- func (m *Index) Reset()
- func (m *Index) String() string
- type Iterator
- type Key
- type KeyUint
- type Message
- type MessageValues
- type Partition
- type Registry
- func (m *Registry) GetSource(sourceType string) (Source, error)
- func (m *Registry) Init()
- func (m *Registry) Schema(schemaName string) (*Schema, bool)
- func (m *Registry) SchemaAdd(s *Schema) error
- func (m *Registry) SchemaAddChild(name string, child *Schema) error
- func (m *Registry) SchemaAddFromConfig(conf *ConfigSource) error
- func (m *Registry) SchemaDrop(schema, name string, objectType lex.TokenType) error
- func (m *Registry) SchemaRefresh(name string) error
- func (m *Registry) Schemas() []string
- func (m *Registry) String() string
- type Schema
- func (m *Schema) Current() bool
- func (m *Schema) OpenConn(tableName string) (Conn, error)
- func (m *Schema) Schema(schemaName string) (*Schema, error)
- func (m *Schema) SchemaForTable(tableName string) (*Schema, error)
- func (m *Schema) Since(dur time.Duration) bool
- func (m *Schema) Table(tableIn string) (*Table, error)
- func (m *Schema) Tables() []string
- type SchemaSourceProvider
- type Source
- type SourcePartitionable
- type SourceTableColumn
- type SourceTableSchema
- type Table
- func (m *Table) AddContext(key string, value interface{})
- func (m *Table) AddField(fld *Field)
- func (m *Table) AddFieldType(name string, valType value.ValueType)
- func (m *Table) AsRows() [][]driver.Value
- func (m *Table) Body() interface{}
- func (m *Table) Column(col string) (value.ValueType, bool)
- func (m *Table) Columns() []string
- func (m *Table) Current() bool
- func (m *Table) FieldNamesPositions() map[string]int
- func (m *Table) FieldsAsMessages() []Message
- func (m *Table) HasField(name string) bool
- func (m *Table) Id() uint64
- func (m *Table) Marshal() ([]byte, error)
- func (m *Table) SetColumns(cols []string)
- func (m *Table) SetColumnsFromFields()
- func (m *Table) SetRefreshed()
- func (m *Table) SetRows(rows [][]driver.Value)
- func (m *Table) Since(dur time.Duration) bool
- type TablePartition
- func (*TablePartition) Descriptor() ([]byte, []int)
- func (m *TablePartition) GetKeys() []string
- func (m *TablePartition) GetPartitions() []*Partition
- func (m *TablePartition) GetTable() string
- func (*TablePartition) ProtoMessage()
- func (m *TablePartition) Reset()
- func (m *TablePartition) String() string
- type TablePb
- func (*TablePb) Descriptor() ([]byte, []int)
- func (m *TablePb) GetCharset() uint32
- func (m *TablePb) GetContextJson() []byte
- func (m *TablePb) GetFieldpbs() []*FieldPb
- func (m *TablePb) GetIndexes() []*Index
- func (m *TablePb) GetName() string
- func (m *TablePb) GetNameOriginal() string
- func (m *TablePb) GetParent() string
- func (m *TablePb) GetPartition() *TablePartition
- func (m *TablePb) GetPartitionCt() uint32
- func (*TablePb) ProtoMessage()
- func (m *TablePb) Reset()
- func (m *TablePb) String() string
- type TimeMessage
Constants ¶
const (
	// NoNulls defines if we allow nulls
	NoNulls = false
	// AllowNulls ?
	AllowNulls = true
)
Variables ¶
var (
	// ErrNotFound is error expressing sought item was not found.
	ErrNotFound = fmt.Errorf("Not Found")
	// ErrNotImplemented this feature is not implemented for this source.
	ErrNotImplemented = fmt.Errorf("Not Implemented")
)
var (
	// SchemaRefreshInterval default schema Refresh Interval
	SchemaRefreshInterval = -time.Minute * 5

	// Static list of common field names for describe header on Show, Describe
	EngineFullCols       = []string{"Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"}
	ProdedureFullCols    = []string{"Db", "Name", "Type", "Definer", "Modified", "Created", "Security_type", "Comment", "character_set_client ", "collation_connection", "Database Collation"}
	DescribeFullCols     = []string{"Field", "Type", "Collation", "Null", "Key", "Default", "Extra", "Privileges", "Comment"}
	DescribeFullColMap   = map[string]int{"Field": 0, "Type": 1, "Collation": 2, "Null": 3, "Key": 4, "Default": 5, "Extra": 6, "Privileges": 7, "Comment": 8}
	DescribeCols         = []string{"Field", "Type", "Null", "Key", "Default", "Extra"}
	DescribeColMap       = map[string]int{"Field": 0, "Type": 1, "Null": 2, "Key": 3, "Default": 4, "Extra": 5}
	ShowTableColumns     = []string{"Table", "Table_Type"}
	ShowVariablesColumns = []string{"Variable_name", "Value"}
	ShowDatabasesColumns = []string{"Database"}
	ShowTableColumnMap   = map[string]int{"Table": 0}
	ShowIndexCols        = []string{"Table", "Non_unique", "Key_name", "Seq_in_index", "Column_name", "Collation", "Cardinality", "Sub_part", "Packed", "Null", "Index_type", "Index_comment"}
	DescribeFullHeaders  = NewDescribeFullHeaders()
	DescribeHeaders      = NewDescribeHeaders()
)
var (
	// DisableRecover If true, we will not capture/suppress panics.
	// Test only feature hopefully
	DisableRecover bool
)
Functions ¶
func CreateDefaultRegistry ¶
func CreateDefaultRegistry(applyer Applyer)
CreateDefaultRegistry create the default registry.
func RegisterSchema ¶
RegisterSchema makes a named schema available by the provided @name. If Register is called twice with the same name or if source is nil, it panics.
Sources are specific schemas of type csv, elasticsearch, etc containing multiple tables.
func RegisterSourceAsSchema ¶
RegisterSourceAsSchema means you have a datasource that is going to act as a named schema. ie, this will not be a nested schema with sub-schemas, and the source will not be re-usable as a source-type.
func RegisterSourceType ¶
RegisterSourceType makes a datasource type available by the provided @sourceType. If Register is called twice with the same name or if source is nil, it panics.
Sources are specific schemas of type csv, elasticsearch, etc containing multiple tables.
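A minimal registration sketch. The import path and the csvSource type are assumptions for illustration; csvSource would have to implement the Source interface documented below.

import "github.com/araddon/qlbridge/schema"

// csvSource is a hypothetical type assumed to implement schema.Source.
func init() {
	// Register once, typically at startup, so schemas referencing
	// the "csvfiles" source type can be built from it.
	schema.RegisterSourceType("csvfiles", &csvSource{})
}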
Types ¶
type Applyer ¶
type Applyer interface {
	// Init initialize the applyer with registry.
	Init(r *Registry)
	// AddOrUpdateOnSchema Add or Update object (Table, Index)
	AddOrUpdateOnSchema(s *Schema, obj interface{}) error
	// Drop an object from schema
	Drop(s *Schema, obj interface{}) error
}
Applyer takes schema writes and applies them. It is used both as a database is being loaded and as schema is loaded by a store, and it is responsible for applying schema changes such as Alters. In distributed dbs this is a very large part of the work, so this is an important interface that is still in flux.
func NewApplyer ¶
func NewApplyer(sp SchemaSourceProvider) Applyer
NewApplyer creates a new in-memory applyer. For distributed dbs we would need a different applyer (Raft).
type ConfigNode ¶
type ConfigNode struct {
	Name     string       `json:"name"`     // Name of this Node optional
	Source   string       `json:"source"`   // Name of source this node belongs to
	Address  string       `json:"address"`  // host/ip
	Settings u.JsonHelper `json:"settings"` // Arbitrary settings
}
ConfigNode are Servers/Services, ie a running instance of said Source
- each must represent a single source type
- normal use is a server, describing partitions of servers
- may have arbitrary config info in Settings
type ConfigSchema ¶
type ConfigSchema struct {
	Name       string   `json:"name"`    // Virtual Schema Name, must be unique
	Sources    []string `json:"sources"` // List of sources, the names of the "Db" in source
	ConfigNode []string `json:"-"`       // List of backend Servers
}
ConfigSchema is the json/config block for Schema, the data-sources that make up this Virtual Schema. Must have a name and list of sources to include.
type ConfigSource ¶
type ConfigSource struct {
	Name         string            `json:"name"`            // Name
	Schema       string            `json:"schema"`          // Schema Name if different than Name, will join existing schema
	SourceType   string            `json:"type"`            // [mysql,elasticsearch,csv,etc] Name in DataSource Registry
	TablesToLoad []string          `json:"tables_to_load"`  // if non empty, only load these tables
	TableAliases map[string]string `json:"table_aliases"`   // if non empty, only load these tables
	Nodes        []*ConfigNode     `json:"nodes"`           // List of nodes
	Hosts        []string          `json:"hosts"`           // List of hosts, replaces older "nodes"
	Settings     u.JsonHelper      `json:"settings"`        // Arbitrary settings specific to each source type
	Partitions   []*TablePartition `json:"partitions"`      // List of partitions per table (optional)
	PartitionCt  uint32            `json:"partition_count"` // Instead of array of per table partitions, raw partition count
}
ConfigSource are backend datasources, ie storage/database/csvfiles. Each represents a single source type/config and may belong to more than one schema.
func NewSourceConfig ¶
func NewSourceConfig(name, sourceType string) *ConfigSource
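A rough usage sketch; all field values here are made up, and u.JsonHelper is the gou JSON helper map used throughout these configs.

conf := schema.NewSourceConfig("mydb", "csv")     // name, sourceType
conf.TablesToLoad = []string{"users", "orders"}   // limit discovery to these tables
conf.Settings = u.JsonHelper{"path": "/tmp/data"} // source-specific settings

// Hand the config to the registry to create or extend a schema.
if err := schema.DefaultRegistry().SchemaAddFromConfig(conf); err != nil {
	// handle error
}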
func (*ConfigSource) String ¶
func (m *ConfigSource) String() string
type Conn ¶
type Conn interface {
Close() error
}
Conn A Connection/Session to a file, api, backend database. Depending on the features of the backing source, it may optionally implement different portions of this interface.
Minimum Read Features to provide Sql Select
- Scanning: iterate through messages/rows
- Schema Tables: at a minimum a list of tables available; the column level data can be introspected, so is optional
Planning:
- CreateMutator(ctx *plan.Context) : execute a mutation task insert, delete, update
Non Select based Sql DML Operations for Mutator:
- Deletion: (sql delete) Delete() DeleteExpression()
- Upsert Interface (sql Update, Upsert, Insert) Put() PutMulti()
A DataSource Connection/Session is stateful. This is really a generic interface; concrete connections will actually implement the features below: SchemaColumns, Scanner, Seeker, Mutator.
type ConnAll ¶
type ConnAll interface {
	Close() error
	ConnColumns
	Iterator
	ConnSeeker
	ConnUpsert
	ConnDeletion
}
ConnAll interface describes the FULL set of features a connection can implement.
type ConnColumns ¶
type ConnColumns interface {
Columns() []string
}
ConnColumns Interface for a data source connection exposing column positions for []driver.Value iteration
type ConnDeletion ¶
type ConnDeletion interface {
	// Delete using this key
	Delete(driver.Value) (int, error)
	// Delete with given expression
	DeleteExpression(p interface{}, n expr.Node) (int, error)
}
ConnDeletion deletion interface for data sources
type ConnMutation ¶
type ConnMutation interface {
CreateMutator(pc interface{}) (ConnMutator, error)
}
ConnMutation creates a Mutator connection similar to Open() connection for select
- accepts the plan context used in this upsert/insert/update
- returns a connection which must be closed
type ConnMutator ¶
type ConnMutator interface {
	ConnUpsert
	ConnDeletion
}
ConnMutator Mutator Connection
type ConnPatchWhere ¶
type ConnPatchWhere interface {
PatchWhere(ctx context.Context, where expr.Node, patch interface{}) (int64, error)
}
ConnPatchWhere passes a where expression through to the underlying datasource. Used for update statements WHERE x = y.
type ConnScanner ¶
ConnScanner is the primary basis for reading data sources. It exposes an interface to scan through rows. If the Source supports Predicate Push Down (ie, push the where/sql down to underlying store) this is just the resulting rows. Otherwise, Qlbridge engine must polyfill.
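A hedged sketch of a scanning connection backed by an in-memory slice; it implements Close, Columns, and Next, covering the Conn, ConnColumns, and Iterator behaviors described here. The type and field names are illustrative only.

type sliceConn struct {
	cols []string
	rows []schema.Message
	idx  int
}

func (c *sliceConn) Close() error      { return nil }
func (c *sliceConn) Columns() []string { return c.cols }

// Next returns the next message, or nil when the scan is exhausted.
func (c *sliceConn) Next() schema.Message {
	if c.idx >= len(c.rows) {
		return nil
	}
	m := c.rows[c.idx]
	c.idx++
	return m
}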
type ConnSeeker ¶
ConnSeeker is a conn to a key-value store; it allows the relational implementation to be faster by seeking row values instead of scanning.
type ConnUpsert ¶
type ConnUpsert interface {
	Put(ctx context.Context, key Key, value interface{}) (Key, error)
	PutMulti(ctx context.Context, keys []Key, src interface{}) ([]Key, error)
}
ConnUpsert Mutation interface for Put
- assumes datasource understands key(s?)
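A minimal sketch of a ConnUpsert backed by a map. It assumes callers pass *KeyUint keys; that is an assumption of this example, not a requirement of the interface.

type memUpsert struct {
	rows map[uint64]interface{}
}

func (m *memUpsert) Put(ctx context.Context, key schema.Key, value interface{}) (schema.Key, error) {
	k, ok := key.(*schema.KeyUint)
	if !ok {
		return nil, fmt.Errorf("expected *schema.KeyUint, got %T", key)
	}
	if m.rows == nil {
		m.rows = make(map[uint64]interface{})
	}
	m.rows[k.ID] = value
	return k, nil
}

func (m *memUpsert) PutMulti(ctx context.Context, keys []schema.Key, src interface{}) ([]schema.Key, error) {
	return nil, schema.ErrNotImplemented
}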
type DialectWriter ¶
type DialectWriter interface {
	// Dialect ie "mysql", "postgres", "cassandra", "bigquery"
	Dialect() string
	Table(tbl *Table) string
	FieldType(t value.ValueType) string
}
DialectWriter knows how to format schema output specific to a dialect; postgres, mysql, and bigquery all have different identity and value escape characters.
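A rough DialectWriter sketch for a hypothetical "ansi" dialect; the type mapping below is illustrative, not what any real writer in this project emits.

type ansiWriter struct{}

func (w *ansiWriter) Dialect() string { return "ansi" }

// Table renders a (very) minimal CREATE TABLE statement from the table name.
func (w *ansiWriter) Table(tbl *schema.Table) string {
	return fmt.Sprintf("CREATE TABLE %q (...)", tbl.Name)
}

// FieldType maps qlbridge value types onto dialect column types.
func (w *ansiWriter) FieldType(t value.ValueType) string {
	switch t {
	case value.IntType:
		return "BIGINT"
	case value.StringType:
		return "VARCHAR(255)"
	default:
		return "TEXT"
	}
}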
type Field ¶
type Field struct {
	FieldPb
	Context map[string]interface{} // During schema discovery of underlying source, may need to store additional info
	// contains filtered or unexported fields
}
Field Describes the column info, name, data type, defaults, index, null
- dialects (mysql, mongo, cassandra) have their own descriptors for these, so this generic form is meant to be converted to the frontend dialect at runtime
func NewDescribeFullHeaders ¶
func NewDescribeFullHeaders() []*Field
func NewDescribeHeaders ¶
func NewDescribeHeaders() []*Field
func NewFieldBase ¶
func (*Field) AddContext ¶
type FieldData ¶
type FieldData []byte
FieldData is the byte value of a "Described" field ready to write to the wire so we don't have to continually re-serialize it.
type FieldPb ¶
type FieldPb struct {
	Name        string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Description string   `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
	Key         string   `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"`
	Extra       string   `protobuf:"bytes,4,opt,name=extra" json:"extra,omitempty"`
	Data        string   `protobuf:"bytes,5,opt,name=data" json:"data,omitempty"`
	Length      uint32   `protobuf:"varint,6,opt,name=length" json:"length,omitempty"`
	Type        uint32   `protobuf:"varint,7,opt,name=type" json:"type,omitempty"`
	NativeType  uint32   `protobuf:"varint,8,opt,name=nativeType" json:"nativeType,omitempty"`
	DefLength   uint64   `protobuf:"varint,9,opt,name=defLength" json:"defLength,omitempty"`
	DefVal      []byte   `protobuf:"bytes,11,opt,name=defVal,proto3" json:"defVal,omitempty"`
	Indexed     bool     `protobuf:"varint,13,opt,name=indexed" json:"indexed,omitempty"`
	NoNulls     bool     `protobuf:"varint,14,opt,name=noNulls" json:"noNulls,omitempty"`
	Collation   string   `protobuf:"bytes,15,opt,name=collation" json:"collation,omitempty"`
	Roles       []string `protobuf:"bytes,16,rep,name=roles" json:"roles,omitempty"`
	Indexes     []*Index `protobuf:"bytes,17,rep,name=indexes" json:"indexes,omitempty"`
	ContextJson []byte   `protobuf:"bytes,18,opt,name=contextJson,proto3" json:"contextJson,omitempty"`
}
func (*FieldPb) Descriptor ¶
func (*FieldPb) GetCollation ¶
func (*FieldPb) GetContextJson ¶
func (*FieldPb) GetDefLength ¶
func (*FieldPb) GetDescription ¶
func (*FieldPb) GetIndexed ¶
func (*FieldPb) GetIndexes ¶
func (*FieldPb) GetNativeType ¶
func (*FieldPb) GetNoNulls ¶
func (*FieldPb) ProtoMessage ¶
func (*FieldPb) ProtoMessage()
type InMemApplyer ¶
type InMemApplyer struct {
// contains filtered or unexported fields
}
InMemApplyer applies schema changes in memory. As changes to schema come in (such as ALTER statements, new tables, new databases) we need to apply them to the underlying schema.
func (*InMemApplyer) AddOrUpdateOnSchema ¶
func (m *InMemApplyer) AddOrUpdateOnSchema(s *Schema, v interface{}) error
AddOrUpdateOnSchema applies a schema change: a new table, index, or whole new schema being registered. The first argument is the schema the change is applied to (ie, add table x to schema y).
func (*InMemApplyer) Drop ¶
func (m *InMemApplyer) Drop(s *Schema, v interface{}) error
Drop we have a schema change to apply.
func (*InMemApplyer) Init ¶
func (m *InMemApplyer) Init(r *Registry)
Init store the registry as part of in-mem applyer which needs it.
type Index ¶
type Index struct {
	Name          string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Fields        []string `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"`
	PrimaryKey    bool     `protobuf:"varint,3,opt,name=primaryKey" json:"primaryKey,omitempty"`
	HashPartition []string `protobuf:"bytes,4,rep,name=hashPartition" json:"hashPartition,omitempty"`
	PartitionSize int32    `protobuf:"varint,5,opt,name=partitionSize" json:"partitionSize,omitempty"`
}
Index a description of how field(s) should be indexed for a table.
func (*Index) Descriptor ¶
func (*Index) GetHashPartition ¶
func (*Index) GetPartitionSize ¶
func (*Index) GetPrimaryKey ¶
func (*Index) ProtoMessage ¶
func (*Index) ProtoMessage()
type Iterator ¶
type Iterator interface {
	// Next returns the next message. If none remain, returns nil.
	Next() Message
}
Iterator is a simple iterator for paging through a datastore's Messages (rows), to be used for scanning. It is a building block for Tasks that process part of a DAG of tasks to process data.
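Consuming an Iterator is a loop until Next returns nil; a short sketch where iter and process are placeholders obtained/defined elsewhere.

for {
	msg := iter.Next()
	if msg == nil {
		break // no rows remain
	}
	// msg.Id() can drive consistent-hash routing; msg.Body() holds the row payload.
	process(msg)
}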
type KeyUint ¶
type KeyUint struct {
ID uint64
}
KeyUint implements the Key interface and is a simple uint64 key.
type Message ¶
type Message interface {
	Id() uint64
	Body() interface{}
}
Message is an interface to describe a Row being processed by query engine/vm, or a message between distributed parts of the system. It provides an Id() method which can be used by consistent-hash algorithms for routing a message consistently to different processes/servers.
Body() returns an interface{}, allowing this to be a generic structure for routing.
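A minimal sketch of a Message implementation carrying a []driver.Value row; the struct and field names are illustrative.

type rowMsg struct {
	id   uint64
	vals []driver.Value
}

func (m *rowMsg) Id() uint64        { return m.id }
func (m *rowMsg) Body() interface{} { return m.vals }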
type MessageValues ¶
MessageValues describes a message with array of driver.Value.
type Partition ¶
type Partition struct {
	Id    string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
	Left  string `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"`
	Right string `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"`
}
Partition describes a range of data. The left-key is contained in this partition; the right key is not contained in this partition (it belongs to the next one).
func (*Partition) Descriptor ¶
func (*Partition) ProtoMessage ¶
func (*Partition) ProtoMessage()
type Registry ¶
type Registry struct {
// contains filtered or unexported fields
}
Registry is a global or namespace registry of datasources and schema. Datasources have a "sourcetype" and roughly define the driver. Schemas are made up of one or more underlying source-types and have normal schema info about tables etc.
func DefaultRegistry ¶
func DefaultRegistry() *Registry
DefaultRegistry get access to the shared/global registry of all datasource implementations
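A typical lookup through the shared registry; a short sketch assuming a schema named "mydb" was registered earlier.

reg := schema.DefaultRegistry()
s, ok := reg.Schema("mydb")
if !ok {
	// schema not registered
	return
}
fmt.Println(reg.Schemas(), s.Tables())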
func (*Registry) Init ¶
func (m *Registry) Init()
Init is called pre-schema-load; it calls any sources that need pre-schema init.
func (*Registry) SchemaAddChild ¶
SchemaAddChild Add a new Child Schema
func (*Registry) SchemaAddFromConfig ¶
func (m *Registry) SchemaAddFromConfig(conf *ConfigSource) error
SchemaAddFromConfig means you have a Schema-Source you want to add
func (*Registry) SchemaDrop ¶
SchemaDrop removes a schema
func (*Registry) SchemaRefresh ¶
SchemaRefresh means reload the schema from underlying store. Possibly requires introspection.
type Schema ¶
type Schema struct {
	Name       string        // Name of schema
	Conf       *ConfigSource // source configuration
	DS         Source        // This datasource Interface
	InfoSchema *Schema       // represent this Schema as sql schema like "information_schema"
	SchemaRef  *Schema       // IF this is infoschema, the schema it refers to
	// contains filtered or unexported fields
}
Schema is a "Virtual" Schema and may have multiple different backing sources.
- Multiple DataSource(s) (each may be a discrete source type such as mysql, elasticsearch, etc)
- each schema supplies tables to the virtual table pool
- each table name across schemas must be unique (or aliased)
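A hedged sketch of walking a schema: list table names, fetch each table definition, and open a connection. Here s is a *Schema obtained elsewhere and "users" is an assumed table name.

for _, name := range s.Tables() {
	tbl, err := s.Table(name)
	if err != nil {
		continue
	}
	fmt.Println(name, tbl.Columns())
}

conn, err := s.OpenConn("users")
if err == nil {
	defer conn.Close()
}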
func NewInfoSchema ¶
NewInfoSchema create a new empty schema with given name.
func NewSchemaSource ¶
NewSchemaSource create a new empty schema with given name and source.
func (*Schema) SchemaForTable ¶
SchemaForTable Find a Schema for given Table
func (*Schema) Since ¶
Since reports whether this schema object has been refreshed within the time window described by @dur.
type SchemaSourceProvider ¶
SchemaSourceProvider is factory for creating schema storage
type Source ¶
type Source interface {
	// Init provides opportunity for those sources that require no configuration and
	// introspect schema from their environment, time to load pre-schema discovery
	Init()
	// Setup optional interface for getting the Schema injected during creation/startup.
	// Since the Source is a singleton, stateful manager, it has a startup/shutdown process.
	Setup(*Schema) error
	// Close this source, ensure connections, underlying resources are closed.
	Close() error
	// Open create a connection (not thread safe) to this source.
	Open(source string) (Conn, error)
	// Tables is a list of table names provided by this source.
	Tables() []string
	// Table get table schema for given table name.
	Table(table string) (*Table, error)
}
Source is an interface describing a datasource such as a database, file, api, in-mem data etc. It is thread-safe, singleton, responsible for creating connections and exposing schema. It also exposes partition information optionally if a distributed source.
Sources are registered in a registry, to be dynamically created as schema demands.
Lifecycle:
Init()
Setup()
// running .... Open(), Table(name), etc. .....
Close()
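A skeletal Source following that lifecycle; the type name and in-memory backing map are purely illustrative.

type memSource struct {
	tables map[string]*schema.Table
}

func (s *memSource) Init()                      {}
func (s *memSource) Setup(*schema.Schema) error { return nil }
func (s *memSource) Close() error               { return nil }

func (s *memSource) Tables() []string {
	names := make([]string, 0, len(s.tables))
	for name := range s.tables {
		names = append(names, name)
	}
	return names
}

func (s *memSource) Table(table string) (*schema.Table, error) {
	if t, ok := s.tables[table]; ok {
		return t, nil
	}
	return nil, schema.ErrNotFound
}

func (s *memSource) Open(source string) (schema.Conn, error) {
	// A real source would return a connection scoped to the named table.
	return nil, schema.ErrNotImplemented
}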
type SourcePartitionable ¶
type SourcePartitionable interface {
	// Partitions list of partitions.
	Partitions() []*Partition
	PartitionSource(p *Partition) (Conn, error)
}
SourcePartitionable is an optional interface a source may implement that announces it (source) as partitionable into ranges for splitting reads, writes onto different nodes of a cluster.
Many databases already have internal partition schemas; this allows those to be exposed for use in our partitioning, so the query-planner can distribute work across nodes.
type SourceTableColumn ¶
type SourceTableColumn interface {
	// Underlying data type of column
	Column(col string) (value.ValueType, bool)
}
SourceTableColumn is a partial source that just provides access to Column schema info, used in Generators.
type SourceTableSchema ¶
SourceTableSchema Partial interface from Source to define just Table()
type Table ¶
type Table struct {
	TablePb
	Fields         []*Field               // List of Fields, in order
	Context        map[string]interface{} // During schema discovery of underlying source, may need to store additional info
	FieldPositions map[string]int         // Maps name of column to ordinal position in array of []driver.Value's
	FieldMap       map[string]*Field      // Map of Field-name -> Field
	Schema         *Schema                // The schema this is member of
	Source         Source                 // The source
	// contains filtered or unexported fields
}
Table represents traditional definition of Database Table. It belongs to a Schema and can be used to create a Datasource used to read this table.
func (*Table) AddContext ¶
AddContext add key/value pairs to context (settings, metadata).
func (*Table) AddFieldType ¶
AddFieldType describe and register a new column
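A short sketch of describing columns on an existing *Table; tbl is obtained elsewhere and the column names are made up.

tbl.AddFieldType("user_id", value.IntType)
tbl.AddFieldType("email", value.StringType)
tbl.SetColumnsFromFields()

// FieldNamesPositions maps each column name to its ordinal position
// in the []driver.Value rows this table produces.
positions := tbl.FieldNamesPositions()
fmt.Println(tbl.Columns(), positions["email"])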
func (*Table) AsRows ¶
AsRows return all fields suitable as list of values for Describe/Show statements.
func (*Table) Current ¶
Current Is this schema object current? ie, have we refreshed it from source since refresh interval.
func (*Table) FieldNamesPositions ¶
FieldNamesPositions List of Field Names and ordinal position in Column list
func (*Table) FieldsAsMessages ¶
FieldsAsMessages gets the list of all fields as Message interfaces, used in schema for sql "describe table".
func (*Table) SetColumns ¶
SetColumns Explicitly set column names.
func (*Table) SetColumnsFromFields ¶
func (m *Table) SetColumnsFromFields()
SetColumnsFromFields Explicitly set column names from fields.
func (*Table) SetRefreshed ¶
func (m *Table) SetRefreshed()
SetRefreshed update the refreshed date to now.
type TablePartition ¶
type TablePartition struct {
	Table      string       `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"`
	Keys       []string     `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"`
	Partitions []*Partition `protobuf:"bytes,3,rep,name=partitions" json:"partitions,omitempty"`
}
Partition describes a range of data (in a Table). The left-key is contained in this partition; the right key is not contained in this partition (it belongs to the next partition). So any value >= left-key and < right-key is contained herein.
func (*TablePartition) Descriptor ¶
func (*TablePartition) Descriptor() ([]byte, []int)
func (*TablePartition) GetKeys ¶
func (m *TablePartition) GetKeys() []string
func (*TablePartition) GetPartitions ¶
func (m *TablePartition) GetPartitions() []*Partition
func (*TablePartition) GetTable ¶
func (m *TablePartition) GetTable() string
func (*TablePartition) ProtoMessage ¶
func (*TablePartition) ProtoMessage()
func (*TablePartition) Reset ¶
func (m *TablePartition) Reset()
func (*TablePartition) String ¶
func (m *TablePartition) String() string
type TablePb ¶
type TablePb struct {
	// Name of table lowercased
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Name of table (not lowercased)
	NameOriginal string `protobuf:"bytes,2,opt,name=nameOriginal" json:"nameOriginal,omitempty"`
	// some dbs are more hiearchical (table-column-family)
	Parent string `protobuf:"bytes,3,opt,name=parent" json:"parent,omitempty"`
	// Character set, default = utf8
	Charset uint32 `protobuf:"varint,4,opt,name=Charset" json:"Charset,omitempty"`
	// Partitions in this table, optional may be empty
	Partition *TablePartition `protobuf:"bytes,5,opt,name=partition" json:"partition,omitempty"`
	// Partition Count
	PartitionCt uint32 `protobuf:"varint,6,opt,name=PartitionCt" json:"PartitionCt,omitempty"`
	// List of indexes for this table
	Indexes []*Index `protobuf:"bytes,7,rep,name=indexes" json:"indexes,omitempty"`
	// context json bytes
	ContextJson []byte `protobuf:"bytes,8,opt,name=contextJson,proto3" json:"contextJson,omitempty"`
	// List of Fields, in order
	Fieldpbs []*FieldPb `protobuf:"bytes,9,rep,name=fieldpbs" json:"fieldpbs,omitempty"`
}
func (*TablePb) Descriptor ¶
func (*TablePb) GetCharset ¶
func (*TablePb) GetContextJson ¶
func (*TablePb) GetFieldpbs ¶
func (*TablePb) GetIndexes ¶
func (*TablePb) GetNameOriginal ¶
func (*TablePb) GetPartition ¶
func (m *TablePb) GetPartition() *TablePartition
func (*TablePb) GetPartitionCt ¶
func (*TablePb) ProtoMessage ¶
func (*TablePb) ProtoMessage()
type TimeMessage ¶
TimeMessage describes a message with a timestamp.