Documentation ¶
Overview ¶
Package datas defines and implements the database layer used in Noms.
Index ¶
- Constants
- Variables
- func BuildHashesRequestForTest(hashes hash.HashSet) io.ReadCloser
- func FindCommonAncestor(c1, c2 types.Ref, vr types.ValueReader) (a types.Ref, ok bool)
- func IsCommit(v types.Value) bool
- func IsCommitType(t *types.Type) bool
- func IsRefOfCommitType(t *types.Type) bool
- func IsValidDatasetName(name string) bool
- func NewCommit(value types.Value, parents types.Set, meta types.Struct) types.Struct
- func NewHTTPChunkStore(baseURL, auth string) chunks.ChunkStore
- func Pull(srcDB, sinkDB Database, sourceRef types.Ref, progressCh chan PullProgress)
- func Router(cs chunks.ChunkStore, prefix string) *httprouter.Router
- type CommitOptions
- type Database
- type Dataset
- func (ds Dataset) Database() Database
- func (ds Dataset) HasHead() bool
- func (ds Dataset) Head() types.Struct
- func (ds Dataset) HeadRef() types.Ref
- func (ds Dataset) HeadValue() types.Value
- func (ds Dataset) ID() string
- func (ds Dataset) MaybeHead() (types.Struct, bool)
- func (ds Dataset) MaybeHeadRef() (types.Ref, bool)
- func (ds Dataset) MaybeHeadValue() (types.Value, bool)
- type Handler
- type PullProgress
- type RemoteDatabaseServer
- type URLParams
Constants ¶
const ( ParentsField = "parents" ValueField = "value" MetaField = "meta" )
const ( // NomsVersionHeader is the name of the header that Noms clients and // servers must set in every request/response. NomsVersionHeader = "x-noms-vers" )
Variables ¶
var ( ErrOptimisticLockFailed = errors.New("Optimistic lock failed on database Root update") ErrMergeNeeded = errors.New("Dataset head is not ancestor of commit") )
var ( // HandleWriteValue is meant to handle HTTP POST requests to the // writeValue/ server endpoint. The payload should be an appropriately- // ordered sequence of Chunks to be validated and stored on the server. // TODO: Nice comment about what headers it expects/honors, payload // format, and error responses. HandleWriteValue = createHandler(handleWriteValue, true) // HandleGetRefs is meant to handle HTTP POST requests to the getRefs/ // server endpoint. Given a sequence of Chunk hashes, the server will // fetch and return them. // TODO: Nice comment about what headers it // expects/honors, payload format, and responses. HandleGetRefs = createHandler(handleGetRefs, true) // HandleGetBlob is a custom endpoint whose sole purpose is to directly // fetch the *bytes* contained in a Blob value. It expects a single query // param of `h` to be the ref of the Blob. // TODO: Support retrieving blob contents via a path. HandleGetBlob = createHandler(handleGetBlob, false) // HandleHasRefs is meant to handle HTTP POST requests to the hasRefs/ // server endpoint. Given a sequence of Chunk hashes, the server checks for // their presence and returns a list of true/false responses. // TODO: Nice comment about what headers it expects/honors, payload // format, and responses. HandleHasRefs = createHandler(handleHasRefs, true) // HandleRootGet is meant to handle HTTP GET requests to the root/ server // endpoint. The server returns the hash of the Root as a string. // TODO: Nice comment about what headers it expects/honors, payload // format, and responses. HandleRootGet = createHandler(handleRootGet, true) // HandleRootPost is meant to handle HTTP POST requests to the root/ // server endpoint. This is used to update the Root to point to a new // Chunk. // TODO: Nice comment about what headers it expects/honors, payload // format, and error responses. 
HandleRootPost = createHandler(handleRootPost, true) // HandleBaseGet is meant to handle HTTP GET requests to the / server // endpoint. This is used to give a friendly message to users. // TODO: Nice comment about what headers it expects/honors, payload // format, and error responses. HandleBaseGet = handleBaseGet HandleGraphQL = createHandler(handleGraphQL, false) HandleStats = createHandler(handleStats, false) )
var DatasetFullRe = regexp.MustCompile("^" + DatasetRe.String() + "$")
DatasetFullRe is a regexp that matches only a target string that is entirely a legal Dataset name.
var DatasetRe = regexp.MustCompile(`[a-zA-Z0-9\-_/]+`)
DatasetRe is a regexp that matches a legal Dataset name anywhere within the target string.
Functions ¶
func BuildHashesRequestForTest ¶
func BuildHashesRequestForTest(hashes hash.HashSet) io.ReadCloser
func FindCommonAncestor ¶
FindCommonAncestor returns the most recent common ancestor of c1 and c2, if one exists, setting ok to true. If there is no common ancestor, ok is set to false.
func IsCommitType ¶
func IsRefOfCommitType ¶
func IsValidDatasetName ¶
func NewCommit ¶
NewCommit creates a new commit object.
A commit has the following type:
```
struct Commit { meta: M, parents: Set<Ref<Cycle<Commit>>>, value: T, }
``` where M is a struct type and T is any type.
func NewHTTPChunkStore ¶
func NewHTTPChunkStore(baseURL, auth string) chunks.ChunkStore
func Pull ¶
func Pull(srcDB, sinkDB Database, sourceRef types.Ref, progressCh chan PullProgress)
Pull objects that descend from sourceRef from srcDB to sinkDB.
func Router ¶
func Router(cs chunks.ChunkStore, prefix string) *httprouter.Router
Types ¶
type CommitOptions ¶
type CommitOptions struct { // Parents, if provided is the parent commits of the commit we are // creating. Parents types.Set // Meta is a Struct that describes arbitrary metadata about this Commit, // e.g. a timestamp or descriptive text. Meta types.Struct // Policy will be called to attempt to merge this Commit with the current // Head, if this is not a fast-forward. If Policy is nil, no merging will // be attempted. Note that because Commit() retries in some cases, Policy // might also be called multiple times with different values. Policy merge.Policy }
CommitOptions is used to pass options into Commit.
type Database ¶
type Database interface { // To implement types.ValueWriter, Database implementations provide // WriteValue(). WriteValue() writes v to this Database, though v is not // guaranteed to be persistent until after a subsequent Commit(). The // return value is the Ref of v. // Written values won't be persisted until a commit-alike types.ValueReadWriter // Close must have no side-effects io.Closer // Datasets returns the root of the database which is a // Map<String, Ref<Commit>> where string is a datasetID. Datasets() types.Map // GetDataset returns a Dataset struct containing the current mapping of // datasetID in the above Datasets Map. GetDataset(datasetID string) Dataset // Rebase brings this Database's view of the world inline with upstream. Rebase() // Commit updates the Commit that ds.ID() in this database points at. All // Values that have been written to this Database are guaranteed to be // persistent after Commit() returns. // The new Commit struct is constructed using v, opts.Parents, and // opts.Meta. If opts.Parents is the zero value (types.Set{}) then // the current head is used. If opts.Meta is the zero value // (types.Struct{}) then a fully initialized empty Struct is passed to // NewCommit. // The returned Dataset is always the newest snapshot, regardless of // success or failure, and Datasets() is updated to match backing storage // upon return as well. If the update cannot be performed, e.g., because // of a conflict, Commit returns an 'ErrMergeNeeded' error. Commit(ds Dataset, v types.Value, opts CommitOptions) (Dataset, error) // CommitValue updates the Commit that ds.ID() in this database points at. // All Values that have been written to this Database are guaranteed to be // persistent after Commit(). // The new Commit struct is constructed using `v`, and the current Head of // `ds` as the lone Parent. 
// The returned Dataset is always the newest snapshot, regardless of // success or failure, and Datasets() is updated to match backing storage // upon return as well. If the update cannot be performed, e.g., because // of a conflict, Commit returns an 'ErrMergeNeeded' error. CommitValue(ds Dataset, v types.Value) (Dataset, error) // Delete removes the Dataset named ds.ID() from the map at the root of // the Database. The Dataset data is not necessarily cleaned up at this // time, but may be garbage collected in the future. // The returned Dataset is always the newest snapshot, regardless of // success or failure, and Datasets() is updated to match backing storage // upon return as well. If the update cannot be performed, e.g., because // of a conflict, Delete returns an 'ErrMergeNeeded' error. Delete(ds Dataset) (Dataset, error) // SetHead ignores any lineage constraints (e.g. the current Head being in // commit’s Parent set) and force-sets a mapping from datasetID: commit in // this database. // All Values that have been written to this Database are guaranteed to be // persistent after SetHead(). If the update cannot be performed, e.g., // because another process moved the current Head out from under you, // error will be non-nil. // The newest snapshot of the Dataset is always returned, so the caller can // easily retry using the latest. // Regardless, Datasets() is updated to match backing storage upon return. SetHead(ds Dataset, newHeadRef types.Ref) (Dataset, error) // FastForward takes a types.Ref to a Commit object and makes it the new // Head of ds iff it is a descendant of the current Head. Intended to be // used e.g. after a call to Pull(). If the update cannot be performed, // e.g., because another process moved the current Head out from under // you, err will be non-nil. // The newest snapshot of the Dataset is always returned, so the caller // can easily retry using the latest. // Regardless, Datasets() is updated to match backing storage upon return. 
FastForward(ds Dataset, newHeadRef types.Ref) (Dataset, error) // Stats may return some kind of struct that reports statistics about the // ChunkStore that backs this Database instance. The type is // implementation-dependent, and impls may return nil Stats() interface{} // StatsSummary may return a string containing summarized statistics for // the ChunkStore that backs this Database. It must return "Unsupported" // if this operation is not supported. StatsSummary() string Flush() // contains filtered or unexported methods }
Database provides versioned storage for noms values. While Values can be directly read and written from a Database, it is generally more appropriate to read data by inspecting the Head of a Dataset and write new data by updating the Head of a Dataset via Commit() or similar. Particularly, new data is not guaranteed to be persistent until after a Commit (Delete, SetHead, or FastForward) operation completes. The Database API is stateful, meaning that calls to GetDataset() or Datasets() occurring after a call to Commit() (et al) will represent the result of the Commit().
func NewDatabase ¶
func NewDatabase(cs chunks.ChunkStore) Database
type Dataset ¶
type Dataset struct {
// contains filtered or unexported fields
}
Dataset is a named Commit within a Database.
func (Dataset) Database ¶
Database returns the Database object in which this Dataset is stored. WARNING: This method is under consideration for deprecation.
func (Dataset) HasHead ¶
HasHead() returns 'true' if this dataset has a Head Commit, false otherwise.
func (Dataset) Head ¶
Head returns the current head Commit, which contains the current root of the Dataset's value tree.
func (Dataset) HeadRef ¶
HeadRef returns the Ref of the current head Commit, which contains the current root of the Dataset's value tree.
func (Dataset) MaybeHead ¶
MaybeHead returns the current Head Commit of this Dataset, which contains the current root of the Dataset's value tree, if available. If not, it returns a new Commit and 'false'.
func (Dataset) MaybeHeadRef ¶
MaybeHeadRef returns the Ref of the current Head Commit of this Dataset, which contains the current root of the Dataset's value tree, if available. If not, it returns an empty Ref and 'false'.
type Handler ¶
type Handler func(w http.ResponseWriter, req *http.Request, ps URLParams, cs chunks.ChunkStore)
type PullProgress ¶
type PullProgress struct {
DoneCount, KnownCount, ApproxWrittenBytes uint64
}
type RemoteDatabaseServer ¶
type RemoteDatabaseServer struct { // Called just before the server is started. Ready func() // contains filtered or unexported fields }
func NewRemoteDatabaseServer ¶
func NewRemoteDatabaseServer(cs chunks.ChunkStore, address string, port int) *RemoteDatabaseServer
func (*RemoteDatabaseServer) Port ¶
func (s *RemoteDatabaseServer) Port() int
Port is the actual port used. This may be different than the port passed in to NewRemoteDatabaseServer.
func (*RemoteDatabaseServer) Run ¶
func (s *RemoteDatabaseServer) Run()
Run blocks while the RemoteDatabaseServer is listening. Running on a separate go routine is supported.
func (*RemoteDatabaseServer) Stop ¶
func (s *RemoteDatabaseServer) Stop()
Stop causes the RemoteDatabaseServer to stop listening and an existing call to Run() to return.