Documentation ¶
Index ¶
- Variables
- func InferSchema(ctx context.Context, root *doltdb.RootValue, rd table.TableReadCloser, ...) (schema.Schema, error)
- func NewSqlEngineReader(ctx context.Context, dEnv *env.DoltEnv, tableName string) (*sqlEngineTableReader, error)
- func NewSqlEngineTableReaderWithEngine(sqlCtx *sql.Context, se *sqle.Engine, db dsqle.Database, ...) (*sqlEngineTableReader, error)
- func SchAndTableNameFromFile(ctx context.Context, path string, fs filesys.ReadableFS, ...) (string, schema.Schema, error)
- type ChannelRowSource
- func (c *ChannelRowSource) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool
- func (c *ChannelRowSource) Children() []sql.Node
- func (c *ChannelRowSource) Resolved() bool
- func (c *ChannelRowSource) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter, error)
- func (c *ChannelRowSource) Schema() sql.Schema
- func (c *ChannelRowSource) String() string
- func (c *ChannelRowSource) WithChildren(children ...sql.Node) (sql.Node, error)
- type CsvOptions
- type DataFormat
- type DataLocation
- type DataMover
- type DataMoverCloser
- type DataMoverCreationErrType
- type DataMoverCreationError
- type DataMoverOptions
- type DataMoverPipeline
- type FileDataLocation
- func (dl FileDataLocation) Exists(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS) (bool, error)
- func (dl FileDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, ...) (table.SqlTableWriter, error)
- func (dl FileDataLocation) NewReader(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS, ...) (rdCl table.SqlRowReader, sorted bool, err error)
- func (dl FileDataLocation) String() string
- type JSONOptions
- type MoverOptions
- type ParquetOptions
- type SqlEngineTableWriter
- func (s *SqlEngineTableWriter) Commit(ctx context.Context) error
- func (s *SqlEngineTableWriter) RowOperationSchema() sql.PrimaryKeySchema
- func (s *SqlEngineTableWriter) TableSchema() sql.PrimaryKeySchema
- func (s *SqlEngineTableWriter) WriteRows(ctx context.Context, inputChannel chan sql.Row, ...) (err error)
- type StreamDataLocation
- func (dl StreamDataLocation) Exists(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS) (bool, error)
- func (dl StreamDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, ...) (table.SqlTableWriter, error)
- func (dl StreamDataLocation) NewReader(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS, ...) (rdCl table.SqlRowReader, sorted bool, err error)
- func (dl StreamDataLocation) String() string
- type TableImportOp
- type XlsxOptions
Constants ¶
This section is empty.
Variables ¶
var ErrProvidedPkNotFound = errors.New("provided primary key not found")
Functions ¶
func InferSchema ¶
func NewSqlEngineReader ¶
Types ¶
type ChannelRowSource ¶
type ChannelRowSource struct {
// contains filtered or unexported fields
}
ChannelRowSource is a sql.Node that wraps a channel as a sql.RowIter.
func NewChannelRowSource ¶
func NewChannelRowSource(schema sql.Schema, rowChannel chan sql.Row) *ChannelRowSource
NewChannelRowSource returns a ChannelRowSource object.
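As a rough illustration of the pattern described above, a producer goroutine feeds rows into the channel while the node is iterated (or handed to the engine). This is a minimal sketch, not from the package docs: sch is an assumed sql.Schema for the rows and sqlCtx an existing *sql.Context, neither of which is defined by this package.

rowCh := make(chan sql.Row, 16)
src := NewChannelRowSource(sch, rowCh) // sch: assumed sql.Schema describing the rows

go func() {
    defer close(rowCh) // closing the channel ends the row stream
    rowCh <- sql.Row{int64(1), "alice"}
    rowCh <- sql.Row{int64(2), "bob"}
}()

iter, err := src.RowIter(sqlCtx, nil)
if err != nil {
    // handle error
}
// drain iter with iter.Next as usual; the exact Next signature depends on the
// go-mysql-server version in use
_ = iter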
func (*ChannelRowSource) CheckPrivileges ¶
func (c *ChannelRowSource) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool
CheckPrivileges implements the sql.Node interface.
func (*ChannelRowSource) Children ¶
func (c *ChannelRowSource) Children() []sql.Node
Children implements the sql.Node interface.
func (*ChannelRowSource) Resolved ¶
func (c *ChannelRowSource) Resolved() bool
Resolved implements the sql.Node interface.
func (*ChannelRowSource) RowIter ¶
func (c *ChannelRowSource) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter, error)
RowIter implements the sql.Node interface.
func (*ChannelRowSource) Schema ¶
func (c *ChannelRowSource) Schema() sql.Schema
Schema implements the sql.Node interface.
func (*ChannelRowSource) String ¶
func (c *ChannelRowSource) String() string
String implements the sql.Node interface.
func (*ChannelRowSource) WithChildren ¶
func (c *ChannelRowSource) WithChildren(children ...sql.Node) (sql.Node, error)
WithChildren implements the sql.Node interface.
type CsvOptions ¶
type CsvOptions struct {
Delim string
}
type DataFormat ¶
type DataFormat string
DataFormat is an enumeration of the valid data formats.
const (
    // InvalidDataFormat is the format of a data location that isn't valid
    InvalidDataFormat DataFormat = "invalid"
    // DoltDB is the format of a data location for a dolt table
    DoltDB DataFormat = "doltdb"
    // CsvFile is the format of a data location that is a .csv file
    CsvFile DataFormat = ".csv"
    // PsvFile is the format of a data location that is a .psv file
    PsvFile DataFormat = ".psv"
    // XlsxFile is the format of a data location that is a .xlsx file
    XlsxFile DataFormat = ".xlsx"
    // JsonFile is the format of a data location that is a .json file
    JsonFile DataFormat = ".json"
    // SqlFile is the format of a data location that is a .sql file
    SqlFile DataFormat = ".sql"
    // ParquetFile is the format of a data location that is a .parquet file
    ParquetFile DataFormat = ".parquet"
)
func DFFromString ¶
func DFFromString(dfStr string) DataFormat
DFFromString returns the DataFormat corresponding to a string.
func (DataFormat) ReadableStr ¶
func (df DataFormat) ReadableStr() string
ReadableStr returns a human-readable string for a DataFormat.
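A small, hypothetical usage sketch; whether DFFromString expects the extension with or without the leading dot is not stated on this page, so the argument below is an assumption.

df := DFFromString(".csv") // assumed input form
if df == InvalidDataFormat {
    // the string did not map to a known format
}
fmt.Println(df.ReadableStr()) // human-readable name of the format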
type DataLocation ¶
type DataLocation interface {
    fmt.Stringer

    // Exists returns true if the DataLocation already exists
    Exists(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS) (bool, error)

    // NewReader creates a TableReadCloser for the DataLocation
    NewReader(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS, opts interface{}) (rdCl table.SqlRowReader, sorted bool, err error)

    // NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite
    // an existing table.
    NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, outSch schema.Schema, opts editor.Options, wr io.WriteCloser) (table.SqlTableWriter, error)
}
DataLocation is an interface that can be used to read from or write to the source or destination of a move operation.
func NewDataLocation ¶
func NewDataLocation(path, fileFmtStr string) DataLocation
NewDataLocation creates a DataLocation object from a path and a format string. If the path is the name of a table, a TableDataLocation is returned. If the path is empty, a StreamDataLocation is returned. Otherwise a FileDataLocation is returned. For FileDataLocations and StreamDataLocations, an explicitly provided file format is used as the format; otherwise, for files, the format is inferred from the path's file extension when possible.
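Based on the inference rules above, a sketch of the possible results; the type switch is illustrative only, and TableDataLocation is mentioned here only because the description above names it (it is not listed in this index).

loc := NewDataLocation("people.csv", "") // format inferred from the .csv extension
switch l := loc.(type) {
case FileDataLocation:
    fmt.Println(l.Format) // expected: CsvFile
case StreamDataLocation:
    _ = l // returned when the path is empty (e.g. piping via stdin/stdout)
default:
    _ = l // a table name yields a TableDataLocation
}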
type DataMover ¶
type DataMover struct {
    Rd         table.TableReadCloser
    Transforms *pipeline.TransformCollection
    Wr         table.TableWriteCloser
    ContOnErr  bool
}
type DataMoverCloser ¶
type DataMoverCreationErrType ¶
type DataMoverCreationErrType string
const (
    CreateReaderErr   DataMoverCreationErrType = "Create reader error"
    NomsKindSchemaErr DataMoverCreationErrType = "Invalid schema error"
    SchemaErr         DataMoverCreationErrType = "Schema error"
    MappingErr        DataMoverCreationErrType = "Mapping error"
    ReplacingErr      DataMoverCreationErrType = "Replacing error"
    CreateMapperErr   DataMoverCreationErrType = "Mapper creation error"
    CreateWriterErr   DataMoverCreationErrType = "Create writer error"
    CreateSorterErr   DataMoverCreationErrType = "Create sorter error"
)
type DataMoverCreationError ¶
type DataMoverCreationError struct {
    ErrType DataMoverCreationErrType
    Cause   error
}
func (*DataMoverCreationError) String ¶
func (dmce *DataMoverCreationError) String() string
type DataMoverOptions ¶
type DataMoverPipeline ¶
type DataMoverPipeline struct {
// contains filtered or unexported fields
}
DataMoverPipeline is an errgroup-based pipeline that reads rows from a reader and writes them to a destination with a writer.
func NewDataMoverPipeline ¶
func NewDataMoverPipeline(ctx context.Context, rd table.SqlRowReader, wr table.SqlTableWriter) *DataMoverPipeline
func (*DataMoverPipeline) Execute ¶
func (e *DataMoverPipeline) Execute() error
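A minimal sketch of the intended wiring, assuming ctx, rd, and wr were obtained elsewhere (for example from a DataLocation's NewReader and NewCreatingWriter):

p := NewDataMoverPipeline(ctx, rd, wr) // rd: table.SqlRowReader, wr: table.SqlTableWriter
if err := p.Execute(); err != nil {
    // handle the read/write error surfaced by the errgroup
}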
type FileDataLocation ¶
type FileDataLocation struct {
    // Path is the path of the file on the filesystem
    Path string

    // Format is the DataFormat of the file
    Format DataFormat
}
FileDataLocation is a file that can be imported from or exported to.
func (FileDataLocation) Exists ¶
func (dl FileDataLocation) Exists(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS) (bool, error)
Exists returns true if the DataLocation already exists
func (FileDataLocation) NewCreatingWriter ¶
func (dl FileDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, outSch schema.Schema, opts editor.Options, wr io.WriteCloser) (table.SqlTableWriter, error)
NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite an existing table.
func (FileDataLocation) NewReader ¶
func (dl FileDataLocation) NewReader(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS, opts interface{}) (rdCl table.SqlRowReader, sorted bool, err error)
NewReader creates a TableReadCloser for the DataLocation
func (FileDataLocation) String ¶
func (dl FileDataLocation) String() string
String returns a string representation of the data location.
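A hedged sketch of reading from a file location; passing CsvOptions through the untyped opts parameter is an assumption about how format-specific options are supplied, and ctx, root, and fs are assumed to come from the surrounding dolt environment.

loc := FileDataLocation{Path: "people.csv", Format: CsvFile}

exists, err := loc.Exists(ctx, root, fs)
if err != nil || !exists {
    // missing file or lookup error
}

rd, sorted, err := loc.NewReader(ctx, root, fs, CsvOptions{Delim: ","})
if err != nil {
    // handle reader creation error
}
_ = sorted // whether the source returns rows in sorted order
// read rows from rd and close it when done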
type JSONOptions ¶
type MoverOptions ¶
type MoverOptions struct {
    ContinueOnErr  bool
    Force          bool
    TableToWriteTo string
    Operation      TableImportOp
    DisableFks     bool
}
type ParquetOptions ¶
type SqlEngineTableWriter ¶
type SqlEngineTableWriter struct {
// contains filtered or unexported fields
}
SqlEngineTableWriter is a utility for importing a set of rows through the sql engine.
func NewSqlEngineTableWriter ¶
func NewSqlEngineTableWriterWithEngine ¶
func NewSqlEngineTableWriterWithEngine(ctx *sql.Context, eng *sqle.Engine, db dsqle.Database, createTableSchema, rowOperationSchema schema.Schema, options *MoverOptions, statsCB noms.StatsCB) (*SqlEngineTableWriter, error)
NewSqlEngineTableWriterWithEngine is used by the DoltHub API.
func (*SqlEngineTableWriter) Commit ¶
func (s *SqlEngineTableWriter) Commit(ctx context.Context) error
func (*SqlEngineTableWriter) RowOperationSchema ¶
func (s *SqlEngineTableWriter) RowOperationSchema() sql.PrimaryKeySchema
func (*SqlEngineTableWriter) TableSchema ¶
func (s *SqlEngineTableWriter) TableSchema() sql.PrimaryKeySchema
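A rough sketch of the import flow implied by these methods, assuming an engine, database, and schemas have already been constructed (sqlCtx, eng, db, createSch, rowOpSch are placeholders). The elided arguments to WriteRows shown in the index above are deliberately not guessed here.

writer, err := NewSqlEngineTableWriterWithEngine(sqlCtx, eng, db, createSch, rowOpSch, &MoverOptions{
    ContinueOnErr: true,
    Operation:     UpdateOp,
}, nil) // nil statsCB: no progress reporting
if err != nil {
    // handle construction error
}
_ = writer.TableSchema() // sql.PrimaryKeySchema of the destination table

rowCh := make(chan sql.Row, 128)
// feed rowCh from the import source, close it, then pass it to WriteRows along
// with its remaining (elided) arguments before committing
_ = rowCh

if err := writer.Commit(sqlCtx); err != nil {
    // handle commit failure
}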
type StreamDataLocation ¶
type StreamDataLocation struct {
    Format DataFormat
    Writer io.WriteCloser
    Reader io.ReadCloser
}
StreamDataLocation is a process stream that can be imported from or exported to.
func (StreamDataLocation) Exists ¶
func (dl StreamDataLocation) Exists(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS) (bool, error)
Exists returns true if the DataLocation already exists
func (StreamDataLocation) NewCreatingWriter ¶
func (dl StreamDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, outSch schema.Schema, opts editor.Options, wr io.WriteCloser) (table.SqlTableWriter, error)
NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite an existing table.
func (StreamDataLocation) NewReader ¶
func (dl StreamDataLocation) NewReader(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS, opts interface{}) (rdCl table.SqlRowReader, sorted bool, err error)
NewReader creates a TableReadCloser for the DataLocation
func (StreamDataLocation) String ¶
func (dl StreamDataLocation) String() string
String returns a string representation of the data location.
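For comparison with FileDataLocation, a stream location wires the process's standard streams (or any reader/writer) directly to the import or export; this is a sketch, and the choice of os.Stdin/os.Stdout is just an example.

loc := StreamDataLocation{
    Format: CsvFile,
    Reader: os.Stdin,  // import source when piping data in
    Writer: os.Stdout, // export destination when piping data out
}
fmt.Println(loc.String())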
type TableImportOp ¶
type TableImportOp string
const (
    CreateOp  TableImportOp = "overwrite"
    ReplaceOp TableImportOp = "replace"
    UpdateOp  TableImportOp = "update"
)
type XlsxOptions ¶
type XlsxOptions struct {
SheetName string
}