Documentation ¶
Index ¶
- Constants
- Variables
- func Import(attributeID string, terms []TermSpec, startRow, nRows int, idRows []int, ...) (*ls.Layer, error)
- func ImportSchema(ctx *ls.Context, rows [][]string, context map[string]interface{}) ([]*ls.Layer, error)
- func ParseIngest(context *ls.Context, ingester *ls.Ingester, parser Parser, ...) (*lpg.Node, error)
- func ReadCSV(input io.Reader, csvSeparator string) ([][]string, error)
- func ReadExcel(input io.Reader) (map[string][][]string, error)
- func StreamCSVRows(input io.Reader, csvSeparator string, headerRow int) (<-chan Row, error)
- func StreamExcelSheetRows(input io.Reader, sheet string, headerRow int) (<-chan Row, error)
- type CSVImportSpec
- type ErrColIndexOutOfBounds
- type ErrInvalidID
- type Parser
- type Row
- type TermSpec
- type Writer
- type WriterColumn
Constants ¶
View Source
const CSV = ls.LS + "csv/"
Variables ¶
View Source
var ErrMultipleNodesMatched = errors.New("Multiple nodes match query")
Functions ¶
func Import ¶
func Import(attributeID string, terms []TermSpec, startRow, nRows int, idRows []int, entityID string, required string, input [][]string) (*ls.Layer, error)
Import imports a CSV schema. The CSV file is organized as columns, one column for base attribute names, and other columns for overlays. CSV does not support nested attributes. Returns a single Layer object.
func ImportSchema ¶
func ImportSchema(ctx *ls.Context, rows [][]string, context map[string]interface{}) ([]*ls.Layer, error)
ImportSchema imports a schema from a CSV file. The CSV file is organized as follows:
valueType determines the schema header start
valueType, v
entityIdFields, f, f, ...
@id, @type, <term>, <term>
layerId, Schema,,...
layerId, Overlay, true, true   --> true means include this attribute in overlay
attrId, Object, termValue, termValue
attrId, Value, termValue, termValue
...
The terms are expanded using the JSON-LD context given.
func ParseIngest ¶
func StreamCSVRows ¶
Types ¶
type CSVImportSpec ¶
type CSVImportSpec struct { AttributeID string `json:"attributeId" yaml:"attributeId"` LayerType string `json:"layerType" yaml:"layerType"` LayerID string `json:"layerId" yaml:"layerId"` RootID string `json:"rootId" yaml:"rootId"` ValueType string `json:"valueType" yaml:"valueType"` EntityIDRows []int `json:"entityIdRows" yaml:"entityIdRows"` EntityID string `json:"entityId" yaml:"entityId"` Required string `json:"required" yaml:"required"` StartRow int `json:"startRow" yaml:"startRow"` NRows int `json:"nrows" yaml:"nrows"` Terms []TermSpec `json:"terms" yaml:"terms"` }
type ErrColIndexOutOfBounds ¶
func (ErrColIndexOutOfBounds) Error ¶
func (e ErrColIndexOutOfBounds) Error() string
type ErrInvalidID ¶
type ErrInvalidID struct {
Row int
}
func (ErrInvalidID) Error ¶
func (e ErrInvalidID) Error() string
type Parser ¶
type TermSpec ¶
type TermSpec struct { // The term Term string `json:"term"` // If nonempty, this template is used to build the term contents // with {{.term}}, and {{.row}} in template context. {{.term}} gives // the Term, and {{.row}} gives the cells of the current row TermTemplate string `json:"template"` // Is property an array Array bool `json:"array"` // Array separator character ArraySeparator string `json:"separator"` }
type Writer ¶
type Writer struct { // openCypher query giving the root nodes for each row of data. This // should be of the form: // // match (n ...) return n // // If empty, all root nodes of the graph are included in the output RowRootQuery string `json:"rowQuery" yaml:"rowQuery"` // The column names in the output. If the column name does not have // a column query, then the column query is assumed to be // // match (root)-[]->(n:DocumentNode {attributeName: <attributeName>}) return n Columns []WriterColumn `json:"columns" yaml:"columns"` }
Writer writes CSV output.
The writer specifies how to interpret the input graph. Each row of output data is determined by an openCypher query (RowRootQuery) that selects the root node for that row.
func (*Writer) WriteHeader ¶
WriteHeader writes the header to the given writer
type WriterColumn ¶
type WriterColumn struct { Name string `json:"name" yaml:"name"` // Optional openCypher queries for each column. The map key is the // column name, and the map value is an opencypher query that is // evaluated with `root` node set to the current root node. Query string `json:"query" yaml:"query"` // contains filtered or unexported fields }
Click to show internal directories.
Click to hide internal directories.