Documentation ¶
Index ¶
- func Marshal(srcInterface []interface{}, schemaHandler *schema.SchemaHandler) (tb *map[string]*layout.Table, err error)
- func MarshalArrow(recs []interface{}, schemaHandler *schema.SchemaHandler) (tb *map[string]*layout.Table, err error)
- func MarshalCSV(records []interface{}, schemaHandler *schema.SchemaHandler) (*map[string]*layout.Table, error)
- func MarshalJSON(ss []interface{}, schemaHandler *schema.SchemaHandler) (tb *map[string]*layout.Table, err error)
- func Unmarshal(tableMap *map[string]*layout.Table, bgn int, end int, dstInterface interface{}, ...) (err error)
- type KeyValue
- type MapRecord
- type Marshaler
- type Node
- type NodeBufType
- type ParquetMap
- type ParquetMapStruct
- type ParquetPtr
- type ParquetSlice
- type ParquetStruct
- type SliceRecord
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func Marshal ¶
func Marshal(srcInterface []interface{}, schemaHandler *schema.SchemaHandler) (tb *map[string]*layout.Table, err error)
Marshal converts the given objects to a table map. srcInterface is a slice of objects.
func MarshalArrow ¶ added in v1.6.4
func MarshalArrow(recs []interface{}, schemaHandler *schema.SchemaHandler) (tb *map[string]*layout.Table, err error)
MarshalArrow accepts a slice of rows with which it creates a table object. We need to append row by row as opposed to the arrow go provided way of column by column since the wrapper ParquetWriter uses the number of rows to execute intermediate flush depending on the size of the objects, determined by row, which are currently written.
func MarshalCSV ¶
func MarshalCSV(records []interface{}, schemaHandler *schema.SchemaHandler) (*map[string]*layout.Table, error)
MarshalCSV is the marshal function for CSV-like data.
func MarshalJSON ¶
func MarshalJSON(ss []interface{}, schemaHandler *schema.SchemaHandler) (tb *map[string]*layout.Table, err error)
ss is a slice of JSON-encoded strings (passed as []interface{}; each element is expected to be a string).
Types ¶
type Marshaler ¶
type Marshaler interface {
Marshal(node *Node, nodeBuf *NodeBufType) []*Node
}
// Marshaler implementations exist for each supported Go shape (struct, pointer, slice, map); the buffer type below exists to improve performance.
type NodeBufType ¶
NodeBufType is a reusable buffer of Node values, used to improve performance by avoiding repeated allocations during marshaling.
func NewNodeBuf ¶
func NewNodeBuf(ln int) *NodeBufType
func (*NodeBufType) GetNode ¶
func (nbt *NodeBufType) GetNode() *Node
func (*NodeBufType) Reset ¶
func (nbt *NodeBufType) Reset()
type ParquetMap ¶
type ParquetMap struct {
// contains filtered or unexported fields
}
func (*ParquetMap) Marshal ¶
func (p *ParquetMap) Marshal(node *Node, nodeBuf *NodeBufType) []*Node
type ParquetMapStruct ¶
type ParquetMapStruct struct {
// contains filtered or unexported fields
}
func (*ParquetMapStruct) Marshal ¶
func (p *ParquetMapStruct) Marshal(node *Node, nodeBuf *NodeBufType) []*Node
type ParquetPtr ¶
type ParquetPtr struct{}
func (*ParquetPtr) Marshal ¶
func (p *ParquetPtr) Marshal(node *Node, nodeBuf *NodeBufType) []*Node
type ParquetSlice ¶
type ParquetSlice struct {
// contains filtered or unexported fields
}
func (*ParquetSlice) Marshal ¶
func (p *ParquetSlice) Marshal(node *Node, nodeBuf *NodeBufType) []*Node
type ParquetStruct ¶
type ParquetStruct struct{}
func (*ParquetStruct) Marshal ¶
func (p *ParquetStruct) Marshal(node *Node, nodeBuf *NodeBufType) []*Node
type SliceRecord ¶ added in v1.4.0
Click to show internal directories.
Click to hide internal directories.