package store

v0.0.0-...-80fb460
Published: Nov 24, 2024 License: Apache-2.0 Imports: 22 Imported by: 11

Documentation

Index

Constants

const (
	CONTEXT_MERGE = ContextType(iota)
	CONTEXT_READ
	CONTEXT_FLUSH
	CONTEXT_DEFAULT
)
const (
	COPY_BUFFER_SIZE = 16384
)
const RAM_BUFFER_SIZE = 1024
const (
	SKIP_BUFFER_SIZE = 1024
)

Variables

var NoLockFactoryInstance = NewNoLockFactory()

Functions

func CopyFrom

func CopyFrom(ctx context.Context, d DirectoryDeleter, from Directory, src, dest string, ioContext *IOContext) error

func FileTime

func FileTime(info os.FileInfo) (access, create, modify time.Time)

func NewFakeHash32

func NewFakeHash32() hash.Hash32

func SegmentFileName

func SegmentFileName(segmentName, segmentSuffix, ext string) string

SegmentFileName returns a file name that includes the given segment name, segment suffix, and extension. The format of the file name is: <segmentName>(_<segmentSuffix>)(.<ext>). NOTE: .<ext> is added to the resulting file name only if ext is not empty. NOTE: _<segmentSuffix> is added to the resulting file name only if segmentSuffix is not the empty string. NOTE: all custom files should be named using this method, or otherwise some structures may fail to handle them properly (such as if they are added to compound files).
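
A minimal sketch of the documented format, assuming the calling code is compiled alongside the store package (the module import path is not shown on this page); the segment name, suffix, and extension values are illustrative:

func exampleSegmentFileName() {
	// Assuming the documented format <segmentName>(_<segmentSuffix>)(.<ext>):
	full := SegmentFileName("_0", "Lucene90_0", "doc") // expected: "_0_Lucene90_0.doc"
	bare := SegmentFileName("_0", "", "si")            // expected: "_0.si" (empty suffix is omitted)
	_, _ = full, bare
}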

Types

type BaseDataInput

type BaseDataInput struct {
	// contains filtered or unexported fields
}

func NewBaseDataInput

func NewBaseDataInput(r CloneReader) *BaseDataInput

func (*BaseDataInput) Close

func (d *BaseDataInput) Close() error

func (*BaseDataInput) ReadByte

func (d *BaseDataInput) ReadByte() (byte, error)

func (*BaseDataInput) ReadMapOfStrings

func (d *BaseDataInput) ReadMapOfStrings(ctx context.Context) (map[string]string, error)

func (*BaseDataInput) ReadSetOfStrings

func (d *BaseDataInput) ReadSetOfStrings(ctx context.Context) (map[string]struct{}, error)

func (*BaseDataInput) ReadString

func (d *BaseDataInput) ReadString(ctx context.Context) (string, error)

func (*BaseDataInput) ReadUint16

func (d *BaseDataInput) ReadUint16(ctx context.Context) (uint16, error)

func (*BaseDataInput) ReadUint32

func (d *BaseDataInput) ReadUint32(context.Context) (uint32, error)

func (*BaseDataInput) ReadUint64

func (d *BaseDataInput) ReadUint64(context.Context) (uint64, error)

func (*BaseDataInput) ReadUvarint

func (d *BaseDataInput) ReadUvarint(context.Context) (uint64, error)

func (*BaseDataInput) ReadZInt32

func (d *BaseDataInput) ReadZInt32(context.Context) (int64, error)

func (*BaseDataInput) ReadZInt64

func (d *BaseDataInput) ReadZInt64(ctx context.Context) (int64, error)

func (*BaseDataInput) SkipBytes

func (d *BaseDataInput) SkipBytes(ctx context.Context, numBytes int) error

SkipBytes skips over numBytes bytes. The contract on this method is that it should have the same behavior as reading the same number of bytes into a buffer and discarding its content. Negative values of numBytes are not supported.

type BaseDataOutput

type BaseDataOutput struct {
	// contains filtered or unexported fields
}

func NewBaseDataOutput

func NewBaseDataOutput(writer io.Writer) *BaseDataOutput

func (*BaseDataOutput) CopyBytes

func (d *BaseDataOutput) CopyBytes(ctx context.Context, input DataInput, numBytes int) error

func (*BaseDataOutput) WriteByte

func (d *BaseDataOutput) WriteByte(c byte) error

func (*BaseDataOutput) WriteMapOfStrings

func (d *BaseDataOutput) WriteMapOfStrings(ctx context.Context, values map[string]string) error

func (*BaseDataOutput) WriteSetOfStrings

func (d *BaseDataOutput) WriteSetOfStrings(ctx context.Context, values map[string]struct{}) error

func (*BaseDataOutput) WriteString

func (d *BaseDataOutput) WriteString(ctx context.Context, s string) error

func (*BaseDataOutput) WriteUint16

func (d *BaseDataOutput) WriteUint16(ctx context.Context, i uint16) error

func (*BaseDataOutput) WriteUint32

func (d *BaseDataOutput) WriteUint32(ctx context.Context, i uint32) error

func (*BaseDataOutput) WriteUint64

func (d *BaseDataOutput) WriteUint64(ctx context.Context, i uint64) error

func (*BaseDataOutput) WriteUvarint

func (d *BaseDataOutput) WriteUvarint(ctx context.Context, i uint64) error

func (*BaseDataOutput) WriteZInt32

func (d *BaseDataOutput) WriteZInt32(ctx context.Context, i int32) error

func (*BaseDataOutput) WriteZInt64

func (d *BaseDataOutput) WriteZInt64(ctx context.Context, i int64) error

type BaseDirectory

type BaseDirectory struct {
	DeleteFile func(ctx context.Context, name string) error
}

type BaseIndexInput

type BaseIndexInput struct {
	*BaseDataInput
	// contains filtered or unexported fields
}

func NewBaseIndexInput

func NewBaseIndexInput(input IndexInput) *BaseIndexInput

func (*BaseIndexInput) RandomAccessSlice

func (i *BaseIndexInput) RandomAccessSlice(offset int64, length int64) (RandomAccessInput, error)

type BaseIndexOutput

type BaseIndexOutput struct {
	*BaseDataOutput
	// contains filtered or unexported fields
}

func NewBaseIndexOutput

func NewBaseIndexOutput(name string, writer io.Writer) *BaseIndexOutput

func (*BaseIndexOutput) GetName

func (r *BaseIndexOutput) GetName() string

type Buffer

type Buffer struct {
	*bytes.Buffer
}

func NewBuffer

func NewBuffer() *Buffer

func (*Buffer) Clone

func (b *Buffer) Clone() CloneReader

type BufferInput

type BufferInput struct {
	*BaseDataInput
	// contains filtered or unexported fields
}

func NewBufferDataInput

func NewBufferDataInput(buf *bytes.Buffer) *BufferInput

func (*BufferInput) Clone

func (b *BufferInput) Clone() CloneReader

func (*BufferInput) GetFilePointer

func (b *BufferInput) GetFilePointer() int64

func (*BufferInput) Length

func (b *BufferInput) Length() int64

func (*BufferInput) RandomAccessSlice

func (b *BufferInput) RandomAccessSlice(offset int64, length int64) (RandomAccessInput, error)

func (*BufferInput) Read

func (b *BufferInput) Read(p []byte) (n int, err error)

func (*BufferInput) Seek

func (b *BufferInput) Seek(offset int64, whence int) (int64, error)

func (*BufferInput) Slice

func (b *BufferInput) Slice(sliceDescription string, offset, length int64) (IndexInput, error)

type BufferOutput

type BufferOutput struct {
	*BaseDataOutput
	// contains filtered or unexported fields
}

func NewBufferDataOutput

func NewBufferDataOutput() *BufferOutput

func (*BufferOutput) Bytes

func (b *BufferOutput) Bytes() []byte

func (*BufferOutput) Close

func (b *BufferOutput) Close() error

func (*BufferOutput) CopyTo

func (b *BufferOutput) CopyTo(output DataOutput) error

func (*BufferOutput) GetChecksum

func (b *BufferOutput) GetChecksum() (uint32, error)

func (*BufferOutput) GetFilePointer

func (b *BufferOutput) GetFilePointer() int64

func (*BufferOutput) GetName

func (b *BufferOutput) GetName() string

func (*BufferOutput) Reset

func (b *BufferOutput) Reset()

func (*BufferOutput) Write

func (b *BufferOutput) Write(p []byte) (n int, err error)

type BufferedChecksumIndexInput

type BufferedChecksumIndexInput struct {
	*BaseIndexInput
	// contains filtered or unexported fields
}

BufferedChecksumIndexInput Simple implementation of ChecksumIndexInput that wraps another input and delegates calls.

func NewBufferedChecksumIndexInput

func NewBufferedChecksumIndexInput(in IndexInput) *BufferedChecksumIndexInput

func (*BufferedChecksumIndexInput) Clone

func (*BufferedChecksumIndexInput) Close

func (b *BufferedChecksumIndexInput) Close() error

func (*BufferedChecksumIndexInput) GetChecksum

func (b *BufferedChecksumIndexInput) GetChecksum() uint32

func (*BufferedChecksumIndexInput) GetFilePointer

func (b *BufferedChecksumIndexInput) GetFilePointer() int64

func (*BufferedChecksumIndexInput) Length

func (b *BufferedChecksumIndexInput) Length() int64

func (*BufferedChecksumIndexInput) Read

func (b *BufferedChecksumIndexInput) Read(bs []byte) (int, error)

func (*BufferedChecksumIndexInput) Seek

func (b *BufferedChecksumIndexInput) Seek(pos int64, whence int) (int64, error)

func (*BufferedChecksumIndexInput) Slice

func (b *BufferedChecksumIndexInput) Slice(sliceDescription string, offset, length int64) (IndexInput, error)

type BytesInput

type BytesInput struct {
	*BaseDataInput
	// contains filtered or unexported fields
}

BytesInput A DataInput backed by a byte slice. WARNING: this type omits all low-level checks.

func NewBytesInput

func NewBytesInput(bs []byte) *BytesInput

func (*BytesInput) Clone

func (b *BytesInput) Clone() CloneReader

func (*BytesInput) GetFilePointer

func (b *BytesInput) GetFilePointer() int64

func (*BytesInput) Length

func (b *BytesInput) Length() int64

func (*BytesInput) RandomAccessSlice

func (b *BytesInput) RandomAccessSlice(offset int64, length int64) (RandomAccessInput, error)

func (*BytesInput) Read

func (b *BytesInput) Read(p []byte) (n int, err error)

func (*BytesInput) Seek

func (b *BytesInput) Seek(offset int64, whence int) (int64, error)

func (*BytesInput) Slice

func (b *BytesInput) Slice(sliceDescription string, offset, length int64) (IndexInput, error)

type BytesOutput

type BytesOutput struct {
	*BaseDataOutput
	// contains filtered or unexported fields
}

BytesOutput A DataOutput backed by a byte slice. WARNING: this type omits most low-level checks, so be sure to test it heavily.

func NewBytesDataOutput

func NewBytesDataOutput(bytes []byte) *BytesOutput

func (*BytesOutput) GetPosition

func (r *BytesOutput) GetPosition() int

func (*BytesOutput) Reset

func (r *BytesOutput) Reset(bytes []byte) error

func (*BytesOutput) ResetAt

func (r *BytesOutput) ResetAt(bytes []byte, offset, size int) error

func (*BytesOutput) Write

func (r *BytesOutput) Write(b []byte) (int, error)

type BytesRandomAccessInput

type BytesRandomAccessInput struct {
	// contains filtered or unexported fields
}

func (*BytesRandomAccessInput) ReadAt

func (b *BytesRandomAccessInput) ReadAt(p []byte, off int64) (int, error)

func (*BytesRandomAccessInput) ReadU16

func (b *BytesRandomAccessInput) ReadU16(pos int64) (uint16, error)

func (*BytesRandomAccessInput) ReadU32

func (b *BytesRandomAccessInput) ReadU32(pos int64) (uint32, error)

func (*BytesRandomAccessInput) ReadU64

func (b *BytesRandomAccessInput) ReadU64(pos int64) (uint64, error)

func (*BytesRandomAccessInput) ReadU8

func (b *BytesRandomAccessInput) ReadU8(pos int64) (byte, error)

type ChecksumIndexInput

type ChecksumIndexInput interface {
	IndexInput

	// GetChecksum Returns the current checksum value
	GetChecksum() uint32
}

ChecksumIndexInput Extension of IndexInput that computes a checksum as it goes. Callers can retrieve the checksum via GetChecksum().

func OpenChecksumInput

func OpenChecksumInput(dir Directory, name string) (ChecksumIndexInput, error)
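
A hedged sketch of computing the checksum of an existing file through a ChecksumIndexInput; it assumes the standard context package is imported and that SkipBytes feeds the wrapped checksum, per the SkipBytes contract documented above:

func checksumOfFile(ctx context.Context, dir Directory, name string) (uint32, error) {
	in, err := OpenChecksumInput(dir, name)
	if err != nil {
		return 0, err
	}
	defer in.Close()

	// Per the SkipBytes contract, skipping behaves like reading and discarding,
	// so the accumulated checksum is assumed to cover these bytes.
	if err := in.SkipBytes(ctx, int(in.Length())); err != nil {
		return 0, err
	}
	return in.GetChecksum(), nil
}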

type CloneReader

type CloneReader interface {
	io.Reader

	Clone() CloneReader
}

type ContextType

type ContextType int

ContextType is an enumeration that specifies the context in which the Directory is being used.

type DataInput

type DataInput interface {
	// ByteReader Reads and returns a single byte.
	// See Also: DataOutput.writeByte(byte)
	//ReadByte() (byte, error)
	io.ByteReader

	CloneReader

	// ReadUint16 Reads two bytes and returns a short.
	// See Also: DataOutput.writeByte(byte)
	ReadUint16(ctx context.Context) (uint16, error)

	// ReadUint32 Reads four bytes and returns an int.
	// See Also: DataOutput.writeInt(int)
	ReadUint32(ctx context.Context) (uint32, error)

	// ReadUvarint
	// Reads an int stored in variable-length format. Reads between one and five bytes.
	// Smaller values take fewer bytes. Negative numbers are supported, but should be avoided.
	// The format is described further in DataOutput.writeVInt(int).
	// See Also: DataOutput.writeVInt(int)
	ReadUvarint(ctx context.Context) (uint64, error)

	// ReadZInt32 Read a zig-zag-encoded variable-length integer.
	// See Also: DataOutput.writeZInt(int)
	ReadZInt32(ctx context.Context) (int64, error)

	// ReadUint64 Reads eight bytes and returns a long.
	// See Also: DataOutput.writeLong(long)
	ReadUint64(ctx context.Context) (uint64, error)

	// ReadZInt64 Read a zig-zag-encoded variable-length integer. Reads between one and ten bytes.
	// See Also: DataOutput.writeZLong(long)
	ReadZInt64(ctx context.Context) (int64, error)

	// ReadString Reads a string.
	// See Also: DataOutput.writeString(String)
	ReadString(ctx context.Context) (string, error)

	// ReadMapOfStrings Reads a Map<String,String> previously written with DataOutput.writeMapOfStrings(Map).
	// Returns: An immutable map containing the written contents.
	ReadMapOfStrings(ctx context.Context) (map[string]string, error)

	// ReadSetOfStrings Reads a Set<String> previously written with DataOutput.writeSetOfStrings(Set).
	// Returns: An immutable set containing the written contents.
	ReadSetOfStrings(ctx context.Context) (map[string]struct{}, error)

	SkipBytes(ctx context.Context, numBytes int) error
}

DataInput Abstract base interface for performing read operations on Lucene's low-level data types. DataInput may only be used from one thread, because it is not thread safe (it keeps internal state such as the file position). To allow multithreaded use, every DataInput instance must be cloned before it is used in another thread. Implementations must therefore provide Clone(), returning a new DataInput which operates on the same underlying resource, but positioned independently.
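
Because of the single-thread contract above, a clone is needed per goroutine. A sketch under the assumption that wrapping the clone with NewBaseDataInput (listed above) is an acceptable way to regain the typed read methods on the cloned reader:

func readInParallel(ctx context.Context, in DataInput) {
	// Clone before handing off to another goroutine; the clone reads the same
	// underlying data but tracks its own position.
	clone := NewBaseDataInput(in.Clone())

	go func() {
		// The clone is used exclusively by this goroutine.
		if _, err := clone.ReadUvarint(ctx); err != nil {
			return
		}
	}()

	// The original stays with the current goroutine.
	_, _ = in.ReadUvarint(ctx)
}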

type DataOutput

type DataOutput interface {
	// ByteWriter Write a single byte.
	// The most primitive data type is an eight-bit byte. Files are accessed as sequences of bytes.
	// All other data types are defined as sequences of bytes, so file formats are byte-order independent.
	// See Also: IndexInput.readByte()
	io.ByteWriter

	// Writer
	// Writes an array of bytes.
	io.Writer

	// WriteUint32 Writes an int as four bytes.
	// 32-bit unsigned integer written as four bytes, high-order bytes first.
	// See Also: DataInput.readInt()
	WriteUint32(ctx context.Context, i uint32) error

	// WriteUint16 Writes a short as two bytes.
	// See Also: DataInput.readShort()
	WriteUint16(ctx context.Context, i uint16) error

	// WriteUvarint
	// Writes an int in a variable-length format. Writes between one and five bytes. Smaller
	// values take fewer bytes. Negative numbers are supported, but should be avoided.
	// VByte is a variable-length format for positive integers where the high-order bit of each
	// byte indicates whether more bytes remain to be read. The low-order seven bits are appended as
	// increasingly more significant bits in the resulting integer value. Thus values from zero to 127 may
	// be stored in a single byte, values from 128 to 16,383 may be stored in two bytes, and so on.
	WriteUvarint(ctx context.Context, i uint64) error

	// WriteZInt32 Write a zig-zag-encoded variable-length integer. This is typically useful to write small
	// signed ints and is equivalent to calling writeVInt(BitUtil.zigZagEncode(i)).
	// See Also: DataInput.readZInt()
	WriteZInt32(ctx context.Context, i int32) error

	// WriteUint64 Writes a long as eight bytes.
	// 64-bit unsigned integer written as eight bytes, high-order bytes first.
	// See Also: DataInput.readLong()
	WriteUint64(ctx context.Context, i uint64) error

	// WriteZInt64 Write a zig-zag-encoded variable-length long. Writes between one and ten bytes. This is typically
	// useful to write small signed ints.
	// See Also: DataInput.readZLong()
	WriteZInt64(ctx context.Context, i int64) error

	// WriteString Writes a string.
	// Writes strings as UTF-8 encoded bytes. First the length, in bytes, is written as a VInt, followed by the bytes.
	// See Also: DataInput.readString()
	WriteString(ctx context.Context, s string) error

	// CopyBytes Copy numBytes bytes from input to ourself.
	CopyBytes(ctx context.Context, input DataInput, numBytes int) error

	// WriteMapOfStrings Writes a String map.
	// First the size is written as a vInt, followed by each key-value pair written as two consecutive Strings.
	WriteMapOfStrings(ctx context.Context, values map[string]string) error

	// WriteSetOfStrings Writes a String set.
	// First the size is written as a vInt, followed by each value written as a String.
	WriteSetOfStrings(ctx context.Context, values map[string]struct{}) error
}

DataOutput Abstract base interface for performing write operations on Lucene's low-level data types. DataOutput may only be used from one thread, because it is not thread safe (it keeps internal state such as the file position).
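
A hedged round-trip sketch using BufferOutput and BytesInput from this package; the written values are illustrative:

func roundTrip(ctx context.Context) error {
	out := NewBufferDataOutput()
	if err := out.WriteUvarint(ctx, 128); err != nil {
		return err
	}
	if err := out.WriteString(ctx, "hello"); err != nil {
		return err
	}

	// Read the encoded bytes back in the same order they were written.
	in := NewBytesInput(out.Bytes())
	n, err := in.ReadUvarint(ctx)
	if err != nil {
		return err
	}
	s, err := in.ReadString(ctx)
	if err != nil {
		return err
	}
	_, _ = n, s // expected: 128, "hello"
	return nil
}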

type Directory

type Directory interface {

	// ListAll
	// Returns names of all files stored in this directory. The output must be in sorted
	// (UTF-16, java's String.compareTo) order.
	// Throws: IOException – in case of I/O error
	ListAll(ctx context.Context) ([]string, error)

	// DeleteFile
	// Removes an existing file in the directory. This method must throw either
	// NoSuchFileException or FileNotFoundException if name points to a non-existing file.
	// Params: name – the name of an existing file.
	// Throws: IOException – in case of I/O error
	DeleteFile(ctx context.Context, name string) error

	// FileLength
	// Returns the byte length of a file in the directory. This method must throw either
	// NoSuchFileException or FileNotFoundException if name points to a non-existing file.
	// name: the name of an existing file.
	// Throws: IOException – in case of I/O error
	FileLength(ctx context.Context, name string) (int64, error)

	// CreateOutput
	// Creates a new, empty file in the directory and returns an IndexOutput instance for
	// appending data to this file. This method must throw java.nio.file.FileAlreadyExistsException if
	// the file already exists.
	// Params: name – the name of the file to create.
	// Throws: IOException – in case of I/O error
	CreateOutput(ctx context.Context, name string) (IndexOutput, error)

	// CreateTempOutput
	// Creates a new, empty, temporary file in the directory and returns an IndexOutput
	// instance for appending data to this file. The temporary file name
	// (accessible via IndexOutput.getName()) will start with prefix, end with suffix and have a reserved
	// file extension .tmp.
	CreateTempOutput(ctx context.Context, prefix, suffix string) (IndexOutput, error)

	// Rename
	// Renames source file to dest file where dest must not already exist in the directory.
	// It is permitted for this operation to not be truly atomic, for example both source and dest can
	// be visible temporarily in listAll(). However, the implementation of this method must ensure the
	// content of dest appears as the entire source atomically. So once dest is visible for readers,
	// the entire content of previous source is visible. This method is used by IndexWriter to publish commits.
	Rename(ctx context.Context, source, dest string) error

	// OpenInput Opens a stream for reading an existing file. This method must throw either
	// NoSuchFileException or FileNotFoundException if name points to a non-existing file.
	// Params: name – the name of an existing file.
	// Throws: IOException – in case of I/O error
	OpenInput(ctx context.Context, name string) (IndexInput, error)

	// ObtainLock Acquires and returns a Lock for a file with the given name.
	// Params: name – the name of the lock file
	// Throws:  LockObtainFailedException – (optional specific exception) if the lock could not be obtained
	//			because it is currently held elsewhere.
	// IOException – if any i/o error occurs attempting to gain the lock
	ObtainLock(name string) (Lock, error)

	// Closer Closes the directory.
	io.Closer

	// CopyFrom Copies an existing src file from directory from to a non-existent file dest in this directory.
	CopyFrom(ctx context.Context, from Directory, src, dest string, ioContext *IOContext) error

	// EnsureOpen Ensures this directory is still open.
	// Throws: AlreadyClosedException – if this directory is closed.
	EnsureOpen() error

	Sync(files map[string]struct{}) error
}

A Directory provides an abstraction layer for storing a list of files. A directory contains only files (no sub-folder hierarchy). Implementations must comply with the following:

  • A file in a directory can be created (createOutput), appended to, then closed.
  • A file open for writing may not be available for read access until the corresponding IndexOutput is closed.
  • Once a file is created it must only be opened for input (openInput), or deleted (deleteFile). Calling createOutput on an existing file must throw java.nio.file.FileAlreadyExistsException.

See Also: FSDirectory, RAMDirectory, FilterDirectory
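
A minimal usage sketch against the contract above, using NIOFSDirectory (declared later on this page); the path and file name are illustrative, not prescribed by the package:

func directoryRoundTrip(ctx context.Context) error {
	// "/tmp/index" is an illustrative path.
	dir, err := NewNIOFSDirectory("/tmp/index")
	if err != nil {
		return err
	}
	defer dir.Close()

	// Create, write, and close the output before reading it back,
	// as required by the Directory contract.
	out, err := dir.CreateOutput(ctx, "example.dat")
	if err != nil {
		return err
	}
	if err := out.WriteString(ctx, "payload"); err != nil {
		return err
	}
	if err := out.Close(); err != nil {
		return err
	}

	in, err := dir.OpenInput(ctx, "example.dat")
	if err != nil {
		return err
	}
	defer in.Close()

	s, err := in.ReadString(ctx)
	if err != nil {
		return err
	}
	_ = s // expected: "payload"
	return nil
}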

type DirectoryDeleter

type DirectoryDeleter interface {
	DeleteFile(ctx context.Context, name string) error
}

type FSDirectory

type FSDirectory interface {
	Directory

	// GetDirectory
	// Returns: the underlying filesystem directory
	GetDirectory() (string, error)
}

type FSIndexOutput

type FSIndexOutput struct {
	*OutputStream
}

type FSLockFactory

type FSLockFactory interface {
	LockFactory

	// FSLockFactoryInner Implement this method to obtain a lock for a FSDirectory instance.
	// Throws: IOException – if the lock could not be obtained.
	FSLockFactoryInner
}

FSLockFactory Base interface for file-system-based locking implementations. Implementations explicitly check that the passed Directory is an FSDirectory.

type FSLockFactoryBase

type FSLockFactoryBase struct {
	// contains filtered or unexported fields
}

func NewFSLockFactoryBase

func NewFSLockFactoryBase(inner FSLockFactoryInner) *FSLockFactoryBase

func (*FSLockFactoryBase) ObtainLock

func (f *FSLockFactoryBase) ObtainLock(dir Directory, lockName string) (Lock, error)

type FSLockFactoryInner

type FSLockFactoryInner interface {
	ObtainFSLock(dir FSDirectory, lockName string) (Lock, error)
}

type FlushInfo

type FlushInfo struct {
	NumDocs              int
	EstimatedSegmentSize int64
}

A FlushInfo provides the information required for a CONTEXT_FLUSH context and is carried as part of an IOContext in that case.

func NewFlushInfo

func NewFlushInfo(numDocs int, estimatedSegmentSize int64) *FlushInfo

NewFlushInfo creates a new FlushInfo instance from the values required for a CONTEXT_FLUSH IOContext. These values are only estimates and are not the actual values.

type Hash

type Hash interface {
	Write(p []byte)
	Sum() uint32
	Clone() Hash
}

func NewHash

func NewHash() Hash

type IOContext

type IOContext struct {
	Type      ContextType
	MergeInfo *MergeInfo
	FlushInfo *FlushInfo
	ReadOnce  bool
}

IOContext holds additional details on the merge/search context. An IOContext must never be nil when passed as a parameter to either Directory.openInput(String, IOContext) or Directory.createOutput(String, IOContext).

func NewIOContext

func NewIOContext(option IOContextOption) *IOContext

type IOContextOption

type IOContextOption func(ctx *IOContext)

func WithContextType

func WithContextType(cType ContextType) IOContextOption

func WithFlushInfo

func WithFlushInfo(flushInfo *FlushInfo) IOContextOption

func WithMergeInfo

func WithMergeInfo(mergeInfo *MergeInfo) IOContextOption

func WithReadOnce

func WithReadOnce(readOnce bool) IOContextOption
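
NewIOContext takes a single IOContextOption (see the signature above). A hedged sketch of constructing contexts; whether an option such as WithFlushInfo also sets the Type field is an assumption, not stated on this page:

func buildContexts() {
	// A merge context carrying merge statistics (values are illustrative).
	mergeCtx := NewIOContext(WithMergeInfo(NewMergeInfo(1_000, 64<<20, false, 1)))

	// A flush context carrying flush estimates. It is assumed (not stated here)
	// that the option also sets the corresponding Type; otherwise construct the
	// context with WithContextType instead.
	flushCtx := NewIOContext(WithFlushInfo(NewFlushInfo(500, 8<<20)))

	// A plain read context.
	readCtx := NewIOContext(WithContextType(CONTEXT_READ))

	_, _, _ = mergeCtx, flushCtx, readCtx
}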

type IndexInput

type IndexInput interface {
	DataInput

	io.Closer

	// Seeker Sets current pos in this file, where the next read will occur. If this is beyond the end
	// of the file then this will throw EOFException and then the stream is in an undetermined state.
	// See Also: getFilePointer()
	//Seek(pos int64, whence int) (int64, error)
	io.Seeker

	IndexInputSPI
}

IndexInput Abstract base interface for input from a file in a Directory. A random-access input stream, used for all Lucene index input operations.

IndexInput may only be used from one thread, because it is not thread safe (it keeps internal state such as the file position). To allow multithreaded use, every IndexInput instance must be cloned before it is used in another thread. Implementations must therefore provide Clone(), returning a new IndexInput which operates on the same underlying resource, but positioned independently.

Warning: Lucene never closes cloned IndexInputs; it only calls Close() on the original object. If you access a cloned IndexInput after closing the original object, any read method will throw AlreadyClosedException. See Also: Directory

type IndexInputSPI

type IndexInputSPI interface {
	// GetFilePointer Returns the current pos in this file, where the next read will occur.
	// See Also: seek(long)
	GetFilePointer() int64

	// Slice Creates a slice of this index input, with the given description, offset, and length.
	// The slice is sought to the beginning.
	Slice(sliceDescription string, offset, length int64) (IndexInput, error)

	// Length The number of bytes in the file.
	Length() int64

	// RandomAccessSlice Creates a random-access slice of this index input, with the given offset and length.
	// The default implementation calls slice, and it doesn't support random access, it implements absolute
	// reads as seek+read.
	RandomAccessSlice(offset int64, length int64) (RandomAccessInput, error)
}
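
A hedged sketch contrasting Slice (a positioned sub-input) with RandomAccessSlice (absolute-position reads); the offsets and the slice description are illustrative:

func sliceExamples(ctx context.Context, in IndexInput) error {
	// A sequential view of bytes [16, 16+64) of the file; the slice starts
	// positioned at its own beginning.
	sub, err := in.Slice("header", 16, 64)
	if err != nil {
		return err
	}
	if _, err := sub.ReadUint32(ctx); err != nil {
		return err
	}

	// An absolute-position view over the same range: every read takes a
	// position, so no internal file pointer is involved.
	ra, err := in.RandomAccessSlice(16, 64)
	if err != nil {
		return err
	}
	v, err := ra.ReadU32(0)
	if err != nil {
		return err
	}
	_ = v
	return nil
}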

type IndexOutput

type IndexOutput interface {
	io.Closer

	DataOutput

	GetName() string

	// GetFilePointer
	// Returns the current pos in this file, where the next write will occur.
	GetFilePointer() int64

	GetChecksum() (uint32, error)
}

IndexOutput A DataOutput for appending data to a file in a Directory. Instances of this class are not thread-safe. See Also: Directory, IndexInput

type InputStream

type InputStream struct {
	*BaseDataInput
	// contains filtered or unexported fields
}

InputStream A DataInput wrapping a plain io.Reader.

func NewInputStream

func NewInputStream(is io.Reader) *InputStream

func (*InputStream) Clone

func (i *InputStream) Clone() CloneReader

func (*InputStream) Close

func (i *InputStream) Close() error

func (*InputStream) GetFilePointer

func (i *InputStream) GetFilePointer() int64

func (*InputStream) Length

func (i *InputStream) Length() int64

func (*InputStream) RandomAccessSlice

func (i *InputStream) RandomAccessSlice(offset int64, length int64) (RandomAccessInput, error)

func (*InputStream) Read

func (i *InputStream) Read(b []byte) (int, error)

func (*InputStream) ReadByte

func (i *InputStream) ReadByte() (byte, error)

func (*InputStream) Seek

func (i *InputStream) Seek(offset int64, whence int) (int64, error)

func (*InputStream) Slice

func (i *InputStream) Slice(sliceDescription string, offset, length int64) (IndexInput, error)

type Lock

type Lock interface {
	// Closer Releases exclusive access.
	// Note that exceptions thrown from close may require human intervention, as it may mean the lock was no longer valid, or that fs permissions prevent removal of the lock file, or other reasons.
	// Closes this stream and releases any system resources associated with it. If the stream is already closed then invoking this method has no effect.
	// As noted in AutoCloseable.close(), cases where the close may fail require careful attention. It is strongly advised to relinquish the underlying resources and to internally mark the Closeable as closed, prior to throwing the IOException.
	// Throws: LockReleaseFailedException – (optional specific exception) if the lock could not be properly released.
	io.Closer

	// EnsureValid Best effort check that this lock is still valid. Locks could become invalidated externally for a number of reasons, for example if a user deletes the lock file manually or when a network filesystem is in use.
	// Throws: IOException – if the lock is no longer valid.
	EnsureValid() error
}

Lock An interprocess mutex lock. See Also: Directory.ObtainLock

type LockFactory

type LockFactory interface {

	// ObtainLock Return a new obtained Lock instance identified by lockName.
	// Params: lockName – name of the lock to be created.
	// Throws: 	LockObtainFailedException – (optional specific exception) if the lock could not be obtained
	//			because it is currently held elsewhere.
	//			IOException – if any i/o error occurs attempting to gain the lock
	ObtainLock(dir Directory, lockName string) (Lock, error)
}

LockFactory Base interface for locking implementations. Directory uses instances of this interface to implement locking.

Lucene uses NativeFSLockFactory by default for FSDirectory-based index directories.

Special care needs to be taken if you change the locking implementation: first be certain that no writer is in fact writing to the index, otherwise you can easily corrupt your index. Be sure to make the LockFactory change on all Lucene instances and clean up all leftover lock files before starting the new configuration for the first time. Different implementations cannot work together!

If you suspect that some LockFactory implementation is not working properly in your environment, you can easily test it by using VerifyingLockFactory, LockVerifyServer and LockStressTest.

See Also: LockVerifyServer, LockStressTest, VerifyingLockFactory
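
A hedged sketch of the obtain/validate/release cycle through a Directory; "write.lock" is the conventional Lucene lock file name, used here only for illustration:

func withWriteLock(dir Directory) error {
	lock, err := dir.ObtainLock("write.lock")
	if err != nil {
		return err // e.g. the lock is currently held elsewhere
	}
	// Releasing the lock is done by closing it.
	defer lock.Close()

	// Best-effort check that the lock is still valid before relying on it.
	if err := lock.EnsureValid(); err != nil {
		return err
	}

	// ... perform writes while holding the lock ...
	return nil
}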

type MergeInfo

type MergeInfo struct {
	TotalMaxDoc         int
	EstimatedMergeBytes int
	IsExternal          bool
	MergeMaxNumSegments int
}

A MergeInfo provides the information required for a CONTEXT_MERGE context and is carried as part of an IOContext in that case.

func NewMergeInfo

func NewMergeInfo(totalMaxDoc int, estimatedMergeBytes int, isExternal bool, mergeMaxNumSegments int) *MergeInfo

type MmapDataInput

type MmapDataInput struct {
	*BaseDataInput
	// contains filtered or unexported fields
}

func NewMmapDataInput

func NewMmapDataInput(file string) (*MmapDataInput, error)

func (*MmapDataInput) Clone

func (m *MmapDataInput) Clone() CloneReader

func (*MmapDataInput) Close

func (m *MmapDataInput) Close() error

func (*MmapDataInput) Read

func (m *MmapDataInput) Read(p []byte) (n int, err error)

type NIOFSDirectory

type NIOFSDirectory struct {
	sync.Mutex
	// contains filtered or unexported fields
}

NIOFSDirectory An FSDirectory implementation that uses java.nio's FileChannel positional read, which allows multiple threads to read from the same file without synchronizing. This type only uses FileChannel when reading; writing is achieved with FSDirectory.FSIndexOutput.

NOTE: NIOFSDirectory is not recommended on Windows because of a bug in how FileChannel.read is implemented in Sun's JRE; inside the implementation the position is apparently synchronized.

NOTE: Accessing this type either directly or indirectly from a thread while it's interrupted can close the underlying file descriptor immediately if at the same time the thread is blocked on IO. The file descriptor will remain closed and subsequent access to NIOFSDirectory will throw a ClosedChannelException. If your application uses either Thread.interrupt() or Future.cancel(boolean) you should use the legacy RAFDirectory from the Lucene misc module in favor of NIOFSDirectory.

func NewNIOFSDirectory

func NewNIOFSDirectory(path string) (*NIOFSDirectory, error)

func (*NIOFSDirectory) Close

func (n *NIOFSDirectory) Close() error

func (*NIOFSDirectory) CopyFrom

func (n *NIOFSDirectory) CopyFrom(ctx context.Context, from Directory, src, dest string, ioContext *IOContext) error

func (*NIOFSDirectory) CreateOutput

func (n *NIOFSDirectory) CreateOutput(ctx context.Context, name string) (IndexOutput, error)

func (*NIOFSDirectory) CreateTempOutput

func (n *NIOFSDirectory) CreateTempOutput(ctx context.Context, prefix, suffix string) (IndexOutput, error)

func (*NIOFSDirectory) DeleteFile

func (n *NIOFSDirectory) DeleteFile(ctx context.Context, name string) error

func (*NIOFSDirectory) EnsureOpen

func (n *NIOFSDirectory) EnsureOpen() error

func (*NIOFSDirectory) FileLength

func (n *NIOFSDirectory) FileLength(ctx context.Context, name string) (int64, error)

func (*NIOFSDirectory) GetDirectory

func (n *NIOFSDirectory) GetDirectory() (string, error)

func (*NIOFSDirectory) GetLockFactory

func (n *NIOFSDirectory) GetLockFactory() LockFactory

func (*NIOFSDirectory) ListAll

func (n *NIOFSDirectory) ListAll(context.Context) ([]string, error)

func (*NIOFSDirectory) NewFSIndexOutput

func (n *NIOFSDirectory) NewFSIndexOutput(name string) (*FSIndexOutput, error)

func (*NIOFSDirectory) ObtainLock

func (n *NIOFSDirectory) ObtainLock(name string) (Lock, error)

func (*NIOFSDirectory) OpenInput

func (n *NIOFSDirectory) OpenInput(ctx context.Context, name string) (IndexInput, error)

func (*NIOFSDirectory) Rename

func (n *NIOFSDirectory) Rename(ctx context.Context, source, dest string) error

func (*NIOFSDirectory) Sync

func (n *NIOFSDirectory) Sync(names map[string]struct{}) error

type NIOFSIndexInput

type NIOFSIndexInput struct {
	*BaseIndexInput
	// contains filtered or unexported fields
}

func NewNIOFSIndexInput

func NewNIOFSIndexInput(file *os.File) (*NIOFSIndexInput, error)

func (*NIOFSIndexInput) Clone

func (n *NIOFSIndexInput) Clone() CloneReader

func (*NIOFSIndexInput) Close

func (n *NIOFSIndexInput) Close() error

func (*NIOFSIndexInput) GetFilePointer

func (n *NIOFSIndexInput) GetFilePointer() int64

func (*NIOFSIndexInput) Length

func (n *NIOFSIndexInput) Length() int64

func (*NIOFSIndexInput) Read

func (n *NIOFSIndexInput) Read(p []byte) (size int, err error)

func (*NIOFSIndexInput) Seek

func (n *NIOFSIndexInput) Seek(pos int64, whence int) (int64, error)

func (*NIOFSIndexInput) Slice

func (n *NIOFSIndexInput) Slice(desc string, offset, length int64) (IndexInput, error)

type NoLock

type NoLock struct {
}

func (*NoLock) Close

func (n *NoLock) Close() error

func (*NoLock) EnsureValid

func (n *NoLock) EnsureValid() error

type NoLockFactory

type NoLockFactory struct {
	// contains filtered or unexported fields
}

NoLockFactory Use this LockFactory to disable locking entirely. This is a singleton; use NoLockFactoryInstance. See Also: LockFactory

func NewNoLockFactory

func NewNoLockFactory() *NoLockFactory

func (*NoLockFactory) ObtainLock

func (n *NoLockFactory) ObtainLock(dir Directory, lockName string) (Lock, error)

type OutputStream

type OutputStream struct {
	*BaseIndexOutput
	// contains filtered or unexported fields
}

OutputStream Implementation of a buffered IndexOutput that writes to an underlying io.WriteCloser.

func NewOutputStream

func NewOutputStream(name string, out io.WriteCloser) *OutputStream

func (*OutputStream) Close

func (o *OutputStream) Close() error

func (*OutputStream) GetChecksum

func (o *OutputStream) GetChecksum() (uint32, error)

func (*OutputStream) GetFilePointer

func (o *OutputStream) GetFilePointer() int64

func (*OutputStream) Write

func (o *OutputStream) Write(b []byte) (int, error)

type RAMDirectory

type RAMDirectory struct {
	sync.RWMutex
	// contains filtered or unexported fields
}

func (*RAMDirectory) Close

func (d *RAMDirectory) Close() error

func (*RAMDirectory) CopyFrom

func (d *RAMDirectory) CopyFrom(ctx context.Context, from Directory, src, dest string, ioContext *IOContext) error

func (*RAMDirectory) CreateOutput

func (d *RAMDirectory) CreateOutput(ctx context.Context, name string) (IndexOutput, error)

func (*RAMDirectory) CreateTempOutput

func (d *RAMDirectory) CreateTempOutput(ctx context.Context, prefix, suffix string) (IndexOutput, error)

func (*RAMDirectory) DeleteFile

func (d *RAMDirectory) DeleteFile(ctx context.Context, name string) error

func (*RAMDirectory) EnsureOpen

func (d *RAMDirectory) EnsureOpen() error

func (*RAMDirectory) FileLength

func (d *RAMDirectory) FileLength(ctx context.Context, name string) (int64, error)

func (*RAMDirectory) ListAll

func (d *RAMDirectory) ListAll(ctx context.Context) ([]string, error)

func (*RAMDirectory) ObtainLock

func (d *RAMDirectory) ObtainLock(name string) (Lock, error)

func (*RAMDirectory) OpenInput

func (d *RAMDirectory) OpenInput(ctx context.Context, name string) (IndexInput, error)

func (*RAMDirectory) Rename

func (d *RAMDirectory) Rename(ctx context.Context, source, dest string) error

func (*RAMDirectory) Sync

func (d *RAMDirectory) Sync(files map[string]struct{}) error

type RAMFile

type RAMFile struct {
	// contains filtered or unexported fields
}

func NewRAMFile

func NewRAMFile(dir *RAMDirectory) *RAMFile

func (*RAMFile) Clone

func (f *RAMFile) Clone() *RAMFile

func (*RAMFile) GetLength

func (f *RAMFile) GetLength() int64

type RAMInputStream

type RAMInputStream struct {
	*BaseIndexInput
	// contains filtered or unexported fields
}

func NewRAMInputStream

func NewRAMInputStream(name string, file *RAMFile, length int) (*RAMInputStream, error)

func (*RAMInputStream) Clone

func (s *RAMInputStream) Clone() CloneReader

func (*RAMInputStream) GetFilePointer

func (s *RAMInputStream) GetFilePointer() int64

func (*RAMInputStream) Length

func (s *RAMInputStream) Length() int64

func (*RAMInputStream) Read

func (s *RAMInputStream) Read(p []byte) (n int, err error)

func (*RAMInputStream) Seek

func (s *RAMInputStream) Seek(offset int64, whence int) (int64, error)

func (*RAMInputStream) Slice

func (s *RAMInputStream) Slice(sliceDescription string, offset, length int64) (IndexInput, error)

type RAMOutputStream

type RAMOutputStream struct {
	*BaseIndexOutput
	// contains filtered or unexported fields
}

func NewRAMOutputStream

func NewRAMOutputStream(name string, file *RAMFile, checksum bool) *RAMOutputStream

func (*RAMOutputStream) Close

func (s *RAMOutputStream) Close() error

func (*RAMOutputStream) GetChecksum

func (s *RAMOutputStream) GetChecksum() (uint32, error)

func (*RAMOutputStream) GetFilePointer

func (s *RAMOutputStream) GetFilePointer() int64

func (*RAMOutputStream) Write

func (s *RAMOutputStream) Write(p []byte) (n int, err error)

type RAMOutputStreamOptionBuilder

type RAMOutputStreamOptionBuilder struct {
}

type RandomAccessInput

type RandomAccessInput interface {
	io.ReaderAt

	// ReadU8
	// Reads a byte at the given pos in the file
	// See Also: DataInput.readByte
	ReadU8(pos int64) (byte, error)

	// ReadU16
	// Reads a short at the given pos in the file
	// See Also: DataInput.readShort
	ReadU16(pos int64) (uint16, error)

	// ReadU32
	// Reads an integer at the given pos in the file
	// See Also: DataInput.readInt
	ReadU32(pos int64) (uint32, error)

	// ReadU64
	// Reads a long at the given pos in the file
	// See Also: DataInput.readLong
	ReadU64(pos int64) (uint64, error)
}

RandomAccessInput Random Access Index API. Unlike IndexInput, this has no concept of a file position; all reads are absolute. However, like IndexInput, it is only intended for use by a single thread.

func NewBytesRandomAccessInput

func NewBytesRandomAccessInput(bs []byte, byteOrder binary.ByteOrder) RandomAccessInput
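
A hedged sketch of absolute reads over a byte slice; the byte values and the big-endian choice are illustrative, and the encoding/binary import is assumed:

func absoluteReads() error {
	// Eight bytes; with binary.BigEndian, bytes [0,4) decode to 1 and [4,8) to 2.
	bs := []byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02}
	ra := NewBytesRandomAccessInput(bs, binary.BigEndian)

	first, err := ra.ReadU32(0) // expected: 1
	if err != nil {
		return err
	}
	second, err := ra.ReadU32(4) // expected: 2
	if err != nil {
		return err
	}
	_, _ = first, second
	return nil
}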

type SimpleFSLock

type SimpleFSLock struct {
	// contains filtered or unexported fields
}

func NewSimpleFSLock

func NewSimpleFSLock(path string, creationTime time.Time) *SimpleFSLock

func (*SimpleFSLock) Close

func (s *SimpleFSLock) Close() error

func (*SimpleFSLock) EnsureValid

func (s *SimpleFSLock) EnsureValid() error

type SimpleFSLockFactory

type SimpleFSLockFactory struct {
	*FSLockFactoryBase
}

SimpleFSLockFactory Implements LockFactory using Files.createFile. The main downside of using this API for locking is that the Lucene write lock may not be released when the JVM exits abnormally. When this happens, a LockObtainFailedException is hit when trying to create a writer, in which case you may need to explicitly clear the lock file first by manually removing it. But first be certain that no writer is in fact writing to the index, otherwise you can easily corrupt your index.

Special care needs to be taken if you change the locking implementation: first be certain that no writer is in fact writing to the index, otherwise you can easily corrupt your index. Be sure to make the LockFactory change on all Lucene instances and clean up all leftover lock files before starting the new configuration for the first time. Different implementations cannot work together!

If you suspect that this or any other LockFactory is not working properly in your environment, you can easily test it by using VerifyingLockFactory, LockVerifyServer and LockStressTest.

This is a singleton; you have to use INSTANCE. See Also: LockFactory

func NewSimpleFSLockFactory

func NewSimpleFSLockFactory() *SimpleFSLockFactory

func (*SimpleFSLockFactory) ObtainFSLock

func (s *SimpleFSLockFactory) ObtainFSLock(dir FSDirectory, lockName string) (Lock, error)

type SingleInstanceLock

type SingleInstanceLock struct {
	*SingleInstanceLockFactory
	// contains filtered or unexported fields
}

func NewSingleInstanceLock

func NewSingleInstanceLock(factory *SingleInstanceLockFactory, lockName string) *SingleInstanceLock

func (*SingleInstanceLock) Close

func (s *SingleInstanceLock) Close() error

func (*SingleInstanceLock) EnsureValid

func (s *SingleInstanceLock) EnsureValid() error

type SingleInstanceLockFactory

type SingleInstanceLockFactory struct {
	sync.RWMutex
	// contains filtered or unexported fields
}

SingleInstanceLockFactory Implements LockFactory for a single in-process instance, meaning all locking will take place through this one instance. Only use this LockFactory when you are certain all IndexWriters for a given index are running against a single shared in-process Directory instance. This is currently the default locking for RAMDirectory. See Also: LockFactory

func NewSingleInstanceLockFactory

func NewSingleInstanceLockFactory() *SingleInstanceLockFactory

func (*SingleInstanceLockFactory) ObtainLock

func (s *SingleInstanceLockFactory) ObtainLock(_ Directory, lockName string) (Lock, error)

type TrackingDirectoryWrapper

type TrackingDirectoryWrapper struct {
	sync.RWMutex

	Directory
	// contains filtered or unexported fields
}

func NewTrackingDirectoryWrapper

func NewTrackingDirectoryWrapper(directory Directory) *TrackingDirectoryWrapper

func (*TrackingDirectoryWrapper) ClearCreatedFiles

func (t *TrackingDirectoryWrapper) ClearCreatedFiles()

func (*TrackingDirectoryWrapper) CopyFrom

func (t *TrackingDirectoryWrapper) CopyFrom(ctx context.Context, from Directory, src, dest string, ioContext *IOContext) error

func (*TrackingDirectoryWrapper) CreateOutput

func (t *TrackingDirectoryWrapper) CreateOutput(ctx context.Context, name string) (IndexOutput, error)

func (*TrackingDirectoryWrapper) CreateTempOutput

func (t *TrackingDirectoryWrapper) CreateTempOutput(ctx context.Context, prefix, suffix string) (IndexOutput, error)

func (*TrackingDirectoryWrapper) DeleteFile

func (t *TrackingDirectoryWrapper) DeleteFile(ctx context.Context, name string) error

func (*TrackingDirectoryWrapper) GetCreatedFiles

func (t *TrackingDirectoryWrapper) GetCreatedFiles() map[string]struct{}

func (*TrackingDirectoryWrapper) Rename

func (t *TrackingDirectoryWrapper) Rename(ctx context.Context, source, dest string) error
