Documentation ¶
Index ¶
- Constants
- Variables
- func AlignObjectSize(size int64) int64
- func BitCount(b byte) int
- func Byte315ToFloat(b byte) float32
- func Close(objects ...io.Closer) error
- func CloseWhileHandlingError(priorErr error, objects ...io.Closer) error
- func CloseWhileSuppressingError(objects ...io.Closer)
- func DeleteFilesIgnoringErrors(dir FileDeleter, files ...string)
- func FileNameFromGeneration(base, ext string, gen int64) string
- func FloatToByte315(f float32) int8
- func Fsync(fileToSync string, isDir bool) error
- func Gcd(a, b int64) int64
- func GrowByteSlice(arr []byte, minSize int) []byte
- func GrowIntSlice(arr []int, minSize int) []int
- func Hashstr(sep string) int
- func IntroSort(data sort.Interface)
- func IsValidUTF16String(s []rune) bool
- func ItoHex(i int64) string
- func Log(x int64, base int) int
- func MurmurHash3_x86_32(data []byte, seed uint32) uint32
- func NumberOfTrailingZeros(n int64) uint
- func Oversize(minTargetSize int, bytesPerElement int) int
- func ParseGeneration(filename string) int64
- func ParseSegmentName(filename string) string
- func SegmentFileName(name, suffix, ext string) string
- func SetDefaultInfoStream(infoStream InfoStream)
- func ShallowSizeOf(obj interface{}) int64
- func ShallowSizeOfInstance(clazz reflect.Type) int64
- func SizeOf(arr interface{}) int64
- func StartsWith(ref, prefix []byte) bool
- func StripExtension(filename string) string
- func StripSegmentName(filename string) string
- func TimSort(data sort.Interface)
- func UTF8SortedAsUnicodeLess(aBytes, bBytes []byte) bool
- func ZigZagDecodeLong(l int64) int64
- func ZigZagEncodeLong(l int64) int64
- type Accountable
- type ArrayTimSorter
- type Attribute
- type AttributeFactory
- type AttributeImpl
- type AttributeSource
- func (as *AttributeSource) Add(s string) Attribute
- func (as *AttributeSource) AddImpl(att AttributeImpl)
- func (as *AttributeSource) CaptureState() (state *AttributeState)
- func (as *AttributeSource) Clear()
- func (as *AttributeSource) Get(s string) Attribute
- func (as *AttributeSource) Has(s string) bool
- func (as *AttributeSource) RestoreState(state *AttributeState)
- func (as *AttributeSource) String() string
- type AttributeState
- type Bits
- type ByteAllocator
- type ByteAllocatorImpl
- type ByteBlockPool
- type BytesRef
- type BytesRefBuilder
- func (b *BytesRefBuilder) At(offset int) byte
- func (b *BytesRefBuilder) Bytes() []byte
- func (b *BytesRefBuilder) Copy(ref []byte)
- func (b *BytesRefBuilder) Get() *BytesRef
- func (b *BytesRefBuilder) Grow(capacity int)
- func (b *BytesRefBuilder) Length() int
- func (b *BytesRefBuilder) Set(offset int, v byte)
- func (b *BytesRefBuilder) SetLength(length int)
- type BytesRefHash
- type BytesRefIterator
- type BytesRefs
- type BytesStartArray
- type CompoundError
- type Counter
- type DataInput
- type DataInputImpl
- func (in *DataInputImpl) ReadBytesBuffered(buf []byte, useBuffer bool) error
- func (in *DataInputImpl) ReadInt() (n int32, err error)
- func (in *DataInputImpl) ReadLong() (n int64, err error)
- func (in *DataInputImpl) ReadShort() (n int16, err error)
- func (in *DataInputImpl) ReadString() (s string, err error)
- func (in *DataInputImpl) ReadStringSet() (s map[string]bool, err error)
- func (in *DataInputImpl) ReadStringStringMap() (m map[string]string, err error)
- func (in *DataInputImpl) ReadVInt() (n int32, err error)
- func (in *DataInputImpl) ReadVLong() (int64, error)
- func (in *DataInputImpl) SkipBytes(numBytes int64) (err error)
- type DataOutput
- type DataOutputImpl
- func (out *DataOutputImpl) CopyBytes(input DataInput, numBytes int64) error
- func (out *DataOutputImpl) WriteInt(i int32) error
- func (out *DataOutputImpl) WriteLong(i int64) error
- func (out *DataOutputImpl) WriteString(s string) error
- func (out *DataOutputImpl) WriteStringSet(m map[string]bool) error
- func (out *DataOutputImpl) WriteStringStringMap(m map[string]string) error
- func (out *DataOutputImpl) WriteVInt(i int32) error
- func (out *DataOutputImpl) WriteVLong(i int64) error
- type DataReader
- type DataWriter
- type DirectTrackingAllocator
- type FileDeleter
- type FixedBitSet
- type InPlaceMergeSorter
- type InfoStream
- type IntAllocator
- type IntAllocatorImpl
- type IntBlockPool
- type IntroSorter
- type IntroSorterSPI
- type IntsRef
- type IntsRefBuilder
- func (a *IntsRefBuilder) Append(i int)
- func (a *IntsRefBuilder) At(offset int) int
- func (a *IntsRefBuilder) Clear()
- func (a *IntsRefBuilder) CopyIntSlice(other []int)
- func (a *IntsRefBuilder) CopyInts(ints *IntsRef)
- func (a *IntsRefBuilder) Get() *IntsRef
- func (a *IntsRefBuilder) Grow(newLength int)
- func (a *IntsRefBuilder) Length() int
- type ListIntroSorter
- type MaxBytesLengthExceededError
- type MutableBits
- type NoOutput
- type OpenBitSet
- func (b *OpenBitSet) And(other *OpenBitSet)
- func (b *OpenBitSet) AndNot(other *OpenBitSet)
- func (b *OpenBitSet) Cardinality() int64
- func (b *OpenBitSet) Clear(index int64)
- func (b *OpenBitSet) Get(index int64) bool
- func (b *OpenBitSet) IsEmpty() bool
- func (b *OpenBitSet) NextSetBit(index int64) int64
- func (b *OpenBitSet) Set(index int64)
- func (b *OpenBitSet) String() string
- type PrintStreamInfoStream
- type SetOnce
- type Sorter
- type TimSorter
- type Version
Constants ¶
const (
	BYTE_BLOCK_SHIFT = 15
	BYTE_BLOCK_SIZE  = 1 << BYTE_BLOCK_SHIFT
	BYTE_BLOCK_MASK  = BYTE_BLOCK_SIZE - 1
)
const (
	NUM_BYTES_CHAR  = 2 // UTF8 uses 1-4 bytes to represent each rune
	NUM_BYTES_SHORT = 2
	NUM_BYTES_INT   = 8
	NUM_BYTES_FLOAT = 4
	NUM_BYTES_LONG  = 8

	/* Number of bytes to represent an object reference */
	NUM_BYTES_OBJECT_REF = 8

	// Number of bytes to represent an object header (no fields, no alignments).
	NUM_BYTES_OBJECT_HEADER = 16

	// Number of bytes to represent an array header (no content, but with alignments).
	NUM_BYTES_ARRAY_HEADER = 24

	// A constant specifying the object alignment boundary inside the
	// JVM. Objects will always take a full multiple of this constant,
	// possibly wasting some space.
	NUM_BYTES_OBJECT_ALIGNMENT = 8
)
These sizes assume an amd64 system.
const (
	MINRUN        = 32
	RUN_THRESHOLD = 64
	STACKSIZE     = 40 // depends on MINRUN
	MIN_GALLOP    = 7
)
const (
	UNI_SUR_HIGH_START = 0xD800
	UNI_SUR_HIGH_END   = 0xDBFF
	UNI_SUR_LOW_START  = 0xDC00
	UNI_SUR_LOW_END    = 0xDFFF
)
const DATA_OUTPUT_COPY_BUFFER_SIZE = 16384
const FORMAT = "2006/01/02 15:04:05"
const INT_BLOCK_MASK = INT_BLOCK_SIZE - 1
const INT_BLOCK_SHIFT = 13
const INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT
const MAX_ARRAY_LENGTH = math.MaxInt32 - NUM_BYTES_ARRAY_HEADER
Maximum length for an array
const (
	// The default precision step used by LongField, DoubleField,
	// NumericTokenStream, NumericRangeQuery, and NumericRangeFilter.
	NUMERIC_PRECISION_STEP_DEFAULT = 16
)
const (
SEGMENTS = "segments"
)
const SKIP_BUFFER_SIZE = 1024
const SORTER_THRESHOLD = 20
Variables ¶
var (
	// Match settings and bugs in Lucene's 3.1 release.
	VERSION_31 = Version([4]int{3, 1, 0, 0})
	// Match settings and bugs in Lucene's 4.0 release.
	VERSION_4_0 = Version([4]int{4, 0, 0, 0})
	// Match settings and bugs in Lucene's 4.5 release.
	VERSION_45 = Version([4]int{4, 5, 0, 0})
	// Match settings and bugs in Lucene's 4.9 release.
	// Use this to get the latest and greatest settings, bug fixes, etc,
	// for Lucene.
	VERSION_49     = Version([4]int{4, 9, 0, 0})
	VERSION_4_10   = Version([4]int{4, 10, 0, 0})
	VERSION_4_10_1 = Version([4]int{4, 10, 1, 0})
	VERSION_LATEST = VERSION_4_10_1
)
var BIT_LISTS = []int{}/* 256 elements not displayed */
The general idea: instead of having an array per byte that has the offsets of the next set bit, that array could be packed inside a 32-bit integer (8 4-bit numbers). That should be faster than accessing an array for each index, and the total array size is kept smaller (256*sizeof(int) = 1K).

The Python code that generated BIT_LISTS:

def bits2int(val):
    arr = 0
    for shift in range(8, 0, -1):
        if val & 0x80:
            arr = (arr << 4) | shift
        val = val << 1
    return arr

def int_table():
    tbl = [hex(bits2int(val)).strip('L') for val in range(256)]
    return ','.join(tbl)
var BYTE_COUNTS = []int{}/* 256 elements not displayed */
var CODEC_FILE_PATTERN = regexp.MustCompile("_[a-z0-9]+(_.*)?\\..*")
All files created by codecs must match this pattern (checked in SegmentInfo)
var EMPTY_BYTES = []byte{}
An empty byte slice for convenience
var EMPTY_BYTES_REF_ITERATOR = &emptyBytesRefIterator{}
var EMPTY_INTS = []int{}
An empty integer array for convenience
var FIRST_LEVEL_SIZE = LEVEL_SIZE_ARRAY[0]
The first level size for new slices.
var GOOD_FAST_HASH_SEED = func() uint32 {
	if prop := os.Getenv("tests_seed"); prop != "" {
		if len(prop) > 8 {
			prop = prop[len(prop)-8:]
		}
		n, err := strconv.ParseInt(prop, 16, 64)
		if err != nil {
			panic(err)
		}
		fmt.Printf("tests_seed=%v\n", uint32(n))
		return uint32(n)
	} else {
		return uint32(time.Now().Nanosecond())
	}
}()
Poached from Guava: set a different salt/seed for each VM instance, to frustrate hash key collision denial of service attacks, and to catch any places that somehow rely on hash function/order across VM instances:
var LEVEL_SIZE_ARRAY = []int{5, 14, 20, 30, 40, 40, 80, 80, 120, 200}
An array holding the level sizes for byte slices.
var LUCENE_MAIN_VERSION = VERSION_LATEST.String()
This is the internal Lucene version, recorded into each segment.
var LUCENE_VERSION = VERSION_LATEST.String()
This is the Lucene version for display purpose.
var MESSAGE_ID int32 // atomic
var NEXT_LEVEL_ARRAY = []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 9}
An array holding the offset into the LEVEL_SIZE_ARRAY to quickly navigate to the next slice level.
var NO_OUTPUT = NoOutput(true)
Instance of InfoStream that does no logging at all.
Functions ¶
func AlignObjectSize ¶
Aligns an object size to be the next multiple of NUM_BYTES_OBJECT_ALIGNMENT
func Byte315ToFloat ¶
byteToFloat(b, mantissaBits=3, zeroExponent=15)
func CloseWhileHandlingError ¶
func DeleteFilesIgnoringErrors ¶
func DeleteFilesIgnoringErrors(dir FileDeleter, files ...string)
Deletes all given files, suppressing all thrown errors.
Note that the files should not be nil.
func FileNameFromGeneration ¶
func FloatToByte315 ¶
floatToByte(b, mantissaBits=3, zeroExponent=15)

smallest non-zero value = 5.820766E-10
largest value = 7.5161928E9
epsilon = 0.125
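A small round-trip sketch using the two documented functions; the import path is assumed, and the decoded value is only approximate because of the 3-bit mantissa:

package main

import (
	"fmt"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	f := float32(0.3)
	b := util.FloatToByte315(f)          // lossy 8-bit encoding: 3 mantissa bits, zero exponent 15
	back := util.Byte315ToFloat(byte(b)) // decode; relative error is bounded by epsilon = 0.125
	fmt.Println(b, back)
}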
func Fsync ¶
Ensures that any writes to the given file are written to the storage device that contains it.
func Gcd ¶
Return the greatest common divisor of a and b, consistently with big.GCD(a, b).
NOTE: A greatest common divisor must be positive, but 2^63 cannot be expressed as an int64 although it is the GCD of math.MinInt64 and 0 and the GCD of math.MinInt64 and math.MinInt64. So in these two cases, and only these, this method returns math.MinInt64.
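A small sketch of the documented behavior and its edge case; the import path is assumed:

package main

import (
	"fmt"
	"math"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	fmt.Println(util.Gcd(12, 18)) // 6
	// 2^63 is not representable as an int64, so the documented fallback applies:
	fmt.Println(util.Gcd(math.MinInt64, 0) == math.MinInt64) // true
}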
func MurmurHash3_x86_32 ¶
Returns the MurmurHash3_x86_32 hash. Original source/tests at https://github.com/yonik/java_util/
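A usage sketch; the import path is assumed. GOOD_FAST_HASH_SEED is the per-process seed defined above, but any uint32 seed works:

package main

import (
	"fmt"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	h := util.MurmurHash3_x86_32([]byte("hello lucene"), util.GOOD_FAST_HASH_SEED)
	fmt.Printf("%08x\n", h)
}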
func NumberOfTrailingZeros ¶
func Oversize ¶
Returns an array size >= minTargetSize, generally over-allocating exponentially to achieve amortized linear-time cost as the array grows.

NOTE: this was originally borrowed from Python 2.4.2 listobject.c sources (attribution in LICENSE.txt), but has now been substantially changed based on discussions from the java-dev thread with subject "Dynamic array reallocation algorithms", started on Jan 12 2010.

minTargetSize is the minimum required value to be returned. bytesPerElement is the number of bytes used by each element of the array (see the NUM_BYTES_* constants above).
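A sketch of the intended growth pattern; the import path is assumed, and GrowByteSlice (listed above) wraps the same policy for []byte:

package main

import (
	"fmt"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	buf := make([]byte, 0, 16)
	need := 100 // minimum capacity we are about to require
	if need > cap(buf) {
		grown := make([]byte, len(buf), util.Oversize(need, 1)) // 1 byte per element
		copy(grown, buf)
		buf = grown
	}
	fmt.Println(cap(buf) >= need) // true; the capacity is over-allocated, not exactly 100
}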
func ParseGeneration ¶
Returns the generation from this file name, or 0 if there is no generation.
func ParseSegmentName ¶
func SegmentFileName ¶
func SetDefaultInfoStream ¶
func SetDefaultInfoStream(infoStream InfoStream)
Sets the default InfoStream used by newly instantiated classes. It cannot be nil; to disable logging, use NO_OUTPUT.
func ShallowSizeOf ¶
func ShallowSizeOf(obj interface{}) int64
Estimates a "shallow" memory usage of the given object. For slices, this will be the memory taken by slice storage (no subreferences will be followed). For objects, this will be the memory taken by the fields.
func ShallowSizeOfInstance ¶
func StartsWith ¶
Returns true iff ref starts with the given prefix.
func StripExtension ¶
func StripSegmentName ¶
func TimSort ¶
Sorts the given data in the order defined by its sort.Interface. This method uses the TimSort algorithm, but falls back to binary sort for small collections.
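A minimal usage sketch with a standard library sort.Interface value; the import path is assumed:

package main

import (
	"fmt"
	"sort"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	data := []int{5, 2, 9, 2, 7}
	util.TimSort(sort.IntSlice(data)) // any sort.Interface value works
	fmt.Println(data)                 // [2 2 5 7 9]
}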
func UTF8SortedAsUnicodeLess ¶
func ZigZagDecodeLong ¶
func ZigZagEncodeLong ¶
Types ¶
type Accountable ¶
type Accountable interface {
	// Return the memory usage of this object in bytes. Negative values are illegal.
	RamBytesUsed() int64
}
An object whose RAM usage can be computed.
type ArrayTimSorter ¶
type ArrayTimSorter struct {
	*TimSorter
	// contains filtered or unexported fields
}
A TimSorter for object arrays
type AttributeFactory ¶
type AttributeFactory interface {
Create(string) AttributeImpl
}
An AttributeFactory creates instances of AttributeImpls.
type AttributeImpl ¶
type AttributeImpl interface {
	Interfaces() []string
	Clone() AttributeImpl
	// Clears the values in this AttributeImpl and resets it to its
	// default value. If this implementation implements more than one
	// Attribute interface, it clears all.
	Clear()
	CopyTo(target AttributeImpl)
}
Base class for Attributes that can be added to a AttributeSource.
Attributes are used to add data in a dynamic, yet type-safe way to a source of usually streamed objects, e.g. a TokenStream.
type AttributeSource ¶
type AttributeSource struct {
// contains filtered or unexported fields
}
An AttributeSource contains a list of different AttributeImpls, and methods to add and get them. There can only be a single instance of an attribute in the same AttributeSource instance. This is ensured by passing in the actual type of the Attribute to Add(), which then checks if an instance of that type is already present. If yes, it returns the instance; otherwise it creates a new instance and returns it.
func NewAttributeSourceFrom ¶
func NewAttributeSourceFrom(input *AttributeSource) *AttributeSource
An AttributeSource that uses the same attributes as the supplied one.
func NewAttributeSourceWith ¶
func NewAttributeSourceWith(factory AttributeFactory) *AttributeSource
An AttributeSource using the supplied AttributeFactory for creating new Attribute instances.
func (*AttributeSource) Add ¶
func (as *AttributeSource) Add(s string) Attribute
The caller must pass in an Attribute name. This method first checks if an instance of that type is already in this AttributeSource and returns it. Otherwise a new instance is created, added to this AttributeSource and returned.
func (*AttributeSource) AddImpl ¶
func (as *AttributeSource) AddImpl(att AttributeImpl)
Expert: Adds a custom AttributeImpl instance with one or more Attribute interfaces.
Please note: it is not guaranteed that att is added to the AttributeSource, because the provided attributes may already exist. You should always retrieve the wanted attributes using Get() after adding with this method and cast to your class.
The recommended way to use custom implementations is using an AttributeFactory.
func (*AttributeSource) CaptureState ¶
func (as *AttributeSource) CaptureState() (state *AttributeState)
func (*AttributeSource) Clear ¶
func (as *AttributeSource) Clear()
Resets all Attributes in this AttributeSource by calling AttributeImpl.clear() on each Attribute implementation.
func (*AttributeSource) Get ¶
func (as *AttributeSource) Get(s string) Attribute
Returns the instance of the passed-in Attribute contained in this AttributeSource.
func (*AttributeSource) Has ¶
func (as *AttributeSource) Has(s string) bool
Returns true, iff this AttributeSource contains the passed-in Attribute.
func (*AttributeSource) RestoreState ¶
func (as *AttributeSource) RestoreState(state *AttributeState)
func (*AttributeSource) String ¶
func (as *AttributeSource) String() string
Returns a string consisting of the class's simple name, the hex representation of the identity hash code, and the current reflection of all attributes.
type AttributeState ¶
type AttributeState struct {
// contains filtered or unexported fields
}
This class holds the state of an AttributeSource
func (*AttributeState) Clone ¶
func (s *AttributeState) Clone() *AttributeState
type Bits ¶
type Bits interface {
	// Returns the value of the bit with the specified index. The index should
	// be non-negative and < Length(). The result of passing negative or
	// out-of-bounds values is undefined by this interface, just don't do it!
	// Returns true if the bit is set, false otherwise.
	At(index int) bool
	// Returns the number of bits in the set
	Length() int
}
Interface for Bitset-like structures.
type ByteAllocator ¶
type ByteAllocator interface {
// contains filtered or unexported methods
}
Abstract class for allocating and freeing byte blocks.
type ByteAllocatorImpl ¶
type ByteAllocatorImpl struct {
// contains filtered or unexported fields
}
type ByteBlockPool ¶
type ByteBlockPool struct {
	Buffers    [][]byte
	ByteUpto   int
	Buffer     []byte
	ByteOffset int
	// contains filtered or unexported fields
}
Class that Posting and PostingVector use to write byte streams into shared fixed-size []byte arrays. The idea is to allocate slices of increasing lengths. For example, the first slice is 5 bytes, the next slice is 14, etc. We start by writing our bytes into the first 5 bytes. When we hit the end of the slice, we allocate the next slice and then write the address of the next slice into the last 4 bytes of the previous slice (the "forwarding address").
Each slice is filled with 0's initially, and we mark the end with a non-zero byte. This way the methods that are writing into the slice don't need to record its length and instead allocate a new slice once they hit a non-zero byte.
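A usage sketch under assumptions: the import path is guessed, DirectTrackingAllocator is assumed to satisfy ByteAllocator (as its description suggests), and writing through Buffer at the returned offset follows the slice scheme described above:

package main

import (
	"fmt"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	bytesUsed := util.NewCounter()
	pool := util.NewByteBlockPool(util.NewDirectTrackingAllocator(bytesUsed))
	pool.NextBuffer() // obtain the first buffer; must be called once after construction

	offset := pool.NewSlice(util.FIRST_LEVEL_SIZE) // first-level slice of 5 bytes
	pool.Buffer[offset] = 42                       // write into the current buffer at the returned offset
	fmt.Println(offset)
}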
func NewByteBlockPool ¶
func NewByteBlockPool(allocator ByteAllocator) *ByteBlockPool
func (*ByteBlockPool) AllocSlice ¶
func (p *ByteBlockPool) AllocSlice(slice []byte, upto int) int
Creates a new byte slice with the given starting size and returns the slice's offset in the pool.
func (*ByteBlockPool) NewSlice ¶
func (pool *ByteBlockPool) NewSlice(size int) int
Allocates a new slice with the given size.
func (*ByteBlockPool) NextBuffer ¶
func (pool *ByteBlockPool) NextBuffer()
Advances the pool to its next buffer. This method should be called once after the constructor to initialize the pool. In contrast to the constructor, a ByteBlockPool.Reset() call will advance the pool to its first buffer immediately.
func (*ByteBlockPool) Reset ¶
func (pool *ByteBlockPool) Reset(zeroFillBuffers, reuseFirst bool)
Expert: Resets the pool to its initial state reusing the first buffer.
func (*ByteBlockPool) SetBytesRef ¶
func (p *ByteBlockPool) SetBytesRef(term *BytesRef, textStart int)
Fill in a BytesRef from term's length & bytes encoded in byte block
type BytesRef ¶
Represents []byte, as a slice (offset + length) into an existing []byte, similar to Go's byte slice.
Important note: Unless otherwise noted, GoLucene uses []byte directly to represent terms that are encoded as UTF8 bytes in the index. It uses this class in cases when caller needs to hold a reference, while allowing underlying []byte to change.
func DeepCopyOf ¶
Creates a new BytesRef that points to a copy of the bytes from other.
The returned BytesRef will have a length of other.length and an offset of zero.
func NewBytesRef ¶
func NewBytesRefFrom ¶
func NewEmptyBytesRef ¶
func NewEmptyBytesRef() *BytesRef
type BytesRefBuilder ¶
type BytesRefBuilder struct {
// contains filtered or unexported fields
}
func NewBytesRefBuilder ¶
func NewBytesRefBuilder() *BytesRefBuilder
func (*BytesRefBuilder) At ¶
func (b *BytesRefBuilder) At(offset int) byte
Return the byte at the given offset.
func (*BytesRefBuilder) Bytes ¶
func (b *BytesRefBuilder) Bytes() []byte
Return a reference to the bytes of this builder.
func (*BytesRefBuilder) Copy ¶
func (b *BytesRefBuilder) Copy(ref []byte)
func (*BytesRefBuilder) Get ¶
func (b *BytesRefBuilder) Get() *BytesRef
func (*BytesRefBuilder) Grow ¶
func (b *BytesRefBuilder) Grow(capacity int)
Ensure that this builder can hold at least capacity bytes without resizing.
func (*BytesRefBuilder) Length ¶
func (b *BytesRefBuilder) Length() int
Return the number of bytes in this buffer.
type BytesRefHash ¶
type BytesRefHash struct {
// contains filtered or unexported fields
}
BytesRefHash is a special purpose hash-map-like data structure optimized for BytesRef instances. BytesRefHash maintains mappings of byte arrays to ids (map[[]byte]int), storing the hashed bytes efficiently in continuous storage. The mapping to the id is encapsulated inside BytesRefHash and is guaranteed to be increased for each added BytesRef.
Note: The maximum capacity BytesRef instance passed to add() must not be longer than BYTE_BLOCK_SIZE-2. The internal storage is limited to 2GB total byte storage.
func NewBytesRefHash ¶
func NewBytesRefHash(pool *ByteBlockPool, capacity int, bytesStartArray BytesStartArray) *BytesRefHash
func (*BytesRefHash) Add ¶
func (h *BytesRefHash) Add(bytes []byte) (int, error)
Adds a new BytesRef.
func (*BytesRefHash) ByteStart ¶
func (h *BytesRefHash) ByteStart(bytesId int) int
Returns the bytesStart offset into the internally used ByteBlockPool for the given bytesID.
func (*BytesRefHash) Clear ¶
func (h *BytesRefHash) Clear(resetPool bool)
Clears the BytesRef which maps to the given BytesRef
func (*BytesRefHash) Reinit ¶
func (h *BytesRefHash) Reinit()
Reinitializes the BytesRefHash after a previous Clear() call. If Clear() has not been called previously, this method has no effect.
func (*BytesRefHash) Size ¶
func (h *BytesRefHash) Size() int
Returns the number of values in this hash.
type BytesRefIterator ¶
type BytesRefIterator interface {
	/*
		Increments the iteration to the next []byte in the iterator. Returns the
		resulting []byte or nil if the end of the iterator is reached. The
		returned []byte may be re-used across calls to the next. After this
		method returns nil, do not call it again: the results are undefined.
	*/
	Next() (buf []byte, err error)
	/*
		Return the []byte Comparator used to sort terms provided by the
		iterator. This may return nil if there are no items or the iterator is
		not sorted. Callers may invoke this method many times, so it's best to
		cache a single instance & reuse it.
	*/
	Comparator() sort.Interface
}
A simple iterator interface for []byte iteration.
type BytesStartArray ¶
type BytesStartArray interface {
	// Initializes the BytesStartArray. This call will allocate memory
	Init() []int
	// A Counter reference holding the number of bytes used by this
	// BytesStartArray. The BytesRefHash uses this reference to track
	// its memory usage
	BytesUsed() Counter
	// Grows the BytesStartArray
	Grow() []int
	// Clears the BytesStartArray and returns the cleared instance.
	Clear() []int
}
Manages allocation of per-term addresses.
type CompoundError ¶
type CompoundError struct {
// contains filtered or unexported fields
}
func (*CompoundError) Error ¶
func (e *CompoundError) Error() string
type Counter ¶
func NewAtomicCounter ¶
func NewAtomicCounter() Counter
func NewCounter ¶
func NewCounter() Counter
type DataInput ¶
type DataInput interface {
	ReadByte() (b byte, err error)
	ReadBytes(buf []byte) error
	ReadShort() (n int16, err error)
	ReadInt() (n int32, err error)
	ReadVInt() (n int32, err error)
	ReadLong() (n int64, err error)
	ReadVLong() (n int64, err error)
	ReadString() (s string, err error)
	ReadStringStringMap() (m map[string]string, err error)
	ReadStringSet() (m map[string]bool, err error)
}
Abstract base class for performing read operations of Lucene's low-level data types.
DataInput may only be used from one thread, because it is not thread safe (it keeps internal state like file position). To allow multithreaded use, every DataInput instance must be cloned before being used in another thread. Subclasses must therefore implement Clone(), returning a new DataInput which operates on the same underlying resource, but positioned independently.
type DataInputImpl ¶
type DataInputImpl struct {
	Reader DataReader
	// contains filtered or unexported fields
}
func NewDataInput ¶
func NewDataInput(spi DataReader) *DataInputImpl
func (*DataInputImpl) ReadBytesBuffered ¶
func (in *DataInputImpl) ReadBytesBuffered(buf []byte, useBuffer bool) error
func (*DataInputImpl) ReadInt ¶
func (in *DataInputImpl) ReadInt() (n int32, err error)
func (*DataInputImpl) ReadLong ¶
func (in *DataInputImpl) ReadLong() (n int64, err error)
func (*DataInputImpl) ReadShort ¶
func (in *DataInputImpl) ReadShort() (n int16, err error)
func (*DataInputImpl) ReadString ¶
func (in *DataInputImpl) ReadString() (s string, err error)
func (*DataInputImpl) ReadStringSet ¶
func (in *DataInputImpl) ReadStringSet() (s map[string]bool, err error)
func (*DataInputImpl) ReadStringStringMap ¶
func (in *DataInputImpl) ReadStringStringMap() (m map[string]string, err error)
func (*DataInputImpl) ReadVInt ¶
func (in *DataInputImpl) ReadVInt() (n int32, err error)
func (*DataInputImpl) ReadVLong ¶
func (in *DataInputImpl) ReadVLong() (int64, error)
func (*DataInputImpl) SkipBytes ¶
func (in *DataInputImpl) SkipBytes(numBytes int64) (err error)
Skip over numBytes bytes. The contract on this method is that it should have the same behavior as reading the same number of bytes into a buffer and discarding its content. Negative values of numBytes are not supported.
type DataOutput ¶
type DataOutput interface {
	DataWriter
	WriteInt(i int32) error
	WriteVInt(i int32) error
	WriteLong(i int64) error
	WriteVLong(i int64) error
	WriteString(s string) error
	CopyBytes(input DataInput, numBytes int64) error
	WriteStringStringMap(m map[string]string) error
	WriteStringSet(m map[string]bool) error
}
Abstract base class for performing write operations of Lucene's low-level data types.
DataOutput may only be used from one thread, because it is not thread safe (it keeps internal state like file position).
type DataOutputImpl ¶
type DataOutputImpl struct {
	Writer DataWriter
	// contains filtered or unexported fields
}
func NewDataOutput ¶
func NewDataOutput(part DataWriter) *DataOutputImpl
func (*DataOutputImpl) CopyBytes ¶
func (out *DataOutputImpl) CopyBytes(input DataInput, numBytes int64) error
func (*DataOutputImpl) WriteInt ¶
func (out *DataOutputImpl) WriteInt(i int32) error
Writes an int as four bytes.
32-bit unsigned integer written as four bytes, high-order bytes first.
func (*DataOutputImpl) WriteLong ¶
func (out *DataOutputImpl) WriteLong(i int64) error
Writes a long as eight bytes.
64-bit unsigned integer written as eight bytes, high-order bytes first.
func (*DataOutputImpl) WriteString ¶
func (out *DataOutputImpl) WriteString(s string) error
Writes a string.
Writes strings as UTF-8 encoded bytes. First the length, in bytes, is written as a VInt, followed by the bytes.
func (*DataOutputImpl) WriteStringSet ¶
func (out *DataOutputImpl) WriteStringSet(m map[string]bool) error
Writes a String set.
First the size is written as an int32, followed by each value written as a string.
func (*DataOutputImpl) WriteStringStringMap ¶
func (out *DataOutputImpl) WriteStringStringMap(m map[string]string) error
Writes a string map.
First the size is written as an int32, followed by each key-value pair written as two consecutive strings.
func (*DataOutputImpl) WriteVInt ¶
func (out *DataOutputImpl) WriteVInt(i int32) error
Writes an int in a variable-length format. Writes between one and five bytes. Smaller values take fewer bytes. Negative numbers are supported, but should be avoided.
VByte is a variable-length format for positive integers, in which the high-order bit of each byte indicates whether more bytes remain to be read. The low-order seven bits are appended as increasingly more significant bits in the resulting integer value. Thus values from zero to 127 may be stored in a single byte, values from 128 to 16,383 may be stored in two bytes, and so on.
VByte Encoding Example
| Value  | Byte 1   | Byte 2   | Byte 3   |
| 0      | 00000000 |          |          |
| 1      | 00000001 |          |          |
| 2      | 00000010 |          |          |
| ...    |          |          |          |
| 127    | 01111111 |          |          |
| 128    | 10000000 | 00000001 |          |
| 129    | 10000001 | 00000001 |          |
| 130    | 10000010 | 00000001 |          |
| ...    |          |          |          |
| 16,383 | 11111111 | 01111111 |          |
| 16,384 | 10000000 | 10000000 | 00000001 |
| 16,385 | 10000001 | 10000000 | 00000001 |
| ...    |          |          |          |
This provides compression while still being efficient to decode.
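A standalone sketch of the format described above (not this package's implementation): seven payload bits per byte, low-order bits first, with the high bit marking continuation.

package main

import "fmt"

// encodeVInt encodes a non-negative value in the VByte layout shown in the table:
// low-order seven bits first, high bit set on every byte except the last.
func encodeVInt(i uint32) []byte {
	var out []byte
	for i >= 0x80 {
		out = append(out, byte(i)&0x7F|0x80)
		i >>= 7
	}
	return append(out, byte(i))
}

func main() {
	for _, v := range []uint32{0, 127, 128, 16384} {
		fmt.Printf("%6d -> %08b\n", v, encodeVInt(v))
	}
}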
func (*DataOutputImpl) WriteVLong ¶
func (out *DataOutputImpl) WriteVLong(i int64) error
Writes a long in a variable-length format. Writes between one and nine bytes. Smaller values take fewer bytes. Negative numbers are not supported.
The format is described further in WriteVInt().
type DataReader ¶
type DataWriter ¶
type DirectTrackingAllocator ¶
type DirectTrackingAllocator struct {
	*ByteAllocatorImpl
	// contains filtered or unexported fields
}
A simple Allocator that never recycles, but tracks how much total RAM is in use.
func NewDirectTrackingAllocator ¶
func NewDirectTrackingAllocator(bytesUsed Counter) *DirectTrackingAllocator
type FileDeleter ¶
type FixedBitSet ¶
type FixedBitSet struct {
// contains filtered or unexported fields
}
BitSet of fixed length (numBits), backed by accessible bits() []int64, accessed with an int index, implementing Bits and DocIdSet. Unlike OpenBitSet, this bit set does not auto-expand, cannot handle long index, and does not have fastXX/XX variants (just X).
func EnsureFixedBitSet ¶
func EnsureFixedBitSet(bits *FixedBitSet, numBits int) *FixedBitSet
If the given FixedBitSet is large enough to hold numBits, returns the given bits, otherwise returns a new FixedBitSet which can hold the required number of bits.
NOTE: the returned bitset reuses the underlying []int64 of the given bits if possible. Also, calling length() on the returned bits may return a value greater than numBits.
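A small sketch of the growth behavior; the import path is assumed, and it is assumed (per the note about reusing the underlying []int64) that existing set bits are preserved when the set grows:

package main

import (
	"fmt"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	bits := util.NewFixedBitSetOf(64)
	bits.Set(3)
	bits.Set(40)

	// Grow only if needed; Length() may come back larger than the requested 128.
	bits = util.EnsureFixedBitSet(bits, 128)
	fmt.Println(bits.At(3), bits.At(40), bits.Cardinality(), bits.Length() >= 128)
}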
func NewFixedBitSetOf ¶
func NewFixedBitSetOf(numBits int) *FixedBitSet
func (*FixedBitSet) At ¶
func (b *FixedBitSet) At(index int) bool
func (*FixedBitSet) Bits ¶
func (b *FixedBitSet) Bits() Bits
func (*FixedBitSet) Cardinality ¶
func (b *FixedBitSet) Cardinality() int
Returns the number of set bits. NOTE: this visits every int64 in the backing bits slice, and the result is not internally cached!
func (*FixedBitSet) IsCacheable ¶
func (b *FixedBitSet) IsCacheable() bool
func (*FixedBitSet) Length ¶
func (b *FixedBitSet) Length() int
func (*FixedBitSet) RamBytesUsed ¶
func (b *FixedBitSet) RamBytesUsed() int64
func (*FixedBitSet) Set ¶
func (b *FixedBitSet) Set(index int)
type InPlaceMergeSorter ¶
type InPlaceMergeSorter struct {
*Sorter
}
Sorter implementation based on the merge-sort algorithm that merges in place (no extra memory will be allocated). Small arrays are sorted with insertion sort.
func NewInPlaceMergeSorter ¶
func NewInPlaceMergeSorter(impl sort.Interface) *InPlaceMergeSorter
func (*InPlaceMergeSorter) Sort ¶
func (s *InPlaceMergeSorter) Sort(from, to int)
type InfoStream ¶
type InfoStream interface {
	io.Closer
	// Clone() InfoStream

	// prints a message
	Message(component, message string, args ...interface{})
	// returns true if messages are enabled and should be posted.
	IsEnabled(component string) bool
}
Debugging API for Lucene classes such as IndexWriter and SegmentInfos.
NOTE: Enabling infostreams may cause performance degradation in some components.
func DefaultInfoStream ¶
func DefaultInfoStream() InfoStream
The default InfoStream used by newly instantiated classes.
type IntAllocator ¶
type IntAllocator interface {
	Recycle(blocks [][]int)
	// contains filtered or unexported methods
}
type IntAllocatorImpl ¶
type IntAllocatorImpl struct {
// contains filtered or unexported fields
}
func NewIntAllocator ¶
func NewIntAllocator(blockSize int) *IntAllocatorImpl
type IntBlockPool ¶
type IntBlockPool struct {
	Buffers   [][]int
	IntUpto   int
	Buffer    []int
	IntOffset int
	// contains filtered or unexported fields
}
A pool for int blocks similar to ByteBlockPool
func NewIntBlockPool ¶
func NewIntBlockPool(allocator IntAllocator) *IntBlockPool
func (*IntBlockPool) NextBuffer ¶
func (p *IntBlockPool) NextBuffer()
Advances the pool to its next buffer. This method should be called once after the constructor to initialize the pool. In contrast to the constructor, an IntBlockPool.Reset() call will advance the pool to its first buffer immediately.
func (*IntBlockPool) Reset ¶
func (pool *IntBlockPool) Reset(zeroFillBuffers, reuseFirst bool)
Expert: Resets the pool to its initial state reusing the first buffer.
type IntroSorter ¶
type IntroSorter struct {
	*Sorter
	// contains filtered or unexported fields
}
Sorter implementation based on a variant of the quicksort algorithm called introsort: when the recursion level exceeds the log of the length of the array to sort, it falls back to heapsort. This prevents quicksort from running into its worst-case quadratic runtime. Small arrays are sorted with insertion sort.
func NewIntroSorter ¶
func NewIntroSorter(spi IntroSorterSPI, arr sort.Interface) *IntroSorter
func (*IntroSorter) Sort ¶
func (s *IntroSorter) Sort(from, to int)
type IntroSorterSPI ¶
type IntsRef ¶
type IntsRef struct {
	// The contents of the IntsRef. Should never be nil.
	Ints []int
	// Offset of first valid integer.
	Offset int
	// Length of used ints.
	Length int
}
Represents []int, as a slice (offset + length) into an existing []int. The ints member should never be nil; use EMPTY_INTS if necessary.
Go's native slice is always preferable unless the reference pointer needs to remain unchanged, in which case this class is more useful.
func NewEmptyIntsRef ¶
func NewEmptyIntsRef() *IntsRef
type IntsRefBuilder ¶
type IntsRefBuilder struct {
// contains filtered or unexported fields
}
func NewIntsRefBuilder ¶
func NewIntsRefBuilder() *IntsRefBuilder
func (*IntsRefBuilder) Append ¶
func (a *IntsRefBuilder) Append(i int)
func (*IntsRefBuilder) At ¶
func (a *IntsRefBuilder) At(offset int) int
func (*IntsRefBuilder) Clear ¶
func (a *IntsRefBuilder) Clear()
func (*IntsRefBuilder) CopyIntSlice ¶
func (a *IntsRefBuilder) CopyIntSlice(other []int)
func (*IntsRefBuilder) CopyInts ¶
func (a *IntsRefBuilder) CopyInts(ints *IntsRef)
func (*IntsRefBuilder) Get ¶
func (a *IntsRefBuilder) Get() *IntsRef
func (*IntsRefBuilder) Grow ¶
func (a *IntsRefBuilder) Grow(newLength int)
func (*IntsRefBuilder) Length ¶
func (a *IntsRefBuilder) Length() int
type ListIntroSorter ¶
type ListIntroSorter struct {
*IntroSorter
}
func (*ListIntroSorter) PivotLess ¶
func (s *ListIntroSorter) PivotLess(j int) bool
func (*ListIntroSorter) SetPivot ¶
func (s *ListIntroSorter) SetPivot(i int)
type MaxBytesLengthExceededError ¶
type MaxBytesLengthExceededError string
func (MaxBytesLengthExceededError) Error ¶
func (e MaxBytesLengthExceededError) Error() string
type MutableBits ¶
Extension of Bits for live documents.
type OpenBitSet ¶
type OpenBitSet struct {
// contains filtered or unexported fields
}
func NewOpenBitSet ¶
func NewOpenBitSet() *OpenBitSet
func NewOpenBitSetOf ¶
func NewOpenBitSetOf(numBits int64) *OpenBitSet
Constructs an OpenBitSet large enough to hold numBits.
func (*OpenBitSet) And ¶
func (b *OpenBitSet) And(other *OpenBitSet)
func (*OpenBitSet) AndNot ¶
func (b *OpenBitSet) AndNot(other *OpenBitSet)
func (*OpenBitSet) Cardinality ¶
func (b *OpenBitSet) Cardinality() int64
Return the number of set bits
func (*OpenBitSet) Clear ¶
func (b *OpenBitSet) Clear(index int64)
Clears a bit, allowing access beyond the current set size without changing the size.
func (*OpenBitSet) Get ¶
func (b *OpenBitSet) Get(index int64) bool
Returns true or false for the specified bit index
func (*OpenBitSet) IsEmpty ¶
func (b *OpenBitSet) IsEmpty() bool
Returns true if there are no set bits
func (*OpenBitSet) NextSetBit ¶
func (b *OpenBitSet) NextSetBit(index int64) int64
Returns the index of the first set bit starting at the index specified. -1 is returned if there are no more set bits.
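A small iteration sketch over the set bits; the import path is assumed:

package main

import (
	"fmt"

	"github.com/balzaczyy/golucene/core/util" // assumed import path
)

func main() {
	bits := util.NewOpenBitSet()
	bits.Set(3)
	bits.Set(70) // Set expands the set size as needed

	// Walk every set bit; NextSetBit reports -1 once none remain.
	for i := bits.NextSetBit(0); i >= 0; i = bits.NextSetBit(i + 1) {
		fmt.Println(i)
	}
}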
func (*OpenBitSet) Set ¶
func (b *OpenBitSet) Set(index int64)
Sets a bit, expanding the set size if necessary
func (*OpenBitSet) String ¶
func (b *OpenBitSet) String() string
type PrintStreamInfoStream ¶
type PrintStreamInfoStream struct {
// contains filtered or unexported fields
}
InfoStream implementation over an io.Writer such as os.Stdout
func NewPrintStreamInfoStream ¶
func NewPrintStreamInfoStream(w io.Writer) *PrintStreamInfoStream
func (*PrintStreamInfoStream) Close ¶
func (is *PrintStreamInfoStream) Close() error
func (*PrintStreamInfoStream) IsEnabled ¶
func (is *PrintStreamInfoStream) IsEnabled(component string) bool
func (*PrintStreamInfoStream) Message ¶
func (is *PrintStreamInfoStream) Message(component, message string, args ...interface{})
type SetOnce ¶
A convenient class which offers a semi-immutable object wrapper implementation which allows one to set the value of an object exactly once, and retrieve it many times. If Set() is called more than once, an error is returned and the operation fails.
func NewSetOnce ¶
func NewSetOnce() *SetOnce
func NewSetOnceOf ¶
func NewSetOnceOf(obj interface{}) *SetOnce
type TimSorter ¶
type TimSorter struct {
	*Sorter
	// contains filtered or unexported fields
}
Sorter implementation based on the TimSort algorithm (http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
This implementation is especially good at sorting partially-sorted arrays and sorts small arrays with binary sort.
NOTE: There are a few differences with the original implementation:
1. The extra amount of memory to perform merges is configurable. This allows small merges to be very fast while large merges will be performed in-place (slightly slower). You can make sure that the fast merge routine will always be used by having maxTempSlots equal to half of the length of the slice of data to sort.
2. Only the fast merge routine can gallop (the one that doesn't merge in place) and it only gallops on the longest slice.
type Version ¶
type Version [4]int
func ParseVersion ¶
Parse a version number of the form major.minor.bugfix.prerelease
The .bugfix and .prerelease parts are optional. Note that this is forwards compatible: the parsed version does not have to exist as a constant.
Source Files ¶
- accountable.go
- array.go
- attributes.go
- bit.go
- bits.go
- byteBlockPool.go
- bytesRefHash.go
- bytesref.go
- constants.go
- convert.go
- filename.go
- fixedBitSet.go
- infoStream.go
- input.go
- intBlockPool.go
- intsRef.go
- io.go
- iterator.go
- list.go
- math.go
- numeric.go
- openBitSet.go
- output.go
- pool.go
- ramUsageEstimator.go
- smallFloat.go
- sort.go
- string.go
- unicode.go
- version.go
- wrapper.go