This is an automated email from the ASF dual-hosted git repository. hanahmily pushed a commit to branch index-int in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git
commit bb43e206a0e89ac40e57d82a16284be29aaf2ca3 Author: Gao Hongtao <[email protected]> AuthorDate: Thu Nov 28 14:51:49 2024 +0800 Support indexing the int type Signed-off-by: Gao Hongtao <[email protected]> --- banyand/measure/write.go | 24 ++-- banyand/stream/benchmark_test.go | 12 +- banyand/stream/write.go | 69 ++++++----- pkg/index/index.go | 188 +++++++++++++++++++++++++---- pkg/index/inverted/inverted.go | 92 +++++++++----- pkg/index/inverted/inverted_series.go | 2 +- pkg/index/inverted/inverted_series_test.go | 74 +++--------- pkg/index/inverted/inverted_test.go | 104 +++++++++++----- pkg/index/inverted/sort_test.go | 10 +- pkg/index/testcases/duration.go | 86 +++---------- pkg/index/testcases/service_name.go | 29 ++--- pkg/query/logical/expr_literal.go | 146 +++++++++++++--------- pkg/query/logical/interface.go | 6 +- pkg/query/logical/stream/index_filter.go | 62 ++++------ 14 files changed, 522 insertions(+), 382 deletions(-) diff --git a/banyand/measure/write.go b/banyand/measure/write.go index d6acc9fa..d27d2ba6 100644 --- a/banyand/measure/write.go +++ b/banyand/measure/write.go @@ -245,22 +245,18 @@ func (w *writeCallback) handleTagFamily(stm *measure, req *measurev1.WriteReques } toIndex := ok || !stm.schema.IndexMode if encodeTagValue.value != nil { - fields = append(fields, index.Field{ - Key: fieldKey, - Term: encodeTagValue.value, - Store: true, - Index: toIndex, - NoSort: r.GetNoSort(), - }) + f := index.NewBytesField(fieldKey, encodeTagValue.value) + f.Store = true + f.Index = toIndex + f.NoSort = r.GetNoSort() + fields = append(fields, f) } else { for _, val := range encodeTagValue.valueArr { - fields = append(fields, index.Field{ - Key: fieldKey, - Term: val, - Store: true, - Index: toIndex, - NoSort: r.GetNoSort(), - }) + f := index.NewBytesField(fieldKey, val) + f.Store = true + f.Index = toIndex + f.NoSort = r.GetNoSort() + fields = append(fields, f) } } continue diff --git a/banyand/stream/benchmark_test.go 
b/banyand/stream/benchmark_test.go index cf2b52a9..866bfe88 100644 --- a/banyand/stream/benchmark_test.go +++ b/banyand/stream/benchmark_test.go @@ -148,13 +148,11 @@ func generateData(p parameter) ([]*elements, []index.Documents, mockIndex) { es.tagFamilies = append(es.tagFamilies, tfs) idx.insert(value, common.SeriesID(k), int(unixTimestamp)) var fields []index.Field - fields = append(fields, index.Field{ - Key: index.FieldKey{ - IndexRuleID: 1, - SeriesID: common.SeriesID(k), - }, - Term: []byte(value), - }) + + fields = append(fields, index.NewBytesField(index.FieldKey{ + IndexRuleID: 1, + SeriesID: common.SeriesID(k), + }, []byte(value))) docs = append(docs, index.Document{ DocID: uint64(unixTimestamp), Fields: fields, diff --git a/banyand/stream/write.go b/banyand/stream/write.go index 12ee5af4..66ff64d8 100644 --- a/banyand/stream/write.go +++ b/banyand/stream/write.go @@ -165,40 +165,21 @@ func (w *writeCallback) handle(dst map[string]*elementsInGroup, writeEvent *stre } t := tagFamilySpec.Tags[j] - encodeTagValue := encodeTagValue( - t.Name, - t.Type, - tagValue) if r, ok := tfr[t.Name]; ok { - if encodeTagValue.value != nil { - fields = append(fields, index.Field{ - Key: index.FieldKey{ - IndexRuleID: r.GetMetadata().GetId(), - Analyzer: r.Analyzer, - SeriesID: series.ID, - }, - Term: encodeTagValue.value, - NoSort: r.GetNoSort(), - }) - } else { - for _, val := range encodeTagValue.valueArr { - fields = append(fields, index.Field{ - Key: index.FieldKey{ - IndexRuleID: r.GetMetadata().GetId(), - Analyzer: r.Analyzer, - SeriesID: series.ID, - }, - Term: val, - NoSort: r.GetNoSort(), - }) - } - } + fields = appendField(fields, index.FieldKey{ + IndexRuleID: r.GetMetadata().GetId(), + Analyzer: r.Analyzer, + SeriesID: series.ID, + }, t.Type, tagValue, r.GetNoSort()) } _, isEntity := stm.indexRuleLocators.EntitySet[tagFamilySpec.Tags[j].Name] if tagFamilySpec.Tags[j].IndexedOnly || isEntity { continue } - tf.values = append(tf.values, encodeTagValue) + 
tf.values = append(tf.values, encodeTagValue( + t.Name, + t.Type, + tagValue)) } if len(tf.values) > 0 { tagFamilies = append(tagFamilies, tf) @@ -319,3 +300,35 @@ func encodeTagValue(name string, tagType databasev1.TagType, tagVal *modelv1.Tag } return tv } + +func appendField(dest []index.Field, fieldKey index.FieldKey, tagType databasev1.TagType, tagVal *modelv1.TagValue, noSort bool) []index.Field { + switch tagType { + case databasev1.TagType_TAG_TYPE_INT: + f := index.NewIntField(fieldKey, tagVal.GetInt().Value) + f.NoSort = noSort + dest = append(dest, f) + case databasev1.TagType_TAG_TYPE_STRING: + f := index.NewStringField(fieldKey, tagVal.GetStr().Value) + f.NoSort = noSort + dest = append(dest, f) + case databasev1.TagType_TAG_TYPE_DATA_BINARY: + f := index.NewBytesField(fieldKey, tagVal.GetBinaryData()) + f.NoSort = noSort + dest = append(dest, f) + case databasev1.TagType_TAG_TYPE_INT_ARRAY: + for i := range tagVal.GetIntArray().Value { + f := index.NewIntField(fieldKey, tagVal.GetIntArray().Value[i]) + f.NoSort = noSort + dest = append(dest, f) + } + case databasev1.TagType_TAG_TYPE_STRING_ARRAY: + for i := range tagVal.GetStrArray().Value { + f := index.NewStringField(fieldKey, tagVal.GetStrArray().Value[i]) + f.NoSort = noSort + dest = append(dest, f) + } + default: + logger.Panicf("unsupported tag value type: %T", tagVal.GetValue()) + } + return dest +} diff --git a/pkg/index/index.go b/pkg/index/index.go index 55387048..9cbe4608 100644 --- a/pkg/index/index.go +++ b/pkg/index/index.go @@ -23,6 +23,8 @@ import ( "context" "fmt" "io" + "math" + "strconv" "github.com/apache/skywalking-banyandb/api/common" databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1" @@ -31,6 +33,7 @@ import ( "github.com/apache/skywalking-banyandb/pkg/index/posting" "github.com/apache/skywalking-banyandb/pkg/iter/sort" "github.com/apache/skywalking-banyandb/pkg/timestamp" + "github.com/blugelabs/bluge/numeric" ) const ( @@ -46,6 +49,11 @@ const ( 
AnalyzerURL = "url" ) +var ( + defaultUpper = convert.Uint64ToBytes(math.MaxUint64) + defaultLower = convert.Uint64ToBytes(0) +) + // FieldKey is the key of field in a document. type FieldKey struct { Analyzer string @@ -62,48 +70,180 @@ func (f FieldKey) Marshal() string { return string(convert.Uint32ToBytes(f.IndexRuleID)) } +func NewStringField(key FieldKey, value string) Field { + return Field{ + term: &BytesTermValue{Value: convert.StringToBytes(value)}, + Key: key, + } +} + +func NewIntField(key FieldKey, value int64) Field { + return Field{ + term: &FloatTermValue{Value: numeric.Int64ToFloat64(value)}, + Key: key, + } +} + +func NewBytesField(key FieldKey, value []byte) Field { + return Field{ + term: &BytesTermValue{Value: bytes.Clone(value)}, + Key: key, + } +} + +func NewFloatField(key FieldKey, value float64) Field { + return Field{ + term: &FloatTermValue{Value: value}, + Key: key, + } +} + +func IntToStr(i int64) string { + return strconv.FormatFloat(numeric.Int64ToFloat64(i), 'f', -1, 64) +} + // Field is a indexed item in a document. 
type Field struct { - Term []byte + term IsTermValue Key FieldKey NoSort bool Store bool Index bool } +func (f *Field) GetTerm() IsTermValue { + return f.term +} + +func (f *Field) GetBytes() []byte { + if bv, ok := f.GetTerm().(*BytesTermValue); ok { + return bv.Value + } + panic("field is not bytes") +} + +func (f *Field) GetFloat() float64 { + if fv, ok := f.GetTerm().(*FloatTermValue); ok { + return fv.Value + } + panic("field is not float") +} + +func (f *Field) String() string { + return fmt.Sprintf("{\"key\": \"%s\", \"term\": %s}", f.Key.Marshal(), f.term) +} + +func (f *Field) MarshalJSON() ([]byte, error) { + return []byte(f.String()), nil +} + +type IsTermValue interface { + isTermValue() + String() string +} + +type BytesTermValue struct { + Value []byte +} + +func (BytesTermValue) isTermValue() {} + +func (b BytesTermValue) String() string { + return convert.BytesToString(b.Value) +} + +type FloatTermValue struct { + Value float64 +} + +func (FloatTermValue) isTermValue() {} + +func (f FloatTermValue) String() string { + return strconv.FormatInt(numeric.Float64ToInt64(f.Value), 10) +} + // RangeOpts contains options to performance a continuous scan. type RangeOpts struct { - Upper []byte - Lower []byte + Upper IsTermValue + Lower IsTermValue IncludesUpper bool IncludesLower bool } -// Between reports whether value is in the range. 
-func (r RangeOpts) Between(value []byte) int { - if r.Upper != nil { - var in bool - if r.IncludesUpper { - in = bytes.Compare(r.Upper, value) >= 0 - } else { - in = bytes.Compare(r.Upper, value) > 0 - } - if !in { - return 1 - } +func (r RangeOpts) IsEmpty() bool { + return r.Upper == nil && r.Lower == nil +} + +func (r RangeOpts) Valid() bool { + if r.Upper == nil || r.Lower == nil { + return false } - if r.Lower != nil { - var in bool - if r.IncludesLower { - in = bytes.Compare(r.Lower, value) <= 0 - } else { - in = bytes.Compare(r.Lower, value) < 0 + switch r.Upper.(type) { + case *BytesTermValue: + if bytes.Compare(r.Lower.(*BytesTermValue).Value, r.Upper.(*BytesTermValue).Value) > 0 { + return false } - if !in { - return -1 + case *FloatTermValue: + if r.Lower.(*FloatTermValue).Value > r.Upper.(*FloatTermValue).Value { + return false } + default: + return false + } + return true +} + +func NewStringRangeOpts(lower, upper string, includesLower, includesUpper bool) RangeOpts { + var upperBytes, lowerBytes []byte + if len(upper) == 0 { + upperBytes = defaultUpper + } else { + upperBytes = convert.StringToBytes(upper) + } + if len(lower) == 0 { + lowerBytes = defaultLower + } else { + lowerBytes = convert.StringToBytes(lower) + } + return RangeOpts{ + Lower: &BytesTermValue{Value: lowerBytes}, + Upper: &BytesTermValue{Value: upperBytes}, + IncludesLower: includesLower, + IncludesUpper: includesUpper, + } +} + +func NewIntRangeOpts(lower, upper int64, includesLower, includesUpper bool) RangeOpts { + return RangeOpts{ + Lower: &FloatTermValue{Value: numeric.Int64ToFloat64(lower)}, + Upper: &FloatTermValue{Value: numeric.Int64ToFloat64(upper)}, + IncludesLower: includesLower, + IncludesUpper: includesUpper, + } +} + +func NewBytesRangeOpts(lower, upper []byte, includesLower, includesUpper bool) RangeOpts { + if len(upper) == 0 { + upper = defaultUpper + } + if len(lower) == 0 { + lower = defaultLower + } + return RangeOpts{ + Lower: &BytesTermValue{Value: 
bytes.Clone(lower)}, + Upper: &BytesTermValue{Value: bytes.Clone(upper)}, + IncludesLower: includesLower, + IncludesUpper: includesUpper, + } +} + +func NewFloatRangeOpts(lower, upper float64, includesLower, includesUpper bool) RangeOpts { + return RangeOpts{ + Lower: &FloatTermValue{Value: lower}, + Upper: &FloatTermValue{Value: upper}, + IncludesLower: includesLower, + IncludesUpper: includesUpper, } - return 0 } // DocumentResult represents a document in an index. diff --git a/pkg/index/inverted/inverted.go b/pkg/index/inverted/inverted.go index 4772f8b8..c6c3d81c 100644 --- a/pkg/index/inverted/inverted.go +++ b/pkg/index/inverted/inverted.go @@ -19,11 +19,10 @@ package inverted import ( - "bytes" "context" "io" "log" - "math" + "strconv" "time" "github.com/blugelabs/bluge" @@ -53,8 +52,6 @@ const ( ) var ( - defaultUpper = convert.Uint64ToBytes(math.MaxUint64) - defaultLower = convert.Uint64ToBytes(0) defaultRangePreloadSize = 1000 defaultProjection = []string{docIDField} ) @@ -113,7 +110,15 @@ func (s *store) Batch(batch index.Batch) error { for _, d := range batch.Documents { doc := bluge.NewDocument(convert.BytesToString(convert.Uint64ToBytes(d.DocID))) for i, f := range d.Fields { - tf := bluge.NewKeywordFieldBytes(f.Key.Marshal(), f.Term) + var tf *bluge.TermField + switch f.GetTerm().(type) { + case *index.BytesTermValue: + tf = bluge.NewKeywordFieldBytes(f.Key.Marshal(), f.GetBytes()) + case *index.FloatTermValue: + tf = bluge.NewNumericField(f.Key.Marshal(), f.GetFloat()) + default: + return errors.Errorf("unexpected field type: %T", f.GetTerm()) + } if !f.NoSort { tf.Sortable() } @@ -172,9 +177,7 @@ func (s *store) Close() error { func (s *store) Iterator(ctx context.Context, fieldKey index.FieldKey, termRange index.RangeOpts, order modelv1.Sort, preLoadSize int, ) (iter index.FieldIterator[*index.DocumentResult], err error) { - if termRange.Lower != nil && - termRange.Upper != nil && - bytes.Compare(termRange.Lower, termRange.Upper) > 0 { + if 
!termRange.IsEmpty() && !termRange.Valid() { return index.DummyFieldIterator, nil } if !s.closer.AddRunning() { @@ -192,21 +195,42 @@ func (s *store) Iterator(ctx context.Context, fieldKey index.FieldKey, termRange rangeQuery = rangeQuery.AddMust(bluge.NewTermQuery(string(fieldKey.SeriesID.Marshal())). SetField(seriesIDField)) rangeNode.Append(newTermNode(string(fieldKey.SeriesID.Marshal()), nil)) - if termRange.Lower != nil || termRange.Upper != nil { - if termRange.Upper == nil { - termRange.Upper = defaultUpper - } - if termRange.Lower == nil { - termRange.Lower = defaultLower + if !termRange.IsEmpty() && termRange.Valid() { + switch lower := termRange.Lower.(type) { + case *index.BytesTermValue: + upper := termRange.Upper.(*index.BytesTermValue) + rangeQuery.AddMust(bluge.NewTermRangeInclusiveQuery( + string(lower.Value), + string(upper.Value), + termRange.IncludesLower, + termRange.IncludesUpper, + ). + SetField(fk)) + rangeNode.Append(newTermRangeInclusiveNode( + string(lower.Value), + string(upper.Value), + termRange.IncludesLower, + termRange.IncludesUpper, nil, + )) + + case *index.FloatTermValue: + upper := termRange.Upper.(*index.FloatTermValue) + rangeQuery.AddMust(bluge.NewNumericRangeInclusiveQuery( + lower.Value, + upper.Value, + termRange.IncludesLower, + termRange.IncludesUpper, + ). + SetField(fk)) + rangeNode.Append(newTermRangeInclusiveNode( + strconv.FormatFloat(lower.Value, 'f', -1, 64), + strconv.FormatFloat(upper.Value, 'f', -1, 64), + termRange.IncludesLower, + termRange.IncludesUpper, + nil, + )) + default: } - rangeQuery.AddMust(bluge.NewTermRangeInclusiveQuery( - string(termRange.Lower), - string(termRange.Upper), - termRange.IncludesLower, - termRange.IncludesUpper, - ). 
- SetField(fk)) - rangeNode.Append(newTermRangeInclusiveNode(string(termRange.Lower), string(termRange.Upper), termRange.IncludesLower, termRange.IncludesUpper, nil)) } sortedKey := fk @@ -234,11 +258,25 @@ func (s *store) MatchTerms(field index.Field) (list posting.List, err error) { if err != nil { return nil, err } - fk := field.Key.Marshal() - query := bluge.NewBooleanQuery(). - AddMust(bluge.NewTermQuery(string(field.Term)).SetField(fk)). - AddMust(bluge.NewTermQuery(string(field.Key.SeriesID.Marshal())). - SetField(seriesIDField)) + + var query *bluge.BooleanQuery + switch field.GetTerm().(type) { + case *index.BytesTermValue: + query = bluge.NewBooleanQuery() + query.AddMust(bluge.NewTermQuery(string(field.GetBytes())).SetField(field.Key.Marshal())) + case *index.FloatTermValue: + query = bluge.NewBooleanQuery() + query.AddMust(bluge.NewTermQuery( + strconv.FormatFloat(field.GetFloat(), 'f', -1, 64)). + SetField(field.Key.Marshal())) + case nil: + return roaring.DummyPostingList, nil + default: + return nil, errors.Errorf("unexpected field type: %T", field.GetTerm()) + } + query.AddMust(bluge.NewTermQuery(string(field.Key.SeriesID.Marshal())). 
+ SetField(seriesIDField)) + documentMatchIterator, err := reader.Search(context.Background(), bluge.NewAllMatches(query)) if err != nil { return nil, err diff --git a/pkg/index/inverted/inverted_series.go b/pkg/index/inverted/inverted_series.go index a5d4c12c..32b6d3df 100644 --- a/pkg/index/inverted/inverted_series.go +++ b/pkg/index/inverted/inverted_series.go @@ -75,7 +75,7 @@ func (s *store) UpdateSeriesBatch(batch index.Batch) error { func toDoc(d index.Document) *bluge.Document { doc := bluge.NewDocument(convert.BytesToString(d.EntityValues)) for _, f := range d.Fields { - tf := bluge.NewKeywordFieldBytes(f.Key.Marshal(), f.Term) + tf := bluge.NewKeywordFieldBytes(f.Key.Marshal(), f.GetBytes()) if !f.Index { tf.FieldOptions = 0 } else if !f.NoSort { diff --git a/pkg/index/inverted/inverted_series_test.go b/pkg/index/inverted/inverted_series_test.go index 5f3f384c..1b3038f6 100644 --- a/pkg/index/inverted/inverted_series_test.go +++ b/pkg/index/inverted/inverted_series_test.go @@ -904,32 +904,10 @@ func generateDocs() (index.Batch, index.Batch) { series2 := index.Document{ EntityValues: []byte("test2"), Fields: []index.Field{ - { - Key: fieldKeyDuration, - Term: convert.Int64ToBytes(int64(100)), - Store: true, - Index: true, - }, - { - Key: fieldKeyServiceName, - Term: []byte("svc2"), - Store: true, - Index: true, - }, - { - Key: fieldKeyStartTime, - Term: convert.Int64ToBytes(int64(100)), - Store: true, - Index: true, - }, - { - Key: index.FieldKey{ - TagName: "short_name", - }, - Term: []byte("t2"), - Store: true, - Index: false, - }, + field(fieldKeyDuration, convert.Int64ToBytes(100), true, true), + field(fieldKeyServiceName, []byte("svc2"), true, true), + field(fieldKeyStartTime, convert.Int64ToBytes(100), true, true), + field(index.FieldKey{TagName: "short_name"}, []byte("t2"), true, false), }, Timestamp: int64(101), } @@ -937,44 +915,17 @@ func generateDocs() (index.Batch, index.Batch) { series3 := index.Document{ EntityValues: []byte("test3"), Fields: 
[]index.Field{ - { - Key: fieldKeyDuration, - Term: convert.Int64ToBytes(int64(500)), - Store: true, - Index: true, - }, - { - Key: fieldKeyStartTime, - Term: convert.Int64ToBytes(int64(1000)), - Store: true, - Index: true, - }, - { - Key: index.FieldKey{ - TagName: "short_name", - }, - Term: []byte("t3"), - Store: true, - Index: false, - }, + field(fieldKeyDuration, convert.Int64ToBytes(500), true, true), + field(fieldKeyStartTime, convert.Int64ToBytes(1000), true, true), + field(index.FieldKey{TagName: "short_name"}, []byte("t3"), true, false), }, Timestamp: int64(1001), } series4 := index.Document{ EntityValues: []byte("test4"), Fields: []index.Field{ - { - Key: fieldKeyDuration, - Term: convert.Int64ToBytes(int64(500)), - Store: true, - Index: true, - }, - { - Key: fieldKeyStartTime, - Term: convert.Int64ToBytes(int64(2000)), - Store: true, - Index: true, - }, + field(fieldKeyDuration, convert.Int64ToBytes(500), true, true), + field(fieldKeyStartTime, convert.Int64ToBytes(2000), true, true), }, Timestamp: int64(2001), } @@ -985,6 +936,13 @@ func generateDocs() (index.Batch, index.Batch) { } } +func field(key index.FieldKey, value []byte, stored, indexed bool) index.Field { + f := index.NewBytesField(key, value) + f.Index = indexed + f.Store = stored + return f +} + func updateData(tester *require.Assertions, s index.SeriesStore) { b1, b2 := generateDocs() tester.NoError(s.UpdateSeriesBatch(b1)) diff --git a/pkg/index/inverted/inverted_test.go b/pkg/index/inverted/inverted_test.go index 58b086ef..b0a41aca 100644 --- a/pkg/index/inverted/inverted_test.go +++ b/pkg/index/inverted/inverted_test.go @@ -213,38 +213,33 @@ func setup(tester *require.Assertions, s index.Store, serviceName index.FieldKey var batch index.Batch batch.Documents = append(batch.Documents, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("GET::/product/order"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "GET::/product/order"), + }, DocID: 1, 
}, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("GET::/root/product"), - }}, + Fields: []index.Field{ + index.NewBytesField(serviceName, []byte("GET::/root/product")), + }, DocID: 2, }, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("org.apache.skywalking.examples.OrderService.order"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "org.apache.skywalking.examples.OrderService.order"), + }, DocID: 3, }, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("/svc1/v1/user"), - }}, + Fields: []index.Field{ + index.NewBytesField(serviceName, []byte("/svc1/v1/user")), + }, DocID: 4, }, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("/svc1/v2/user"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "/svc1/v2/user"), + }, DocID: 5, }, ) @@ -255,24 +250,21 @@ func setupSeries(tester *assert.Assertions, s index.Store, serviceName index.Fie var batch index.Batch batch.Documents = append(batch.Documents, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("test.a"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "test.a"), + }, DocID: 1, }, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("test.b"), - }}, + Fields: []index.Field{ + index.NewBytesField(serviceName, []byte("test.b")), + }, DocID: 2, }, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("test.c"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "test.c"), + }, DocID: 3, }, ) @@ -311,6 +303,54 @@ func TestStore_Iterator(t *testing.T) { testcases.RunDuration(t, data, s) } +func TestStore_NumericMatch(t *testing.T) { + tester := assert.New(t) + path, fn := setUp(require.New(t)) + s, err := NewStore(StoreOpts{ + Path: path, + Logger: logger.GetLogger("test"), + }) + tester.NoError(err) + defer func() { + tester.NoError(s.Close()) + fn() + }() + var 
batch index.Batch + serviceName := index.FieldKey{ + IndexRuleID: 6, + } + durationName := index.FieldKey{ + IndexRuleID: 7, + } + batch.Documents = append(batch.Documents, + index.Document{ + Fields: []index.Field{ + index.NewStringField(serviceName, "svc1"), + index.NewIntField(durationName, 50), + }, + DocID: 1, + }, + index.Document{ + Fields: []index.Field{ + index.NewBytesField(serviceName, []byte("svc2")), + index.NewIntField(durationName, 200), + }, + DocID: 2, + }, + index.Document{ + Fields: []index.Field{ + index.NewIntField(durationName, 500), + }, + DocID: 3, + }, + ) + tester.NoError(s.Batch(batch)) + l, err := s.MatchTerms(index.NewIntField(durationName, 50)) + tester.NoError(err) + tester.NotNil(l) + tester.True(roaring.NewPostingListWithInitialData(1).Equal(l)) +} + func setUp(t *require.Assertions) (tempDir string, deferFunc func()) { t.NoError(logger.Init(logger.Logging{ Env: "dev", diff --git a/pkg/index/inverted/sort_test.go b/pkg/index/inverted/sort_test.go index 9b78dc9f..148facf4 100644 --- a/pkg/index/inverted/sort_test.go +++ b/pkg/index/inverted/sort_test.go @@ -29,7 +29,6 @@ import ( "github.com/apache/skywalking-banyandb/api/common" modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1" - "github.com/apache/skywalking-banyandb/pkg/convert" "github.com/apache/skywalking-banyandb/pkg/index" "github.com/apache/skywalking-banyandb/pkg/index/posting" "github.com/apache/skywalking-banyandb/pkg/index/posting/roaring" @@ -224,13 +223,12 @@ func setUpDuration(t *require.Assertions, ts time.Time, store index.Writer) map[ } sid := i2%2 + 1 batch.Documents = append(batch.Documents, index.Document{ - Fields: []index.Field{{ - Key: index.FieldKey{ + Fields: []index.Field{ + index.NewIntField(index.FieldKey{ SeriesID: common.SeriesID(sid), IndexRuleID: indexRuleID, - }, - Term: convert.Int64ToBytes(int64(term)), - }}, + }, int64(term)), + }, DocID: id, Timestamp: ts.UnixNano(), }) diff --git a/pkg/index/testcases/duration.go 
b/pkg/index/testcases/duration.go index a812cadf..0f1997e7 100644 --- a/pkg/index/testcases/duration.go +++ b/pkg/index/testcases/duration.go @@ -21,6 +21,7 @@ package testcases import ( "context" "fmt" + "math" "sort" "testing" @@ -28,7 +29,6 @@ import ( "github.com/stretchr/testify/require" modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1" - "github.com/apache/skywalking-banyandb/pkg/convert" "github.com/apache/skywalking-banyandb/pkg/index" "github.com/apache/skywalking-banyandb/pkg/index/posting" "github.com/apache/skywalking-banyandb/pkg/index/posting/roaring" @@ -85,10 +85,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_ASC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(50), - Upper: convert.Int64ToBytes(2000), - }, + termRange: index.NewIntRangeOpts(50, 2000, false, false), }, want: []int{200, 500, 1000}, }, @@ -97,10 +94,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_DESC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(50), - Upper: convert.Int64ToBytes(2000), - }, + termRange: index.NewIntRangeOpts(50, 2000, false, false), }, want: []int{1000, 500, 200}, }, @@ -109,12 +103,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_ASC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(200), - IncludesLower: true, - Upper: convert.Int64ToBytes(1000), - IncludesUpper: true, - }, + termRange: index.NewIntRangeOpts(200, 1000, true, true), }, want: []int{200, 500, 1000}, }, @@ -123,24 +112,16 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_DESC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(200), - IncludesLower: 
true, - Upper: convert.Int64ToBytes(1000), - IncludesUpper: true, - }, + termRange: index.NewIntRangeOpts(200, 1000, true, true), }, want: []int{1000, 500, 200}, }, { - name: "scan in [lower, undefined) and sort in asc order", + name: "scan in [lower, undefined) and sort in asc order", args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_ASC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(200), - IncludesLower: true, - }, + termRange: index.NewIntRangeOpts(200, math.MaxInt64, true, false), }, want: []int{200, 500, 1000, 2000}, }, @@ -149,10 +130,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_DESC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(200), - IncludesLower: true, - }, + termRange: index.NewIntRangeOpts(200, math.MaxInt64, true, false), }, want: []int{2000, 1000, 500, 200}, }, @@ -161,10 +139,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_ASC, - termRange: index.RangeOpts{ - Upper: convert.Int64ToBytes(1000), - IncludesUpper: true, - }, + termRange: index.NewIntRangeOpts(math.MinInt64, 1000, false, true), }, want: []int{50, 200, 500, 1000}, }, @@ -173,10 +148,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_DESC, - termRange: index.RangeOpts{ - Upper: convert.Int64ToBytes(1000), - IncludesUpper: true, - }, + termRange: index.NewIntRangeOpts(math.MinInt64, 1000, false, true), }, want: []int{1000, 500, 200, 50}, }, @@ -185,10 +157,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_ASC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(50 + 100), - Upper: convert.Int64ToBytes(2000 - 100), - }, + termRange: index.NewIntRangeOpts(150, 1900, 
false, false), }, want: []int{200, 500, 1000}, }, @@ -197,10 +166,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_DESC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(50 + 100), - Upper: convert.Int64ToBytes(2000 - 100), - }, + termRange: index.NewIntRangeOpts(150, 1900, false, false), }, want: []int{1000, 500, 200}, }, @@ -209,12 +175,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_ASC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(50 + 100), - IncludesLower: true, - Upper: convert.Int64ToBytes(2000 - 100), - IncludesUpper: true, - }, + termRange: index.NewIntRangeOpts(150, 1900, true, true), }, want: []int{200, 500, 1000}, }, @@ -223,12 +184,7 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { args: args{ fieldKey: duration, orderType: modelv1.Sort_SORT_DESC, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(50 + 100), - IncludesLower: true, - Upper: convert.Int64ToBytes(2000 - 100), - IncludesUpper: true, - }, + termRange: index.NewIntRangeOpts(150, 1900, true, true), }, want: []int{1000, 500, 200}, }, @@ -254,11 +210,8 @@ func RunDuration(t *testing.T, data map[int]posting.List, store SimpleStore) { { name: "invalid range", args: args{ - fieldKey: duration, - termRange: index.RangeOpts{ - Lower: convert.Int64ToBytes(100), - Upper: convert.Int64ToBytes(50), - }, + fieldKey: duration, + termRange: index.NewIntRangeOpts(100, 50, false, false), }, }, } @@ -345,10 +298,9 @@ func setUpPartialDuration(t *assert.Assertions, store index.Writer, r map[int]po continue } batch.Documents = append(batch.Documents, index.Document{ - Fields: []index.Field{{ - Key: duration, - Term: convert.Int64ToBytes(int64(term)), - }}, + Fields: []index.Field{ + index.NewIntField(duration, int64(term)), + }, DocID: id, }) 
r[term].Insert(id) diff --git a/pkg/index/testcases/service_name.go b/pkg/index/testcases/service_name.go index af4ac951..1e8b7924 100644 --- a/pkg/index/testcases/service_name.go +++ b/pkg/index/testcases/service_name.go @@ -44,18 +44,12 @@ func RunServiceName(t *testing.T, store SimpleStore) { }{ { name: "match gateway", - arg: index.Field{ - Key: serviceName, - Term: []byte("gateway"), - }, + arg: index.NewStringField(serviceName, "gateway"), want: roaring.NewRange(0, 50), }, { name: "match webpage", - arg: index.Field{ - Key: serviceName, - Term: []byte("webpage"), - }, + arg: index.NewStringField(serviceName, "webpage"), want: roaring.NewRange(50, 100), }, { @@ -64,10 +58,7 @@ func RunServiceName(t *testing.T, store SimpleStore) { }, { name: "unknown term", - arg: index.Field{ - Key: serviceName, - Term: []byte("unknown"), - }, + arg: index.NewStringField(serviceName, "unknown"), want: roaring.DummyPostingList, }, } @@ -91,18 +82,16 @@ func SetUp(t *assert.Assertions, store SimpleStore) { for i := 0; i < 100; i++ { if i < 100/2 { batch.Documents = append(batch.Documents, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("gateway"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "gateway"), + }, DocID: uint64(i), }) } else { batch.Documents = append(batch.Documents, index.Document{ - Fields: []index.Field{{ - Key: serviceName, - Term: []byte("webpage"), - }}, + Fields: []index.Field{ + index.NewStringField(serviceName, "webpage"), + }, DocID: uint64(i), }) } diff --git a/pkg/query/logical/expr_literal.go b/pkg/query/logical/expr_literal.go index c21fdd08..b49a499f 100644 --- a/pkg/query/logical/expr_literal.go +++ b/pkg/query/logical/expr_literal.go @@ -18,9 +18,8 @@ package logical import ( - "bytes" - "encoding/hex" "fmt" + "math" "strconv" "strings" @@ -28,6 +27,8 @@ import ( databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1" "github.com/apache/skywalking-banyandb/pkg/convert" + 
"github.com/apache/skywalking-banyandb/pkg/index" + "github.com/apache/skywalking-banyandb/pkg/logger" ) var ( @@ -39,6 +40,21 @@ type int64Literal struct { int64 } +func (i *int64Literal) Field(key index.FieldKey) index.Field { + return index.NewIntField(key, i.int64) +} + +func (i *int64Literal) RangeOpts(isUpper bool, includeLower bool, includeUpper bool) index.RangeOpts { + if isUpper { + return index.NewIntRangeOpts(math.MinInt64, i.int64, includeLower, includeUpper) + } + return index.NewIntRangeOpts(i.int64, math.MaxInt64, includeLower, includeUpper) +} + +func (i *int64Literal) SubExprs() []LiteralExpr { + return []LiteralExpr{i} +} + func newInt64Literal(val int64) *int64Literal { return &int64Literal{ int64: val, @@ -86,10 +102,6 @@ func (i *int64Literal) Equal(expr Expr) bool { return false } -func (i *int64Literal) DataType() int32 { - return int32(databasev1.TagType_TAG_TYPE_INT) -} - func (i *int64Literal) String() string { return strconv.FormatInt(i.int64, 10) } @@ -107,6 +119,24 @@ type int64ArrLiteral struct { arr []int64 } +func (i *int64ArrLiteral) Field(key index.FieldKey) index.Field { + logger.Panicf("unsupported generate an index field for int64 array") + return index.Field{} +} + +func (i *int64ArrLiteral) RangeOpts(isUpper bool, includeLower bool, includeUpper bool) index.RangeOpts { + logger.Panicf("unsupported generate an index range opts for int64 array") + return index.RangeOpts{} +} + +func (i *int64ArrLiteral) SubExprs() []LiteralExpr { + exprs := make([]LiteralExpr, 0, len(i.arr)) + for _, v := range i.arr { + exprs = append(exprs, newInt64Literal(v)) + } + return exprs +} + func newInt64ArrLiteral(val []int64) *int64ArrLiteral { return &int64ArrLiteral{ arr: val, @@ -169,10 +199,6 @@ func (i *int64ArrLiteral) Equal(expr Expr) bool { return false } -func (i *int64ArrLiteral) DataType() int32 { - return int32(databasev1.TagType_TAG_TYPE_INT_ARRAY) -} - func (i *int64ArrLiteral) String() string { return fmt.Sprintf("%v", i.arr) } @@ 
-186,14 +212,31 @@ func (i *int64ArrLiteral) Elements() []string { } var ( - _ LiteralExpr = (*strLiteral)(nil) - _ ComparableExpr = (*strLiteral)(nil) + _ LiteralExpr = (*strLiteral)(nil) + _ ComparableExpr = (*strLiteral)(nil) + defaultUpper = convert.Uint64ToBytes(math.MaxUint64) + defaultLower = convert.Uint64ToBytes(0) ) type strLiteral struct { string } +func (s *strLiteral) Field(key index.FieldKey) index.Field { + return index.NewStringField(key, s.string) +} + +func (s *strLiteral) RangeOpts(isUpper bool, includeLower bool, includeUpper bool) index.RangeOpts { + if isUpper { + return index.NewStringRangeOpts(convert.BytesToString(defaultLower), s.string, includeLower, includeUpper) + } + return index.NewStringRangeOpts(s.string, convert.BytesToString(defaultUpper), includeLower, includeUpper) +} + +func (s *strLiteral) SubExprs() []LiteralExpr { + return []LiteralExpr{s} +} + func (s *strLiteral) Compare(other LiteralExpr) (int, bool) { if o, ok := other.(*strLiteral); ok { return strings.Compare(s.string, o.string), true @@ -239,10 +282,6 @@ func str(str string) LiteralExpr { return &strLiteral{str} } -func (s *strLiteral) DataType() int32 { - return int32(databasev1.TagType_TAG_TYPE_STRING) -} - func (s *strLiteral) String() string { return s.string } @@ -260,6 +299,24 @@ type strArrLiteral struct { arr []string } +func (s *strArrLiteral) Field(key index.FieldKey) index.Field { + logger.Panicf("unsupported generate an index field for string array") + return index.Field{} +} + +func (s *strArrLiteral) RangeOpts(isUpper bool, includeLower bool, includeUpper bool) index.RangeOpts { + logger.Panicf("unsupported generate an index range opts for string array") + return index.RangeOpts{} +} + +func (s *strArrLiteral) SubExprs() []LiteralExpr { + exprs := make([]LiteralExpr, 0, len(s.arr)) + for _, v := range s.arr { + exprs = append(exprs, str(v)) + } + return exprs +} + func newStrArrLiteral(val []string) *strArrLiteral { return &strArrLiteral{ arr: val, @@ 
-322,10 +379,6 @@ func (s *strArrLiteral) Equal(expr Expr) bool { return false } -func (s *strArrLiteral) DataType() int32 { - return int32(databasev1.TagType_TAG_TYPE_STRING_ARRAY) -} - func (s *strArrLiteral) String() string { return fmt.Sprintf("%v", s.arr) } @@ -334,53 +387,28 @@ func (s *strArrLiteral) Elements() []string { return s.arr } -// BytesLiteral represents a wrapper for a slice of bytes. -type BytesLiteral struct { - bb []byte -} - -// NewBytesLiteral creates a new instance of BytesLiteral with the provided slice of bytes. -func NewBytesLiteral(bb []byte) *BytesLiteral { - return &BytesLiteral{bb: bb} -} - -// Bytes returns a 2D slice of bytes where the inner slice contains the byte slice stored in the BytesLiteral. -func (b *BytesLiteral) Bytes() [][]byte { - return [][]byte{b.bb} -} - -// Equal checks if the current BytesLiteral is equal to the provided Expr. -func (b *BytesLiteral) Equal(expr Expr) bool { - if other, ok := expr.(*BytesLiteral); ok { - return bytes.Equal(other.bb, b.bb) - } - - return false -} - -// DataType returns the data type of BytesLiteral. -func (b *BytesLiteral) DataType() int32 { - return int32(databasev1.TagType_TAG_TYPE_DATA_BINARY) -} - -// String converts the BytesLiteral's slice of bytes to a string representation. -func (b *BytesLiteral) String() string { - return hex.EncodeToString(b.bb) -} - -// Elements returns a slice containing the string representation of the byte slice. 
-func (b *BytesLiteral) Elements() []string { - return []string{hex.EncodeToString(b.bb)} -} - var ( _ LiteralExpr = (*nullLiteral)(nil) _ ComparableExpr = (*nullLiteral)(nil) nullLiteralExpr = &nullLiteral{} + nullIndexField = index.Field{} + nullRangeOpts = index.RangeOpts{} ) type nullLiteral struct{} +func (s *nullLiteral) Field(key index.FieldKey) index.Field { + return nullIndexField +} + +func (s *nullLiteral) RangeOpts(isUpper bool, includeLower bool, includeUpper bool) index.RangeOpts { + return nullRangeOpts +} + +func (s *nullLiteral) SubExprs() []LiteralExpr { + panic("unimplemented") +} + func newNullLiteral() *nullLiteral { return nullLiteralExpr } diff --git a/pkg/query/logical/interface.go b/pkg/query/logical/interface.go index 0634473a..90e85e6a 100644 --- a/pkg/query/logical/interface.go +++ b/pkg/query/logical/interface.go @@ -19,6 +19,8 @@ package logical import ( "fmt" + + "github.com/apache/skywalking-banyandb/pkg/index" ) // UnresolvedPlan denotes an logical expression. @@ -38,7 +40,6 @@ type Plan interface { type Expr interface { fmt.Stringer Elements() []string - DataType() int32 Equal(Expr) bool } @@ -46,6 +47,9 @@ type Expr interface { type LiteralExpr interface { Expr Bytes() [][]byte + Field(key index.FieldKey) index.Field + RangeOpts(isUpper bool, includeLower, includeUpper bool) index.RangeOpts + SubExprs() []LiteralExpr } // ComparableExpr allows comparing Expr and Expr arrays. 
diff --git a/pkg/query/logical/stream/index_filter.go b/pkg/query/logical/stream/index_filter.go index fa8852b6..e5746710 100644 --- a/pkg/query/logical/stream/index_filter.go +++ b/pkg/query/logical/stream/index_filter.go @@ -18,8 +18,6 @@ package stream import ( - "bytes" - "encoding/base64" "encoding/json" "strings" @@ -106,23 +104,13 @@ func parseConditionToFilter(cond *modelv1.Condition, indexRule *databasev1.Index ) (index.Filter, [][]*modelv1.TagValue, error) { switch cond.Op { case modelv1.Condition_BINARY_OP_GT: - return newRange(indexRule, index.RangeOpts{ - Lower: bytes.Join(expr.Bytes(), nil), - }), [][]*modelv1.TagValue{entity}, nil + return newRange(indexRule, expr.RangeOpts(false, false, false)), [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_GE: - return newRange(indexRule, index.RangeOpts{ - IncludesLower: true, - Lower: bytes.Join(expr.Bytes(), nil), - }), [][]*modelv1.TagValue{entity}, nil + return newRange(indexRule, expr.RangeOpts(false, true, false)), [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_LT: - return newRange(indexRule, index.RangeOpts{ - Upper: bytes.Join(expr.Bytes(), nil), - }), [][]*modelv1.TagValue{entity}, nil + return newRange(indexRule, expr.RangeOpts(true, false, false)), [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_LE: - return newRange(indexRule, index.RangeOpts{ - IncludesUpper: true, - Upper: bytes.Join(expr.Bytes(), nil), - }), [][]*modelv1.TagValue{entity}, nil + return newRange(indexRule, expr.RangeOpts(true, true, false)), [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_EQ: return newEq(indexRule, expr), [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_MATCH: @@ -130,47 +118,47 @@ func parseConditionToFilter(cond *modelv1.Condition, indexRule *databasev1.Index case modelv1.Condition_BINARY_OP_NE: return newNot(indexRule, newEq(indexRule, expr)), [][]*modelv1.TagValue{entity}, nil case 
modelv1.Condition_BINARY_OP_HAVING: - bb := expr.Bytes() - l := len(bb) + ee := expr.SubExprs() + l := len(ee) if l < 1 { return ENode, [][]*modelv1.TagValue{entity}, nil } and := newAnd(l) - for _, b := range bb { - and.append(newEq(indexRule, logical.NewBytesLiteral(b))) + for i := range ee { + and.append(newEq(indexRule, ee[i])) } return and, [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_NOT_HAVING: - bb := expr.Bytes() - l := len(bb) + ee := expr.SubExprs() + l := len(ee) if l < 1 { return ENode, [][]*modelv1.TagValue{entity}, nil } and := newAnd(l) - for _, b := range bb { - and.append(newEq(indexRule, logical.NewBytesLiteral(b))) + for i := range ee { + and.append(newEq(indexRule, ee[i])) } return newNot(indexRule, and), [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_IN: - bb := expr.Bytes() - l := len(bb) + ee := expr.SubExprs() + l := len(ee) if l < 1 { return ENode, [][]*modelv1.TagValue{entity}, nil } or := newOr(l) - for _, b := range bb { - or.append(newEq(indexRule, logical.NewBytesLiteral(b))) + for i := range ee { + or.append(newEq(indexRule, ee[i])) } return or, [][]*modelv1.TagValue{entity}, nil case modelv1.Condition_BINARY_OP_NOT_IN: - bb := expr.Bytes() - l := len(bb) + ee := expr.SubExprs() + l := len(ee) if l < 1 { return ENode, [][]*modelv1.TagValue{entity}, nil } or := newOr(l) - for _, b := range bb { - or.append(newEq(indexRule, logical.NewBytesLiteral(b))) + for i := range ee { + or.append(newEq(indexRule, ee[i])) } return newNot(indexRule, or), [][]*modelv1.TagValue{entity}, nil } @@ -375,6 +363,7 @@ func (n *not) String() string { type eq struct { *leaf + field *index.Field } func newEq(indexRule *databasev1.IndexRule, values logical.LiteralExpr) *eq { @@ -391,10 +380,7 @@ func (eq *eq) Execute(searcher index.GetSearcher, seriesID common.SeriesID) (pos if err != nil { return nil, err } - return s.MatchTerms(index.Field{ - Key: eq.Key.toIndex(seriesID), - Term: bytes.Join(eq.Expr.Bytes(), nil), 
- }) + return s.MatchTerms(eq.Expr.Field(eq.Key.toIndex(seriesID))) } func (eq *eq) MarshalJSON() ([]byte, error) { @@ -481,9 +467,9 @@ func (r *rangeOp) MarshalJSON() ([]byte, error) { builder.WriteString("(") } } - builder.WriteString(base64.StdEncoding.EncodeToString(r.Opts.Lower)) + builder.WriteString(r.Opts.Lower.String()) builder.WriteString(",") - builder.WriteString(base64.StdEncoding.EncodeToString(r.Opts.Upper)) + builder.WriteString(r.Opts.Upper.String()) if r.Opts.Upper != nil { if r.Opts.IncludesUpper { builder.WriteString("]")
