This is an automated email from the ASF dual-hosted git repository.

hanahmily pushed a commit to branch sidx/query
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git
commit aa5cf34239152f3c73d96fa9de6df5cd39f1ef46
Author: Gao Hongtao <[email protected]>
AuthorDate: Sun Aug 24 06:47:25 2025 +0800

    Refactor metadata and part iterator: Clean up whitespace and improve code
    readability in metadata.go and part_iter.go. Update tests to use compressed
    primary block metadata for better memory management in part_test.go.
---
 banyand/internal/sidx/metadata.go  |  6 ++--
 banyand/internal/sidx/part.go      |  2 --
 banyand/internal/sidx/part_iter.go | 29 ++++++++--------
 banyand/internal/sidx/part_test.go | 68 +++++++++++++++++++++++++++++++++++---
 4 files changed, 80 insertions(+), 25 deletions(-)

diff --git a/banyand/internal/sidx/metadata.go b/banyand/internal/sidx/metadata.go
index 2a5a686d..008ec728 100644
--- a/banyand/internal/sidx/metadata.go
+++ b/banyand/internal/sidx/metadata.go
@@ -69,9 +69,9 @@ func (bma *blockMetadataArray) reset() {
 }
 
 var (
-	partMetadataPool = pool.Register[*partMetadata]("sidx-partMetadata")
-	blockMetadataPool = pool.Register[*blockMetadata]("sidx-blockMetadata")
-	blockMetadataArrayPool = pool.Register[*blockMetadataArray]("sidx-blockMetadataArray")
+	partMetadataPool       = pool.Register[*partMetadata]("sidx-partMetadata")
+	blockMetadataPool      = pool.Register[*blockMetadata]("sidx-blockMetadata")
+	blockMetadataArrayPool = pool.Register[*blockMetadataArray]("sidx-blockMetadataArray")
 )
 
 // generatePartMetadata gets partMetadata from pool or creates new.
diff --git a/banyand/internal/sidx/part.go b/banyand/internal/sidx/part.go
index fb492bfe..c08af669 100644
--- a/banyand/internal/sidx/part.go
+++ b/banyand/internal/sidx/part.go
@@ -268,7 +268,6 @@ func (p *part) getPartMetadata() *partMetadata {
 	return p.partMetadata
 }
 
-
 // getTagDataReader returns the tag data reader for the specified tag name.
 func (p *part) getTagDataReader(tagName string) (fs.Reader, bool) {
 	reader, exists := p.tagData[tagName]
@@ -534,7 +533,6 @@ func openMemPart(mp *memPart) *part {
 	return p
 }
 
-
 // partPath returns the path for a part with the given epoch.
 func partPath(root string, epoch uint64) string {
 	return filepath.Join(root, partName(epoch))
diff --git a/banyand/internal/sidx/part_iter.go b/banyand/internal/sidx/part_iter.go
index abe6f09b..b8ccee98 100644
--- a/banyand/internal/sidx/part_iter.go
+++ b/banyand/internal/sidx/part_iter.go
@@ -32,17 +32,17 @@ import (
 )
 
 type partIter struct {
-	err error
-	p *part
-	curBlock *blockMetadata
-	sids []common.SeriesID
-	primaryBlockMetadata []primaryBlockMetadata
-	bms []blockMetadata
-	compressedPrimaryBuf []byte
-	primaryBuf []byte
-	sidIdx int
-	minKey int64
-	maxKey int64
+	err                  error
+	p                    *part
+	curBlock             *blockMetadata
+	sids                 []common.SeriesID
+	primaryBlockMetadata []primaryBlockMetadata
+	bms                  []blockMetadata
+	compressedPrimaryBuf []byte
+	primaryBuf           []byte
+	sidIdx               int
+	minKey               int64
+	maxKey               int64
 }
 
 func (pi *partIter) reset() {
@@ -238,8 +238,6 @@ func (pi *partIter) findBlock() bool {
 	return false
 }
 
-
-
 func (bm *blockMetadata) copyFrom(other *blockMetadata) {
 	bm.seriesID = other.seriesID
 	bm.minKey = other.minKey
@@ -248,16 +246,15 @@ func (bm *blockMetadata) copyFrom(other *blockMetadata) {
 	bm.uncompressedSize = other.uncompressedSize
 	bm.dataBlock = other.dataBlock
 	bm.keysBlock = other.keysBlock
-	
+
 	// Copy tag blocks
 	if bm.tagsBlocks == nil {
 		bm.tagsBlocks = make(map[string]dataBlock)
 	}
 	clear(bm.tagsBlocks)
 	maps.Copy(bm.tagsBlocks, other.tagsBlocks)
-	
+
 	// Copy tag projection
 	bm.tagProjection = bm.tagProjection[:0]
 	bm.tagProjection = append(bm.tagProjection, other.tagProjection...)
 }
-
diff --git a/banyand/internal/sidx/part_test.go b/banyand/internal/sidx/part_test.go
index 5b955b57..c430191e 100644
--- a/banyand/internal/sidx/part_test.go
+++ b/banyand/internal/sidx/part_test.go
@@ -429,8 +429,23 @@ func TestPartMemoryManagement(t *testing.T) {
 	metaData, err := pm.marshal()
 	require.NoError(t, err)
 
+	// Create valid primary block metadata
+	pbm := primaryBlockMetadata{
+		seriesID: 1,
+		minKey:   0,
+		maxKey:   100,
+		dataBlock: dataBlock{
+			offset: 0,
+			size:   256,
+		},
+	}
+
+	// Marshal and compress primary block metadata
+	primaryData := pbm.marshal(nil)
+	compressedPrimaryData := zstd.Compress(nil, primaryData, 1)
+
 	testFiles := map[string][]byte{
-		primaryFilename: []byte("primary with block data"),
+		primaryFilename: compressedPrimaryData,
 		dataFilename:    []byte("data"),
 		keysFilename:    []byte("keys"),
 		metaFilename:    metaData,
@@ -472,8 +487,23 @@ func TestPartStringRepresentation(t *testing.T) {
 	metaData, err := pm.marshal()
 	require.NoError(t, err)
 
+	// Create valid primary block metadata
+	pbm := primaryBlockMetadata{
+		seriesID: 1,
+		minKey:   0,
+		maxKey:   100,
+		dataBlock: dataBlock{
+			offset: 0,
+			size:   256,
+		},
+	}
+
+	// Marshal and compress primary block metadata
+	primaryData := pbm.marshal(nil)
+	compressedPrimaryData := zstd.Compress(nil, primaryData, 1)
+
 	testFiles := map[string][]byte{
-		primaryFilename: []byte("primary"),
+		primaryFilename: compressedPrimaryData,
 		dataFilename:    []byte("data"),
 		keysFilename:    []byte("keys"),
 		metaFilename:    metaData,
@@ -511,8 +541,23 @@ func BenchmarkPartOpen(b *testing.B) {
 	metaData, err := pm.marshal()
 	require.NoError(b, err)
 
+	// Create valid primary block metadata
+	pbm := primaryBlockMetadata{
+		seriesID: 1,
+		minKey:   0,
+		maxKey:   1000,
+		dataBlock: dataBlock{
+			offset: 0,
+			size:   1024,
+		},
+	}
+
+	// Marshal and compress primary block metadata
+	primaryData := pbm.marshal(nil)
+	compressedPrimaryData := zstd.Compress(nil, primaryData, 1)
+
 	testFiles := map[string][]byte{
-		primaryFilename: []byte("primary data for benchmark"),
+		primaryFilename: compressedPrimaryData,
 		dataFilename:    []byte("data content for benchmark"),
 		keysFilename:    []byte("keys content for benchmark"),
 		metaFilename:    metaData,
@@ -555,8 +600,23 @@ func BenchmarkPartTagAccess(b *testing.B) {
 	metaData, err := pm.marshal()
 	require.NoError(b, err)
 
+	// Create valid primary block metadata
+	pbm := primaryBlockMetadata{
+		seriesID: 1,
+		minKey:   0,
+		maxKey:   100,
+		dataBlock: dataBlock{
+			offset: 0,
+			size:   256,
+		},
+	}
+
+	// Marshal and compress primary block metadata
+	primaryData := pbm.marshal(nil)
+	compressedPrimaryData := zstd.Compress(nil, primaryData, 1)
+
 	testFiles := map[string][]byte{
-		primaryFilename: []byte("primary"),
+		primaryFilename: compressedPrimaryData,
 		dataFilename:    []byte("data"),
 		keysFilename:    []byte("keys"),
 		metaFilename:    metaData,
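
A note on the part_test.go changes above: the four tests and benchmarks now repeat the same setup block, presumably because the primary file has to hold bytes that actually decompress and unmarshal as primary block metadata rather than a placeholder string. As a minimal sketch (not part of this patch), the repeated block could be gathered into one test helper inside the sidx package. The helper name and parameter types are hypothetical, and the zstd import path is assumed to be the compression package part_test.go already uses:

package sidx

import (
	"github.com/apache/skywalking-banyandb/pkg/compress/zstd" // assumed path of the zstd helper used by the tests
)

// newCompressedPrimary is a hypothetical helper mirroring the setup added in
// this commit: build a minimal primaryBlockMetadata, marshal it, and
// zstd-compress the result so it can be written straight to primaryFilename.
func newCompressedPrimary(maxKey int64, size uint64) []byte {
	pbm := primaryBlockMetadata{
		seriesID: 1,
		minKey:   0,
		maxKey:   maxKey, // tests use 100; BenchmarkPartOpen uses 1000
		dataBlock: dataBlock{
			offset: 0,
			size:   size, // tests use 256; BenchmarkPartOpen uses 1024
		},
	}
	// marshal(nil) appends the encoded metadata to a fresh buffer;
	// compression level 1 matches the value hard-coded in the tests.
	return zstd.Compress(nil, pbm.marshal(nil), 1)
}

With a helper like this, each testFiles map could set primaryFilename: newCompressedPrimary(100, 256), keeping the four call sites in sync if the primary block layout changes again.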
