Copilot commented on code in PR #928:
URL: 
https://github.com/apache/skywalking-banyandb/pull/928#discussion_r2668082009


##########
banyand/metadata/discovery/file/file.go:
##########
@@ -0,0 +1,366 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package file implements file-based node discovery for distributed metadata 
management.
+package file
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "os"
+       "sync"
+       "time"
+
+       "gopkg.in/yaml.v3"
+
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
+       "github.com/apache/skywalking-banyandb/banyand/observability"
+       "github.com/apache/skywalking-banyandb/pkg/grpchelper"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+       "github.com/apache/skywalking-banyandb/pkg/run"
+)
+
+// Service implements file-based node discovery.
+type Service struct {
+       nodeCache     map[string]*databasev1.Node
+       closer        *run.Closer
+       log           *logger.Logger
+       metrics       *metrics
+       handlers      map[string]schema.EventHandler
+       filePath      string
+       grpcTimeout   time.Duration
+       fetchInterval time.Duration
+       cacheMutex    sync.RWMutex
+       handlersMutex sync.RWMutex
+}
+
+// Config holds configuration for file discovery service.
+type Config struct {
+       FilePath      string
+       GRPCTimeout   time.Duration
+       FetchInterval time.Duration
+}
+
+// NodeFileConfig represents the YAML configuration file structure.
+type NodeFileConfig struct {
+       Nodes []NodeConfig `yaml:"nodes"`
+}
+
+// NodeConfig represents a single node configuration.
+type NodeConfig struct {
+       Name       string `yaml:"name"`
+       Address    string `yaml:"grpc_address"`
+       CACertPath string `yaml:"ca_cert_path"`
+       TLSEnabled bool   `yaml:"tls_enabled"`
+}
+
+// NewService creates a new file discovery service.
+func NewService(cfg Config) (*Service, error) {
+       if cfg.FilePath == "" {
+               return nil, errors.New("file path cannot be empty")
+       }
+
+       // validate file exists and is readable
+       if _, err := os.Stat(cfg.FilePath); err != nil {
+               return nil, fmt.Errorf("failed to access file path %s: %w", 
cfg.FilePath, err)
+       }
+
+       svc := &Service{
+               filePath:      cfg.FilePath,
+               nodeCache:     make(map[string]*databasev1.Node),
+               handlers:      make(map[string]schema.EventHandler),
+               closer:        run.NewCloser(1),
+               log:           logger.GetLogger("metadata-discovery-file"),
+               grpcTimeout:   cfg.GRPCTimeout,
+               fetchInterval: cfg.FetchInterval,
+       }
+
+       return svc, nil
+}
+
+// Start begins the file discovery background process.
+func (s *Service) Start(ctx context.Context) error {
+       s.log.Debug().Str("file_path", s.filePath).Msg("Starting file-based 
node discovery service")
+
+       // initial load
+       if err := s.loadAndParseFile(ctx); err != nil {
+               return fmt.Errorf("failed to load initial configuration: %w", 
err)
+       }
+
+       // start periodic fetch loop
+       go s.periodicFetch(ctx)
+
+       return nil
+}
+
+func (s *Service) loadAndParseFile(ctx context.Context) error {
+       startTime := time.Now()
+       var parseErr error
+       defer func() {
+               if s.metrics != nil {
+                       duration := time.Since(startTime)
+                       s.metrics.fileLoadCount.Inc(1)
+                       s.metrics.fileLoadDuration.Observe(duration.Seconds())
+                       if parseErr != nil {
+                               s.metrics.fileLoadFailedCount.Inc(1)
+                       }
+               }
+       }()
+
+       data, err := os.ReadFile(s.filePath)
+       if err != nil {
+               parseErr = fmt.Errorf("failed to read file: %w", err)
+               return parseErr
+       }
+
+       var cfg NodeFileConfig
+       if err := yaml.Unmarshal(data, &cfg); err != nil {
+               parseErr = fmt.Errorf("failed to parse YAML: %w", err)
+               return parseErr
+       }
+
+       // validate required fields
+       for idx, node := range cfg.Nodes {
+               if node.Address == "" {
+                       parseErr = fmt.Errorf("node %s at index %d is missing 
required field: grpc_address", node.Name, idx)
+                       return parseErr
+               }
+               if node.TLSEnabled && node.CACertPath == "" {
+                       parseErr = fmt.Errorf("node %s at index %d has TLS 
enabled but missing ca_cert_path", node.Name, idx)
+                       return parseErr
+               }
+       }
+
+       // update cache
+       s.updateNodeCache(ctx, cfg.Nodes)
+
+       s.log.Debug().Int("node_count", len(cfg.Nodes)).Msg("Successfully 
loaded configuration file")
+       return nil
+}
+
+func (s *Service) fetchNodeMetadata(ctx context.Context, nodeConfig 
NodeConfig) (*databasev1.Node, error) {
+       ctxTimeout, cancel := context.WithTimeout(ctx, s.grpcTimeout)
+       defer cancel()
+
+       // prepare TLS options
+       dialOpts, err := grpchelper.SecureOptions(nil, nodeConfig.TLSEnabled, 
false, nodeConfig.CACertPath)
+       if err != nil {
+               return nil, fmt.Errorf("failed to load TLS config for node %s: 
%w", nodeConfig.Name, err)
+       }
+
+       // connect to node
+       // nolint:contextcheck
+       conn, connErr := grpchelper.Conn(nodeConfig.Address, s.grpcTimeout, 
dialOpts...)
+       if connErr != nil {
+               return nil, fmt.Errorf("failed to connect to %s: %w", 
nodeConfig.Address, connErr)
+       }
+       defer conn.Close()

Review Comment:
   The fetchNodeMetadata function creates a context with timeout, but 
grpchelper.Conn is invoked without it — only the plain s.grpcTimeout duration is 
passed, and the helper dials with its own internal context. The 
nolint:contextcheck at line 168 suggests this is known. As a result, connection 
establishment does not respect the caller's context or deadline; only the 
metadata RPC call does. Consider propagating ctxTimeout (or its remaining 
budget) into the dial so the entire operation is bounded by a single timeout.



##########
banyand/metadata/discovery/file/file.go:
##########
@@ -0,0 +1,366 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package file implements file-based node discovery for distributed metadata 
management.
+package file
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "os"
+       "sync"
+       "time"
+
+       "gopkg.in/yaml.v3"
+
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
+       "github.com/apache/skywalking-banyandb/banyand/observability"
+       "github.com/apache/skywalking-banyandb/pkg/grpchelper"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+       "github.com/apache/skywalking-banyandb/pkg/run"
+)
+
+// Service implements file-based node discovery.
+type Service struct {
+       nodeCache     map[string]*databasev1.Node
+       closer        *run.Closer
+       log           *logger.Logger
+       metrics       *metrics
+       handlers      map[string]schema.EventHandler
+       filePath      string
+       grpcTimeout   time.Duration
+       fetchInterval time.Duration
+       cacheMutex    sync.RWMutex
+       handlersMutex sync.RWMutex
+}
+
+// Config holds configuration for file discovery service.
+type Config struct {
+       FilePath      string
+       GRPCTimeout   time.Duration
+       FetchInterval time.Duration
+}
+
+// NodeFileConfig represents the YAML configuration file structure.
+type NodeFileConfig struct {
+       Nodes []NodeConfig `yaml:"nodes"`
+}
+
+// NodeConfig represents a single node configuration.
+type NodeConfig struct {
+       Name       string `yaml:"name"`
+       Address    string `yaml:"grpc_address"`
+       CACertPath string `yaml:"ca_cert_path"`
+       TLSEnabled bool   `yaml:"tls_enabled"`
+}
+
+// NewService creates a new file discovery service.
+func NewService(cfg Config) (*Service, error) {
+       if cfg.FilePath == "" {
+               return nil, errors.New("file path cannot be empty")
+       }
+
+       // validate file exists and is readable
+       if _, err := os.Stat(cfg.FilePath); err != nil {
+               return nil, fmt.Errorf("failed to access file path %s: %w", 
cfg.FilePath, err)
+       }
+
+       svc := &Service{
+               filePath:      cfg.FilePath,
+               nodeCache:     make(map[string]*databasev1.Node),
+               handlers:      make(map[string]schema.EventHandler),
+               closer:        run.NewCloser(1),
+               log:           logger.GetLogger("metadata-discovery-file"),
+               grpcTimeout:   cfg.GRPCTimeout,
+               fetchInterval: cfg.FetchInterval,
+       }
+
+       return svc, nil
+}
+
+// Start begins the file discovery background process.
+func (s *Service) Start(ctx context.Context) error {
+       s.log.Debug().Str("file_path", s.filePath).Msg("Starting file-based 
node discovery service")
+
+       // initial load
+       if err := s.loadAndParseFile(ctx); err != nil {
+               return fmt.Errorf("failed to load initial configuration: %w", 
err)
+       }
+
+       // start periodic fetch loop
+       go s.periodicFetch(ctx)
+
+       return nil
+}
+
+func (s *Service) loadAndParseFile(ctx context.Context) error {
+       startTime := time.Now()
+       var parseErr error
+       defer func() {
+               if s.metrics != nil {
+                       duration := time.Since(startTime)
+                       s.metrics.fileLoadCount.Inc(1)
+                       s.metrics.fileLoadDuration.Observe(duration.Seconds())
+                       if parseErr != nil {
+                               s.metrics.fileLoadFailedCount.Inc(1)
+                       }
+               }
+       }()
+
+       data, err := os.ReadFile(s.filePath)
+       if err != nil {
+               parseErr = fmt.Errorf("failed to read file: %w", err)
+               return parseErr
+       }
+
+       var cfg NodeFileConfig
+       if err := yaml.Unmarshal(data, &cfg); err != nil {
+               parseErr = fmt.Errorf("failed to parse YAML: %w", err)
+               return parseErr
+       }
+
+       // validate required fields
+       for idx, node := range cfg.Nodes {
+               if node.Address == "" {
+                       parseErr = fmt.Errorf("node %s at index %d is missing 
required field: grpc_address", node.Name, idx)
+                       return parseErr
+               }
+               if node.TLSEnabled && node.CACertPath == "" {
+                       parseErr = fmt.Errorf("node %s at index %d has TLS 
enabled but missing ca_cert_path", node.Name, idx)
+                       return parseErr
+               }
+       }
+
+       // update cache
+       s.updateNodeCache(ctx, cfg.Nodes)
+
+       s.log.Debug().Int("node_count", len(cfg.Nodes)).Msg("Successfully 
loaded configuration file")
+       return nil
+}
+
+func (s *Service) fetchNodeMetadata(ctx context.Context, nodeConfig 
NodeConfig) (*databasev1.Node, error) {
+       ctxTimeout, cancel := context.WithTimeout(ctx, s.grpcTimeout)
+       defer cancel()
+
+       // prepare TLS options
+       dialOpts, err := grpchelper.SecureOptions(nil, nodeConfig.TLSEnabled, 
false, nodeConfig.CACertPath)
+       if err != nil {
+               return nil, fmt.Errorf("failed to load TLS config for node %s: 
%w", nodeConfig.Name, err)
+       }
+
+       // connect to node
+       // nolint:contextcheck
+       conn, connErr := grpchelper.Conn(nodeConfig.Address, s.grpcTimeout, 
dialOpts...)
+       if connErr != nil {
+               return nil, fmt.Errorf("failed to connect to %s: %w", 
nodeConfig.Address, connErr)
+       }
+       defer conn.Close()
+
+       // query metadata of the node
+       client := databasev1.NewNodeQueryServiceClient(conn)
+       resp, callErr := client.GetCurrentNode(ctxTimeout, 
&databasev1.GetCurrentNodeRequest{})
+       if callErr != nil {
+               return nil, fmt.Errorf("failed to get current node from %s: 
%w", nodeConfig.Address, callErr)
+       }
+
+       return resp.GetNode(), nil
+}
+
+func (s *Service) updateNodeCache(ctx context.Context, newNodes []NodeConfig) {
+       for _, n := range newNodes {
+               s.cacheMutex.RLock()
+               _, exists := s.nodeCache[n.Address]
+               s.cacheMutex.RUnlock()
+
+               if !exists {
+                       // fetch node metadata from gRPC
+                       node, fetchErr := s.fetchNodeMetadata(ctx, n)
+                       if fetchErr != nil {
+                               s.log.Warn().
+                                       Err(fetchErr).
+                                       Str("node", n.Name).
+                                       Str("address", n.Address).
+                                       Msg("Failed to fetch node metadata, 
will skip")
+                               continue
+                       }
+
+                       s.cacheMutex.Lock()
+                       if _, alreadyAdded := s.nodeCache[n.Address]; 
!alreadyAdded {
+                               s.nodeCache[n.Address] = node
+
+                               // notify handlers after releasing lock
+                               s.notifyHandlers(schema.Metadata{
+                                       TypeMeta: schema.TypeMeta{
+                                               Kind: schema.KindNode,
+                                               Name: 
node.GetMetadata().GetName(),
+                                       },
+                                       Spec: node,
+                               }, true)
+
+                               s.log.Debug().
+                                       Str("address", n.Address).
+                                       Str("name", 
node.GetMetadata().GetName()).
+                                       Msg("New node discovered and added to 
cache")
+                       }
+                       s.cacheMutex.Unlock()

Review Comment:
   The notifyHandlers call at lines 208-214 is made while holding the cacheMutex 
lock. If a handler performs slow operations or calls back into the service, 
this could cause deadlocks or performance issues. Handlers should be notified 
after the lock is released, similar to how deletion notifications are handled 
at lines 250-258.



##########
banyand/metadata/discovery/file/file_test.go:
##########
@@ -0,0 +1,461 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package file
+
+import (
+       "context"
+       "fmt"
+       "net"
+       "os"
+       "path/filepath"
+       "sync"
+       "testing"
+       "time"
+
+       "github.com/stretchr/testify/assert"
+       "github.com/stretchr/testify/require"
+       "google.golang.org/grpc"
+       "google.golang.org/grpc/health"
+       grpc_health_v1 "google.golang.org/grpc/health/grpc_health_v1"
+       "google.golang.org/protobuf/types/known/timestamppb"
+
+       commonv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
+)
+
+const (
+       testGRPCTimeout   = 2 * time.Second
+       testFetchInterval = 200 * time.Millisecond
+)
+
+func TestNewService(t *testing.T) {
+       t.Run("valid config", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+    grpc_address: 127.0.0.1:17912
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               require.NotNil(t, svc)
+               require.NoError(t, svc.Close())
+       })
+
+       t.Run("empty file path", func(t *testing.T) {
+               _, err := NewService(Config{FilePath: ""})
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "file path cannot be empty")
+       })
+
+       t.Run("non-existent file", func(t *testing.T) {
+               _, err := NewService(Config{
+                       FilePath:      "/not/exist",
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "failed to access file path")
+       })
+}
+
+func TestStartWithInvalidConfig(t *testing.T) {
+       ctx := context.Background()
+
+       t.Run("invalid yaml", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+    grpc_address: [invalid
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               defer svc.Close()
+
+               err = svc.Start(ctx)
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "failed to parse YAML")
+       })
+
+       t.Run("missing address", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               defer svc.Close()
+
+               err = svc.Start(ctx)
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "missing required field: 
grpc_address")
+       })
+
+       t.Run("tls enabled without ca cert", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+    grpc_address: 127.0.0.1:17912
+    tls_enabled: true
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               defer svc.Close()
+
+               err = svc.Start(ctx)
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "missing ca_cert_path")
+       })
+}
+
+func TestStartAndCacheNodes(t *testing.T) {
+       listener, grpcServer, nodeServer := startMockGRPCServer(t)
+       defer grpcServer.Stop()
+       defer listener.Close()
+
+       nodeName := "test-node"
+       serverAddr := listener.Addr().String()
+       nodeServer.setNode(newTestNode(nodeName, serverAddr))
+
+       configFile := createTempConfigFile(t, fmt.Sprintf(`
+nodes:
+  - name: %s
+    grpc_address: %s
+`, nodeName, serverAddr))
+       defer os.Remove(configFile)
+
+       svc, err := NewService(Config{
+               FilePath:      configFile,
+               GRPCTimeout:   testGRPCTimeout,
+               FetchInterval: testFetchInterval,
+       })
+       require.NoError(t, err)
+       defer svc.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       require.NoError(t, svc.Start(ctx))
+
+       nodes, listErr := svc.ListNode(ctx, databasev1.Role_ROLE_UNSPECIFIED)
+       require.NoError(t, listErr)
+       require.Len(t, nodes, 1)
+       assert.Equal(t, nodeName, nodes[0].GetMetadata().GetName())
+       assert.Equal(t, serverAddr, nodes[0].GetGrpcAddress())
+
+       nodeFromCache, getErr := svc.GetNode(ctx, nodeName)
+       require.NoError(t, getErr)
+       assert.Equal(t, nodeName, nodeFromCache.GetMetadata().GetName())
+}
+
+func TestHandlerNotifications(t *testing.T) {
+       listenerOne, grpcServerOne, nodeServerOne := startMockGRPCServer(t)
+       defer grpcServerOne.Stop()
+       defer listenerOne.Close()
+       addrOne := listenerOne.Addr().String()
+       nodeServerOne.setNode(newTestNode("node-one", addrOne))
+
+       listenerTwo, grpcServerTwo, nodeServerTwo := startMockGRPCServer(t)
+       defer grpcServerTwo.Stop()
+       defer listenerTwo.Close()
+       addrTwo := listenerTwo.Addr().String()
+       nodeServerTwo.setNode(newTestNode("node-two", addrTwo))
+
+       configFile := createTempConfigFile(t, fmt.Sprintf(`
+nodes:
+  - name: node-one
+    grpc_address: %s
+`, addrOne))
+       defer os.Remove(configFile)
+
+       svc, err := NewService(Config{
+               FilePath:      configFile,
+               GRPCTimeout:   testGRPCTimeout,
+               FetchInterval: testFetchInterval,
+       })
+       require.NoError(t, err)
+       defer svc.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+
+       var mu sync.Mutex
+       added := make([]string, 0)
+       deleted := make([]string, 0)
+       handler := &testEventHandler{
+               onAdd: func(metadata schema.Metadata) {
+                       mu.Lock()
+                       defer mu.Unlock()
+                       added = append(added, metadata.Name)
+               },
+               onDelete: func(metadata schema.Metadata) {
+                       mu.Lock()
+                       defer mu.Unlock()
+                       deleted = append(deleted, metadata.Name)
+               },
+       }
+       svc.RegisterHandler("test", handler)
+
+       require.NoError(t, svc.Start(ctx))
+
+       require.Eventually(t, func() bool {
+               mu.Lock()
+               defer mu.Unlock()
+               return len(added) == 1 && added[0] == "node-one"
+       }, 3*time.Second, 50*time.Millisecond)
+
+       updateConfigFile(t, configFile, fmt.Sprintf(`
+nodes:
+  - name: node-two
+    grpc_address: %s
+`, addrTwo))
+
+       require.Eventually(t, func() bool {
+               mu.Lock()
+               defer mu.Unlock()
+               return len(added) == 2 && added[1] == "node-two" && 
len(deleted) == 1 && deleted[0] == "node-one"
+       }, 5*time.Second, 50*time.Millisecond)
+
+       nodes, listErr := svc.ListNode(ctx, databasev1.Role_ROLE_UNSPECIFIED)
+       require.NoError(t, listErr)
+       require.Len(t, nodes, 1)
+       assert.Equal(t, "node-two", nodes[0].GetMetadata().GetName())
+}
+
+func TestListNodeRoleFilter(t *testing.T) {
+       listener, grpcServer, nodeServer := startMockGRPCServer(t)
+       defer grpcServer.Stop()
+       defer listener.Close()
+       addr := listener.Addr().String()
+       nodeServer.setNode(newTestNode("role-test-node", addr))
+
+       configFile := createTempConfigFile(t, fmt.Sprintf(`
+nodes:
+  - name: role-test-node
+    grpc_address: %s
+`, addr))
+       defer os.Remove(configFile)
+
+       svc, err := NewService(Config{
+               FilePath:      configFile,
+               GRPCTimeout:   testGRPCTimeout,
+               FetchInterval: testFetchInterval,
+       })
+       require.NoError(t, err)
+       defer svc.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       require.NoError(t, svc.Start(ctx))
+
+       nodes, listErr := svc.ListNode(ctx, databasev1.Role_ROLE_UNSPECIFIED)
+       require.NoError(t, listErr)
+       require.Len(t, nodes, 1)
+
+       nodes, listErr = svc.ListNode(ctx, databasev1.Role_ROLE_DATA)
+       require.NoError(t, listErr)
+       assert.Empty(t, nodes)
+}
+
+func TestGetNode(t *testing.T) {
+       listener, grpcServer, nodeServer := startMockGRPCServer(t)
+       defer grpcServer.Stop()
+       defer listener.Close()
+       addr := listener.Addr().String()
+       nodeServer.setNode(newTestNode("cached-node", addr))
+
+       configFile := createTempConfigFile(t, fmt.Sprintf(`
+nodes:
+  - name: cached-node
+    grpc_address: %s
+`, addr))
+       defer os.Remove(configFile)
+
+       svc, err := NewService(Config{
+               FilePath:      configFile,
+               GRPCTimeout:   testGRPCTimeout,
+               FetchInterval: testFetchInterval,
+       })
+       require.NoError(t, err)
+       defer svc.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       require.NoError(t, svc.Start(ctx))
+
+       node, getErr := svc.GetNode(ctx, addr)
+       require.NoError(t, getErr)
+       assert.Equal(t, "cached-node", node.GetMetadata().GetName())
+
+       _, getErr = svc.GetNode(ctx, "non-existent")
+       require.Error(t, getErr)
+       assert.Contains(t, getErr.Error(), "not found")
+}
+
+func TestConcurrentAccess(t *testing.T) {
+       listener, grpcServer, nodeServer := startMockGRPCServer(t)
+       defer grpcServer.Stop()
+       defer listener.Close()
+       addr := listener.Addr().String()
+       nodeServer.setNode(newTestNode("concurrent-node", addr))
+
+       configFile := createTempConfigFile(t, fmt.Sprintf(`
+nodes:
+  - name: concurrent-node
+    grpc_address: %s
+`, addr))
+       defer os.Remove(configFile)
+
+       svc, err := NewService(Config{
+               FilePath:      configFile,
+               GRPCTimeout:   testGRPCTimeout,
+               FetchInterval: testFetchInterval,
+       })
+       require.NoError(t, err)
+       defer svc.Close()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       require.NoError(t, svc.Start(ctx))
+
+       var wg sync.WaitGroup
+       for i := 0; i < 10; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for j := 0; j < 100; j++ {
+                               if _, errList := svc.ListNode(ctx, 
databasev1.Role_ROLE_UNSPECIFIED); errList != nil {
+                                       t.Errorf("list node: %v", errList)
+                               }
+                               if _, errGet := svc.GetNode(ctx, 
"concurrent-node"); errGet != nil {
+                                       t.Errorf("get node: %v", errGet)
+                               }
+                       }
+               }()
+       }
+
+       wg.Wait()
+}

Review Comment:
   The test uses small intervals (200ms) for fetchInterval, but doesn't verify 
that the file is actually reloaded at that interval. Consider adding a test 
that verifies the periodic reload behavior by modifying the file after initial 
load and checking that changes are detected within the expected interval.



##########
banyand/metadata/discovery/file/file.go:
##########
@@ -0,0 +1,366 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package file implements file-based node discovery for distributed metadata 
management.
+package file
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "os"
+       "sync"
+       "time"
+
+       "gopkg.in/yaml.v3"
+
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
+       "github.com/apache/skywalking-banyandb/banyand/observability"
+       "github.com/apache/skywalking-banyandb/pkg/grpchelper"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+       "github.com/apache/skywalking-banyandb/pkg/run"
+)
+
+// Service implements file-based node discovery.
+type Service struct {
+       nodeCache     map[string]*databasev1.Node
+       closer        *run.Closer
+       log           *logger.Logger
+       metrics       *metrics
+       handlers      map[string]schema.EventHandler
+       filePath      string
+       grpcTimeout   time.Duration
+       fetchInterval time.Duration
+       cacheMutex    sync.RWMutex
+       handlersMutex sync.RWMutex
+}
+
+// Config holds configuration for file discovery service.
+type Config struct {
+       FilePath      string
+       GRPCTimeout   time.Duration
+       FetchInterval time.Duration
+}
+
+// NodeFileConfig represents the YAML configuration file structure.
+type NodeFileConfig struct {
+       Nodes []NodeConfig `yaml:"nodes"`
+}
+
+// NodeConfig represents a single node configuration.
+type NodeConfig struct {
+       Name       string `yaml:"name"`
+       Address    string `yaml:"grpc_address"`
+       CACertPath string `yaml:"ca_cert_path"`
+       TLSEnabled bool   `yaml:"tls_enabled"`
+}
+
+// NewService creates a new file discovery service.
+func NewService(cfg Config) (*Service, error) {
+       if cfg.FilePath == "" {
+               return nil, errors.New("file path cannot be empty")
+       }
+
+       // validate file exists and is readable
+       if _, err := os.Stat(cfg.FilePath); err != nil {
+               return nil, fmt.Errorf("failed to access file path %s: %w", 
cfg.FilePath, err)
+       }
+
+       svc := &Service{
+               filePath:      cfg.FilePath,
+               nodeCache:     make(map[string]*databasev1.Node),
+               handlers:      make(map[string]schema.EventHandler),
+               closer:        run.NewCloser(1),
+               log:           logger.GetLogger("metadata-discovery-file"),
+               grpcTimeout:   cfg.GRPCTimeout,
+               fetchInterval: cfg.FetchInterval,
+       }
+
+       return svc, nil
+}
+
+// Start begins the file discovery background process.
+func (s *Service) Start(ctx context.Context) error {
+       s.log.Debug().Str("file_path", s.filePath).Msg("Starting file-based 
node discovery service")
+
+       // initial load
+       if err := s.loadAndParseFile(ctx); err != nil {
+               return fmt.Errorf("failed to load initial configuration: %w", 
err)
+       }
+
+       // start periodic fetch loop
+       go s.periodicFetch(ctx)
+
+       return nil
+}
+
+func (s *Service) loadAndParseFile(ctx context.Context) error {
+       startTime := time.Now()
+       var parseErr error
+       defer func() {
+               if s.metrics != nil {
+                       duration := time.Since(startTime)
+                       s.metrics.fileLoadCount.Inc(1)
+                       s.metrics.fileLoadDuration.Observe(duration.Seconds())
+                       if parseErr != nil {
+                               s.metrics.fileLoadFailedCount.Inc(1)
+                       }
+               }
+       }()
+
+       data, err := os.ReadFile(s.filePath)
+       if err != nil {
+               parseErr = fmt.Errorf("failed to read file: %w", err)
+               return parseErr
+       }
+
+       var cfg NodeFileConfig
+       if err := yaml.Unmarshal(data, &cfg); err != nil {
+               parseErr = fmt.Errorf("failed to parse YAML: %w", err)
+               return parseErr
+       }
+
+       // validate required fields
+       for idx, node := range cfg.Nodes {
+               if node.Address == "" {
+                       parseErr = fmt.Errorf("node %s at index %d is missing 
required field: grpc_address", node.Name, idx)
+                       return parseErr
+               }
+               if node.TLSEnabled && node.CACertPath == "" {
+                       parseErr = fmt.Errorf("node %s at index %d has TLS 
enabled but missing ca_cert_path", node.Name, idx)
+                       return parseErr
+               }
+       }
+
+       // update cache
+       s.updateNodeCache(ctx, cfg.Nodes)
+
+       s.log.Debug().Int("node_count", len(cfg.Nodes)).Msg("Successfully 
loaded configuration file")
+       return nil
+}
+
+func (s *Service) fetchNodeMetadata(ctx context.Context, nodeConfig 
NodeConfig) (*databasev1.Node, error) {
+       ctxTimeout, cancel := context.WithTimeout(ctx, s.grpcTimeout)
+       defer cancel()
+
+       // prepare TLS options
+       dialOpts, err := grpchelper.SecureOptions(nil, nodeConfig.TLSEnabled, 
false, nodeConfig.CACertPath)
+       if err != nil {
+               return nil, fmt.Errorf("failed to load TLS config for node %s: 
%w", nodeConfig.Name, err)
+       }
+
+       // connect to node
+       // nolint:contextcheck
+       conn, connErr := grpchelper.Conn(nodeConfig.Address, s.grpcTimeout, 
dialOpts...)
+       if connErr != nil {
+               return nil, fmt.Errorf("failed to connect to %s: %w", 
nodeConfig.Address, connErr)
+       }
+       defer conn.Close()
+
+       // query metadata of the node
+       client := databasev1.NewNodeQueryServiceClient(conn)
+       resp, callErr := client.GetCurrentNode(ctxTimeout, 
&databasev1.GetCurrentNodeRequest{})
+       if callErr != nil {
+               return nil, fmt.Errorf("failed to get current node from %s: 
%w", nodeConfig.Address, callErr)
+       }
+
+       return resp.GetNode(), nil
+}
+
+func (s *Service) updateNodeCache(ctx context.Context, newNodes []NodeConfig) {
+       for _, n := range newNodes {
+               s.cacheMutex.RLock()
+               _, exists := s.nodeCache[n.Address]
+               s.cacheMutex.RUnlock()
+
+               if !exists {
+                       // fetch node metadata from gRPC
+                       node, fetchErr := s.fetchNodeMetadata(ctx, n)
+                       if fetchErr != nil {
+                               s.log.Warn().
+                                       Err(fetchErr).
+                                       Str("node", n.Name).
+                                       Str("address", n.Address).
+                                       Msg("Failed to fetch node metadata, 
will skip")
+                               continue
+                       }
+
+                       s.cacheMutex.Lock()
+                       if _, alreadyAdded := s.nodeCache[n.Address]; 
!alreadyAdded {
+                               s.nodeCache[n.Address] = node
+
+                               // notify handlers after releasing lock
+                               s.notifyHandlers(schema.Metadata{
+                                       TypeMeta: schema.TypeMeta{
+                                               Kind: schema.KindNode,
+                                               Name: 
node.GetMetadata().GetName(),
+                                       },
+                                       Spec: node,
+                               }, true)
+
+                               s.log.Debug().
+                                       Str("address", n.Address).
+                                       Str("name", 
node.GetMetadata().GetName()).
+                                       Msg("New node discovered and added to 
cache")
+                       }
+                       s.cacheMutex.Unlock()

Review Comment:
   There is a time-of-check/time-of-use gap between the RLock existence check at line 187-189 and the Lock at line 203: two goroutines can both pass the check and both fetch metadata for the same node over gRPC. Note that the write-lock path already re-checks existence before inserting, so the cache itself stays consistent — the cost of the race is duplicated gRPC fetches, not duplicate entries. Consider holding a single write lock across the check-fetch-insert sequence, or deduplicating in-flight fetches (e.g. with golang.org/x/sync/singleflight). Separately, notifyHandlers is invoked while cacheMutex is still held, despite the adjacent comment saying "notify handlers after releasing lock" — if a handler calls back into ListNode/GetNode, which take cacheMutex.RLock, this will deadlock. Move the notification outside the critical section.



##########
banyand/metadata/client.go:
##########
@@ -133,13 +139,19 @@ func (s *clientService) FlagSet() *run.FlagSet {
                "Enable TLS for DNS discovery gRPC connections")
        fs.StringSliceVar(&s.dnsCACertPaths, "node-discovery-dns-ca-certs", 
[]string{},
                "Comma-separated list of CA certificate files to verify DNS 
discovered nodes (one per SRV address, in same order)")
+       fs.StringVar(&s.filePath, "node-discovery-file-path", "",
+               "File path for static node configuration (file mode only)")
+       fs.DurationVar(&s.fileFetchInterval, 
"node-discovery-file-fetch-interval", 20*time.Second,
+               "Fetch file interval for nodes in file discovery mode")

Review Comment:
   The flag description says "Fetch file interval" but should say "Fetch 
interval for file". Also, it mentions "for nodes" which is redundant given the 
context. Consider rewording to: "Interval to poll the discovery file and retry 
failed nodes in file discovery mode".
   ```suggestion
                "Interval to poll the discovery file and retry failed nodes in 
file discovery mode")
   ```



##########
banyand/metadata/client.go:
##########
@@ -133,13 +139,19 @@ func (s *clientService) FlagSet() *run.FlagSet {
                "Enable TLS for DNS discovery gRPC connections")
        fs.StringSliceVar(&s.dnsCACertPaths, "node-discovery-dns-ca-certs", 
[]string{},
                "Comma-separated list of CA certificate files to verify DNS 
discovered nodes (one per SRV address, in same order)")
+       fs.StringVar(&s.filePath, "node-discovery-file-path", "",
+               "File path for static node configuration (file mode only)")
+       fs.DurationVar(&s.fileFetchInterval, 
"node-discovery-file-fetch-interval", 20*time.Second,
+               "Fetch file interval for nodes in file discovery mode")

Review Comment:
   The flag name uses "fetch-interval" but the documentation calls it 
"retry-interval". The flag should be renamed to 
"node-discovery-file-retry-interval" to match the documentation at line 311, or 
the documentation should be updated to use "fetch-interval".
   ```suggestion
        fs.DurationVar(&s.fileFetchInterval, 
"node-discovery-file-retry-interval", 20*time.Second,
                "Retry interval for reading node configuration file in file 
discovery mode")
   ```



##########
banyand/metadata/client.go:
##########
@@ -230,8 +252,23 @@ func (s *clientService) PreRun(ctx context.Context) error {
                }
        }
 
-       // skip node registration if DNS mode is enabled or node registration 
is disabled
-       if !s.toRegisterNode || s.nodeDiscoveryMode == NodeDiscoveryModeDNS {
+       if s.nodeDiscoveryMode == NodeDiscoveryModeFile {
+               l.Info().Str("file-path", s.filePath).Msg("Initializing 
file-based node discovery")
+
+               var createErr error
+               s.fileDiscovery, createErr = file.NewService(file.Config{
+                       FilePath:      s.filePath,
+                       GRPCTimeout:   s.grpcTimeout,
+                       FetchInterval: s.fileFetchInterval,
+               })
+               if createErr != nil {
+                       return fmt.Errorf("failed to create file discovery 
service: %w", createErr)
+               }
+       }
+
+       // skip node registration if DNS/file mode is enabled or node 
registration is disabled
+       if !s.toRegisterNode || s.nodeDiscoveryMode == NodeDiscoveryModeDNS ||
+               s.nodeDiscoveryMode == NodeDiscoveryModeFile {

Review Comment:
   The comment says "skip node registration if DNS/file mode is enabled" but understates the condition: registration is skipped when the discovery mode is DNS or File, or when node registration is disabled via toRegisterNode (the condition is a disjunction). The comment should be more precise: "skip node registration if DNS/file mode is enabled (these modes don't support manual registration) or node registration is disabled".



##########
banyand/metadata/discovery/file/file.go:
##########
@@ -0,0 +1,366 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package file implements file-based node discovery for distributed metadata 
management.
+package file
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "os"
+       "sync"
+       "time"
+
+       "gopkg.in/yaml.v3"
+
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
+       "github.com/apache/skywalking-banyandb/banyand/observability"
+       "github.com/apache/skywalking-banyandb/pkg/grpchelper"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+       "github.com/apache/skywalking-banyandb/pkg/run"
+)
+
+// Service implements file-based node discovery.
+type Service struct {
+       nodeCache     map[string]*databasev1.Node
+       closer        *run.Closer
+       log           *logger.Logger
+       metrics       *metrics
+       handlers      map[string]schema.EventHandler
+       filePath      string
+       grpcTimeout   time.Duration
+       fetchInterval time.Duration
+       cacheMutex    sync.RWMutex
+       handlersMutex sync.RWMutex
+}
+
+// Config holds configuration for file discovery service.
+type Config struct {
+       FilePath      string
+       GRPCTimeout   time.Duration
+       FetchInterval time.Duration
+}
+
+// NodeFileConfig represents the YAML configuration file structure.
+type NodeFileConfig struct {
+       Nodes []NodeConfig `yaml:"nodes"`
+}
+
+// NodeConfig represents a single node configuration.
+type NodeConfig struct {
+       Name       string `yaml:"name"`
+       Address    string `yaml:"grpc_address"`
+       CACertPath string `yaml:"ca_cert_path"`
+       TLSEnabled bool   `yaml:"tls_enabled"`
+}
+
+// NewService creates a new file discovery service.
+func NewService(cfg Config) (*Service, error) {
+       if cfg.FilePath == "" {
+               return nil, errors.New("file path cannot be empty")
+       }
+
+       // validate file exists and is readable
+       if _, err := os.Stat(cfg.FilePath); err != nil {
+               return nil, fmt.Errorf("failed to access file path %s: %w", 
cfg.FilePath, err)
+       }
+
+       svc := &Service{
+               filePath:      cfg.FilePath,
+               nodeCache:     make(map[string]*databasev1.Node),
+               handlers:      make(map[string]schema.EventHandler),
+               closer:        run.NewCloser(1),
+               log:           logger.GetLogger("metadata-discovery-file"),
+               grpcTimeout:   cfg.GRPCTimeout,
+               fetchInterval: cfg.FetchInterval,
+       }
+
+       return svc, nil
+}
+
+// Start begins the file discovery background process.
+func (s *Service) Start(ctx context.Context) error {
+       s.log.Debug().Str("file_path", s.filePath).Msg("Starting file-based 
node discovery service")
+
+       // initial load
+       if err := s.loadAndParseFile(ctx); err != nil {
+               return fmt.Errorf("failed to load initial configuration: %w", 
err)
+       }
+
+       // start periodic fetch loop
+       go s.periodicFetch(ctx)
+
+       return nil
+}
+
+func (s *Service) loadAndParseFile(ctx context.Context) error {
+       startTime := time.Now()
+       var parseErr error
+       defer func() {
+               if s.metrics != nil {
+                       duration := time.Since(startTime)
+                       s.metrics.fileLoadCount.Inc(1)
+                       s.metrics.fileLoadDuration.Observe(duration.Seconds())
+                       if parseErr != nil {
+                               s.metrics.fileLoadFailedCount.Inc(1)
+                       }
+               }
+       }()
+
+       data, err := os.ReadFile(s.filePath)
+       if err != nil {
+               parseErr = fmt.Errorf("failed to read file: %w", err)
+               return parseErr
+       }
+
+       var cfg NodeFileConfig
+       if err := yaml.Unmarshal(data, &cfg); err != nil {
+               parseErr = fmt.Errorf("failed to parse YAML: %w", err)
+               return parseErr
+       }
+
+       // validate required fields
+       for idx, node := range cfg.Nodes {
+               if node.Address == "" {
+                       parseErr = fmt.Errorf("node %s at index %d is missing 
required field: grpc_address", node.Name, idx)
+                       return parseErr
+               }
+               if node.TLSEnabled && node.CACertPath == "" {
+                       parseErr = fmt.Errorf("node %s at index %d has TLS 
enabled but missing ca_cert_path", node.Name, idx)
+                       return parseErr
+               }
+       }
+
+       // update cache
+       s.updateNodeCache(ctx, cfg.Nodes)
+
+       s.log.Debug().Int("node_count", len(cfg.Nodes)).Msg("Successfully 
loaded configuration file")
+       return nil
+}
+
+func (s *Service) fetchNodeMetadata(ctx context.Context, nodeConfig 
NodeConfig) (*databasev1.Node, error) {
+       ctxTimeout, cancel := context.WithTimeout(ctx, s.grpcTimeout)
+       defer cancel()
+
+       // prepare TLS options
+       dialOpts, err := grpchelper.SecureOptions(nil, nodeConfig.TLSEnabled, 
false, nodeConfig.CACertPath)
+       if err != nil {
+               return nil, fmt.Errorf("failed to load TLS config for node %s: 
%w", nodeConfig.Name, err)
+       }
+
+       // connect to node
+       // nolint:contextcheck
+       conn, connErr := grpchelper.Conn(nodeConfig.Address, s.grpcTimeout, 
dialOpts...)
+       if connErr != nil {
+               return nil, fmt.Errorf("failed to connect to %s: %w", 
nodeConfig.Address, connErr)
+       }
+       defer conn.Close()
+
+       // query metadata of the node
+       client := databasev1.NewNodeQueryServiceClient(conn)
+       resp, callErr := client.GetCurrentNode(ctxTimeout, 
&databasev1.GetCurrentNodeRequest{})
+       if callErr != nil {
+               return nil, fmt.Errorf("failed to get current node from %s: 
%w", nodeConfig.Address, callErr)
+       }
+
+       return resp.GetNode(), nil
+}
+
+func (s *Service) updateNodeCache(ctx context.Context, newNodes []NodeConfig) {
+       for _, n := range newNodes {
+               s.cacheMutex.RLock()
+               _, exists := s.nodeCache[n.Address]
+               s.cacheMutex.RUnlock()
+
+               if !exists {
+                       // fetch node metadata from gRPC
+                       node, fetchErr := s.fetchNodeMetadata(ctx, n)
+                       if fetchErr != nil {
+                               s.log.Warn().
+                                       Err(fetchErr).
+                                       Str("node", n.Name).
+                                       Str("address", n.Address).
+                                       Msg("Failed to fetch node metadata, 
will skip")
+                               continue
+                       }
+
+                       s.cacheMutex.Lock()
+                       if _, alreadyAdded := s.nodeCache[n.Address]; 
!alreadyAdded {
+                               s.nodeCache[n.Address] = node
+
+                               // notify handlers after releasing lock
+                               s.notifyHandlers(schema.Metadata{
+                                       TypeMeta: schema.TypeMeta{
+                                               Kind: schema.KindNode,
+                                               Name: 
node.GetMetadata().GetName(),
+                                       },
+                                       Spec: node,
+                               }, true)
+
+                               s.log.Debug().
+                                       Str("address", n.Address).
+                                       Str("name", 
node.GetMetadata().GetName()).
+                                       Msg("New node discovered and added to 
cache")
+                       }
+                       s.cacheMutex.Unlock()
+               }
+       }

Review Comment:
   The logic skips nodes that are already in the cache, but the documentation 
at line 295 states "Reload the file at a configured interval, reprocessing all 
nodes (including previously failed ones)". This creates a documentation-code 
mismatch. Either the code should be updated to reprocess all nodes (to detect 
changes), or the documentation should clarify that successfully cached nodes 
are not reprocessed.



##########
banyand/metadata/discovery/file/file_test.go:
##########
@@ -0,0 +1,461 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package file
+
+import (
+       "context"
+       "fmt"
+       "net"
+       "os"
+       "path/filepath"
+       "sync"
+       "testing"
+       "time"
+
+       "github.com/stretchr/testify/assert"
+       "github.com/stretchr/testify/require"
+       "google.golang.org/grpc"
+       "google.golang.org/grpc/health"
+       grpc_health_v1 "google.golang.org/grpc/health/grpc_health_v1"
+       "google.golang.org/protobuf/types/known/timestamppb"
+
+       commonv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
+)
+
+const (
+       testGRPCTimeout   = 2 * time.Second
+       testFetchInterval = 200 * time.Millisecond
+)
+
+func TestNewService(t *testing.T) {
+       t.Run("valid config", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+    grpc_address: 127.0.0.1:17912
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               require.NotNil(t, svc)
+               require.NoError(t, svc.Close())
+       })
+
+       t.Run("empty file path", func(t *testing.T) {
+               _, err := NewService(Config{FilePath: ""})
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "file path cannot be empty")
+       })
+
+       t.Run("non-existent file", func(t *testing.T) {
+               _, err := NewService(Config{
+                       FilePath:      "/not/exist",
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "failed to access file path")
+       })
+}
+
+func TestStartWithInvalidConfig(t *testing.T) {
+       ctx := context.Background()
+
+       t.Run("invalid yaml", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+    grpc_address: [invalid
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               defer svc.Close()
+
+               err = svc.Start(ctx)
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "failed to parse YAML")
+       })
+
+       t.Run("missing address", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               defer svc.Close()
+
+               err = svc.Start(ctx)
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "missing required field: 
grpc_address")
+       })
+
+       t.Run("tls enabled without ca cert", func(t *testing.T) {
+               configFile := createTempConfigFile(t, `
+nodes:
+  - name: node1
+    grpc_address: 127.0.0.1:17912
+    tls_enabled: true
+`)
+               defer os.Remove(configFile)
+
+               svc, err := NewService(Config{
+                       FilePath:      configFile,
+                       GRPCTimeout:   testGRPCTimeout,
+                       FetchInterval: testFetchInterval,
+               })
+               require.NoError(t, err)
+               defer svc.Close()
+
+               err = svc.Start(ctx)
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "missing ca_cert_path")
+       })
+}

Review Comment:
   The documentation at line 294 states "Nodes that fail to connect are skipped 
and will be attempted again on the next periodic file reload". However, there's 
no test coverage for this retry behavior. Consider adding a test that starts 
with an unreachable node, then makes it reachable, and verifies it gets added 
to the cache after the retry interval.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to