Copilot commented on code in PR #918:
URL: 
https://github.com/apache/skywalking-banyandb/pull/918#discussion_r2650620212


##########
fodc/proxy/internal/grpc/service.go:
##########
@@ -0,0 +1,380 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package grpc
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "io"
+       "strings"
+       "sync"
+       "time"
+
+       "google.golang.org/grpc"
+       "google.golang.org/grpc/codes"
+       "google.golang.org/grpc/metadata"
+       "google.golang.org/grpc/peer"
+       "google.golang.org/grpc/status"
+       "google.golang.org/protobuf/types/known/timestamppb"
+
+       fodcv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/fodc/v1"
+       "github.com/apache/skywalking-banyandb/fodc/proxy/internal/metrics"
+       "github.com/apache/skywalking-banyandb/fodc/proxy/internal/registry"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+)
+
+// AgentConnection represents a connection to an agent.
+type AgentConnection struct {
+       MetricsStream fodcv1.FODCService_StreamMetricsServer
+       Context       context.Context
+       Stream        grpc.ServerStream
+       Cancel        context.CancelFunc
+       LastActivity  time.Time
+       AgentID       string
+       Identity      registry.AgentIdentity
+       mu            sync.RWMutex
+}
+
+// UpdateActivity updates the last activity time.
+func (ac *AgentConnection) UpdateActivity() {
+       ac.mu.Lock()
+       defer ac.mu.Unlock()
+       ac.LastActivity = time.Now()
+}
+
+// GetLastActivity returns the last activity time.
+func (ac *AgentConnection) GetLastActivity() time.Time {
+       ac.mu.RLock()
+       defer ac.mu.RUnlock()
+       return ac.LastActivity
+}
+
+// FODCService implements the FODC gRPC service.
+type FODCService struct {
+       fodcv1.UnimplementedFODCServiceServer
+       registry          *registry.AgentRegistry
+       metricsAggregator *metrics.Aggregator
+       logger            *logger.Logger
+       connections       map[string]*AgentConnection
+       connectionsMu     sync.RWMutex
+       heartbeatInterval time.Duration
+}
+
+// NewFODCService creates a new FODCService instance.
+func NewFODCService(registry *registry.AgentRegistry, metricsAggregator 
*metrics.Aggregator, logger *logger.Logger, heartbeatInterval time.Duration) 
*FODCService {
+       return &FODCService{
+               registry:          registry,
+               metricsAggregator: metricsAggregator,
+               logger:            logger,
+               connections:       make(map[string]*AgentConnection),
+               heartbeatInterval: heartbeatInterval,
+       }
+}
+
+// RegisterAgent handles bi-directional agent registration stream.
+func (s *FODCService) RegisterAgent(stream 
fodcv1.FODCService_RegisterAgentServer) error {
+       ctx, cancel := context.WithCancel(stream.Context())
+       defer cancel()
+
+       var agentID string
+       var agentConn *AgentConnection
+       initialRegistration := true
+
+       for {
+               req, recvErr := stream.Recv()
+               if errors.Is(recvErr, io.EOF) {
+                       s.logger.Debug().Str("agent_id", 
agentID).Msg("Registration stream closed by agent")
+                       break
+               }
+               if recvErr != nil {
+                       s.logger.Error().Err(recvErr).Str("agent_id", 
agentID).Msg("Error receiving registration request")
+                       return recvErr
+               }
+
+               if initialRegistration {
+                       identity := registry.AgentIdentity{
+                               IP:     req.PrimaryAddress.Ip,
+                               Port:   int(req.PrimaryAddress.Port),
+                               Role:   req.NodeRole,
+                               Labels: req.Labels,
+                       }
+
+                       primaryAddr := registry.Address{
+                               IP:   req.PrimaryAddress.Ip,
+                               Port: int(req.PrimaryAddress.Port),
+                       }
+
+                       registeredAgentID, registerErr := 
s.registry.RegisterAgent(ctx, identity, primaryAddr)
+                       if registerErr != nil {
+                               resp := &fodcv1.RegisterAgentResponse{
+                                       Success: false,
+                                       Message: registerErr.Error(),
+                               }
+                               if sendErr := stream.Send(resp); sendErr != nil 
{
+                                       
s.logger.Error().Err(sendErr).Msg("Failed to send registration error response")
+                               }
+                               return registerErr
+                       }
+
+                       agentID = registeredAgentID
+                       agentConn = &AgentConnection{
+                               AgentID:      agentID,
+                               Identity:     identity,
+                               Stream:       stream,
+                               Context:      ctx,
+                               Cancel:       cancel,
+                               LastActivity: time.Now(),
+                       }
+
+                       s.connectionsMu.Lock()
+                       s.connections[agentID] = agentConn
+                       s.connectionsMu.Unlock()
+
+                       resp := &fodcv1.RegisterAgentResponse{
+                               Success:                  true,
+                               Message:                  "Agent registered 
successfully",
+                               HeartbeatIntervalSeconds: 
int64(s.heartbeatInterval.Seconds()),
+                               AgentId:                  agentID,
+                       }
+
+                       if sendErr := stream.Send(resp); sendErr != nil {
+                               s.logger.Error().Err(sendErr).Str("agent_id", 
agentID).Msg("Failed to send registration response")
+                               s.cleanupConnection(agentID)
+                               // Unregister agent since we couldn't send 
confirmation
+                               if unregisterErr := 
s.registry.UnregisterAgent(agentID); unregisterErr != nil {
+                                       
s.logger.Error().Err(unregisterErr).Str("agent_id", agentID).Msg("Failed to 
unregister agent after send error")
+                               }
+                               return sendErr
+                       }
+
+                       initialRegistration = false
+                       s.logger.Info().
+                               Str("agent_id", agentID).
+                               Str("ip", identity.IP).
+                               Int("port", identity.Port).
+                               Str("role", identity.Role).
+                               Msg("Agent registration stream established")
+               } else {
+                       if updateErr := s.registry.UpdateHeartbeat(agentID); 
updateErr != nil {
+                               s.logger.Error().Err(updateErr).Str("agent_id", 
agentID).Msg("Failed to update heartbeat")
+                               s.cleanupConnection(agentID)
+                               return updateErr
+                       }
+
+                       if agentConn != nil {
+                               agentConn.UpdateActivity()
+                       }
+               }
+       }
+
+       s.cleanupConnection(agentID)
+       return nil
+}
+
+// StreamMetrics handles bi-directional metrics streaming.
+func (s *FODCService) StreamMetrics(stream 
fodcv1.FODCService_StreamMetricsServer) error {
+       ctx := stream.Context()
+
+       agentID := s.getAgentIDFromContext(ctx)
+       if agentID == "" {
+               agentID = s.getAgentIDFromPeer(ctx)
+               if agentID != "" {
+                       s.logger.Warn().
+                               Str("agent_id", agentID).
+                               Msg("Agent ID not found in metadata, using peer 
address fallback (this may be unreliable)")
+               }
+       }
+
+       if agentID == "" {
+               s.logger.Error().Msg("Agent ID not found in context metadata or 
peer address")
+               return status.Errorf(codes.Unauthenticated, "agent ID not found 
in context or peer address")
+       }
+
+       agentConn := &AgentConnection{
+               AgentID:       agentID,
+               Stream:        stream,
+               MetricsStream: stream,
+               Context:       ctx,
+               LastActivity:  time.Now(),
+       }
+
+       s.connectionsMu.Lock()
+       existingConn, exists := s.connections[agentID]
+       if exists {
+               existingConn.MetricsStream = stream
+       } else {
+               s.connections[agentID] = agentConn

Review Comment:
   When updating an existing connection's MetricsStream field, the agentConn 
constructed just above is discarded without cleanup. Today that struct holds no 
goroutines, so the cost is only a wasted allocation, but it would leak resources 
if AgentConnection ever acquires any. Consider constructing the AgentConnection 
only in the not-exists branch, so nothing is allocated and then thrown away.
   ```suggestion
        s.connectionsMu.Lock()
        existingConn, exists := s.connections[agentID]
        if exists {
                existingConn.MetricsStream = stream
        } else {
                s.connections[agentID] = &AgentConnection{
                        AgentID:       agentID,
                        Stream:        stream,
                        MetricsStream: stream,
                        Context:       ctx,
                        LastActivity:  time.Now(),
                }
   ```



##########
fodc/proxy/internal/metrics/aggregator.go:
##########
@@ -0,0 +1,271 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package metrics provides functionality for aggregating and enriching 
metrics from all agents.
+package metrics
+
+import (
+       "context"
+       "fmt"
+       "sync"
+       "time"
+
+       fodcv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/fodc/v1"
+       "github.com/apache/skywalking-banyandb/fodc/proxy/internal/registry"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+)
+
+// AggregatedMetric represents an aggregated metric with node metadata.
+type AggregatedMetric struct {
+       Labels      map[string]string
+       Timestamp   time.Time
+       Name        string
+       AgentID     string
+       NodeRole    string
+       Description string
+       Value       float64
+}
+
+// Filter defines filters for metrics collection.
+type Filter struct {
+       StartTime *time.Time
+       EndTime   *time.Time
+       Role      string
+       Address   string
+       AgentIDs  []string
+}
+
+// Aggregator aggregates and enriches metrics from all agents.
+type Aggregator struct {
+       registry     *registry.AgentRegistry
+       logger       *logger.Logger
+       grpcService  RequestSender
+       metricsCh    chan *AggregatedMetric
+       collecting   map[string]chan []*AggregatedMetric
+       mu           sync.RWMutex
+       collectingMu sync.RWMutex
+}
+
+// RequestSender is an interface for sending metrics requests to agents.
+type RequestSender interface {
+       RequestMetrics(ctx context.Context, agentID string, startTime, endTime 
*time.Time) error
+}
+
+// NewAggregator creates a new MetricsAggregator instance.
+func NewAggregator(registry *registry.AgentRegistry, grpcService 
RequestSender, logger *logger.Logger) *Aggregator {
+       return &Aggregator{
+               registry:    registry,
+               grpcService: grpcService,
+               logger:      logger,
+               metricsCh:   make(chan *AggregatedMetric, 1000),
+               collecting:  make(map[string]chan []*AggregatedMetric),
+       }
+}
+
+// SetGRPCService sets the gRPC service for sending metrics requests.
+func (ma *Aggregator) SetGRPCService(grpcService RequestSender) {
+       ma.mu.Lock()
+       defer ma.mu.Unlock()
+       ma.grpcService = grpcService
+}
+
+// ProcessMetricsFromAgent processes metrics received from an agent.
+func (ma *Aggregator) ProcessMetricsFromAgent(ctx context.Context, agentID 
string, agentInfo *registry.AgentInfo, req *fodcv1.StreamMetricsRequest) error {
+       aggregatedMetrics := make([]*AggregatedMetric, 0, len(req.Metrics))
+
+       for _, metric := range req.Metrics {
+               labels := make(map[string]string)
+               for key, value := range metric.Labels {
+                       labels[key] = value
+               }
+
+               labels["agent_id"] = agentID
+               labels["node_role"] = agentInfo.NodeRole
+               labels["ip"] = agentInfo.PrimaryAddress.IP
+               labels["port"] = fmt.Sprintf("%d", 
agentInfo.PrimaryAddress.Port)
+
+               for key, value := range agentInfo.Labels {
+                       labels[key] = value
+               }
+
+               var timestamp time.Time
+               if req.Timestamp != nil {
+                       timestamp = req.Timestamp.AsTime()
+               } else {
+                       timestamp = time.Now()
+               }
+
+               aggregatedMetric := &AggregatedMetric{
+                       Name:        metric.Name,
+                       Labels:      labels,
+                       Value:       metric.Value,
+                       Timestamp:   timestamp,
+                       AgentID:     agentID,
+                       NodeRole:    agentInfo.NodeRole,
+                       Description: metric.Description,
+               }
+
+               aggregatedMetrics = append(aggregatedMetrics, aggregatedMetric)
+       }
+
+       ma.collectingMu.RLock()
+       collectCh, exists := ma.collecting[agentID]
+       ma.collectingMu.RUnlock()
+
+       if exists {
+               select {
+               case collectCh <- aggregatedMetrics:
+               case <-ctx.Done():
+                       return ctx.Err()
+               default:
+                       ma.logger.Warn().Str("agent_id", agentID).Msg("Metrics 
collection channel full, dropping metrics")
+               }
+       }
+
+       return nil
+}
+
+// CollectMetricsFromAgents requests metrics from all agents (or filtered 
agents) when external client queries.
+func (ma *Aggregator) CollectMetricsFromAgents(ctx context.Context, filter 
*Filter) ([]*AggregatedMetric, error) {
+       agents := ma.getFilteredAgents(filter)
+       if len(agents) == 0 {
+               return []*AggregatedMetric{}, nil
+       }
+
+       collectChs := make(map[string]chan []*AggregatedMetric)
+       agentIDs := make([]string, 0, len(agents))
+       ma.collectingMu.Lock()
+       for _, agentInfo := range agents {
+               collectCh := make(chan []*AggregatedMetric, 1)
+               collectChs[agentInfo.AgentID] = collectCh
+               ma.collecting[agentInfo.AgentID] = collectCh
+               agentIDs = append(agentIDs, agentInfo.AgentID)
+       }
+       ma.collectingMu.Unlock()
+
+       defer func() {
+               ma.collectingMu.Lock()
+               for _, agentID := range agentIDs {
+                       delete(ma.collecting, agentID)
+               }
+               ma.collectingMu.Unlock()
+       }()
+
+       for _, agentInfo := range agents {
+               requestErr := ma.grpcService.RequestMetrics(ctx, 
agentInfo.AgentID, filter.StartTime, filter.EndTime)
+               if requestErr != nil {
+                       ma.logger.Error().
+                               Err(requestErr).
+                               Str("agent_id", agentInfo.AgentID).
+                               Msg("Failed to request metrics from agent")
+                       delete(collectChs, agentInfo.AgentID)
+               }
+       }
+
+       allMetrics := make([]*AggregatedMetric, 0)
+       timeout := 10 * time.Second
+       if filter.StartTime != nil && filter.EndTime != nil {
+               timeout = filter.EndTime.Sub(*filter.StartTime) + 5*time.Second
+       }

Review Comment:
   The timeout calculation could result in very large timeouts for wide time 
windows (e.g., querying a year of data would timeout after ~1 year + 5 
seconds). Consider adding a maximum timeout cap to prevent excessively long 
waits.



##########
fodc/proxy/internal/registry/registry.go:
##########
@@ -0,0 +1,329 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package registry provides functionality for managing and tracking FODC 
agents.
+package registry
+
+import (
+       "context"
+       "fmt"
+       "sync"
+       "time"
+
+       "github.com/google/uuid"
+
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+)
+
// AgentStatus represents the current status of an agent.
type AgentStatus string

const (
	// AgentStatusOnline indicates the agent is online and connected.
	AgentStatusOnline AgentStatus = "online"
	// AgentStatusOffline indicates the agent is offline or unconnected.
	// NOTE(review): the wire value is "unconnected", not "offline" — confirm
	// downstream consumers expect that string before changing either side.
	AgentStatusOffline AgentStatus = "unconnected"
)

// Address is an IP/port pair identifying a network endpoint.
type Address struct {
	IP   string
	Port int
}

// AgentIdentity is the identity an agent presents at registration time:
// its address, role, and label set.
type AgentIdentity struct {
	Labels map[string]string
	IP     string
	Role   string
	Port   int
}

// AgentInfo is the registry's record for one registered agent.
type AgentInfo struct {
	Labels         map[string]string
	RegisteredAt   time.Time
	LastHeartbeat  time.Time
	AgentID        string
	NodeRole       string
	Status         AgentStatus
	AgentIdentity  AgentIdentity
	PrimaryAddress Address
}
+
+// AgentRegistry manages the lifecycle and state of all connected FODC Agents.
+type AgentRegistry struct {
+       agents            map[string]*AgentInfo
+       logger            *logger.Logger
+       healthCheckTicker *time.Ticker
+       healthCheckStopCh chan struct{}
+       mu                sync.RWMutex
+       heartbeatTimeout  time.Duration
+       cleanupTimeout    time.Duration
+       maxAgents         int
+}
+
+// NewAgentRegistry creates a new AgentRegistry instance.
+func NewAgentRegistry(logger *logger.Logger, heartbeatTimeout, cleanupTimeout 
time.Duration, maxAgents int) *AgentRegistry {
+       ar := &AgentRegistry{
+               agents:            make(map[string]*AgentInfo),
+               logger:            logger,
+               heartbeatTimeout:  heartbeatTimeout,
+               maxAgents:         maxAgents,
+               cleanupTimeout:    cleanupTimeout,
+               healthCheckStopCh: make(chan struct{}),
+       }
+       ar.startHealthCheck()
+       return ar
+}
+
+// RegisterAgent registers a new agent or updates existing agent information.
+func (ar *AgentRegistry) RegisterAgent(_ context.Context, identity 
AgentIdentity, primaryAddr Address) (string, error) {
+       ar.mu.Lock()
+       defer ar.mu.Unlock()
+
+       if len(ar.agents) >= ar.maxAgents {
+               return "", fmt.Errorf("maximum number of agents (%d) reached", 
ar.maxAgents)
+       }
+
+       if primaryAddr.IP == "" {
+               return "", fmt.Errorf("primary address IP cannot be empty")
+       }
+       if primaryAddr.Port <= 0 {
+               return "", fmt.Errorf("primary address port must be greater 
than 0")
+       }
+       if identity.Role == "" {
+               return "", fmt.Errorf("node role cannot be empty")
+       }
+
+       agentID := uuid.New().String()
+       now := time.Now()
+
+       agentInfo := &AgentInfo{
+               AgentID:        agentID,
+               AgentIdentity:  identity,
+               NodeRole:       identity.Role,
+               PrimaryAddress: primaryAddr,
+               Labels:         identity.Labels,
+               RegisteredAt:   now,
+               LastHeartbeat:  now,
+               Status:         AgentStatusOnline,
+       }
+
+       ar.agents[agentID] = agentInfo
+
+       ar.logger.Info().
+               Str("agent_id", agentID).
+               Str("ip", primaryAddr.IP).
+               Int("port", primaryAddr.Port).
+               Str("role", identity.Role).
+               Msg("Agent registered")
+
+       return agentID, nil
+}
+
+// UnregisterAgent removes an agent from the registry.
+func (ar *AgentRegistry) UnregisterAgent(agentID string) error {
+       ar.mu.Lock()
+       defer ar.mu.Unlock()
+
+       agentInfo, exists := ar.agents[agentID]
+       if !exists {
+               return fmt.Errorf("agent %s not found", agentID)
+       }
+
+       delete(ar.agents, agentID)
+
+       ar.logger.Info().
+               Str("agent_id", agentID).
+               Str("ip", agentInfo.PrimaryAddress.IP).
+               Int("port", agentInfo.PrimaryAddress.Port).
+               Str("role", agentInfo.NodeRole).
+               Msg("Agent unregistered")
+
+       return nil
+}
+
+// UpdateHeartbeat updates the last heartbeat timestamp for an agent.
+func (ar *AgentRegistry) UpdateHeartbeat(agentID string) error {
+       ar.mu.Lock()
+       defer ar.mu.Unlock()
+
+       agentInfo, exists := ar.agents[agentID]
+       if !exists {
+               return fmt.Errorf("agent %s not found", agentID)
+       }
+
+       agentInfo.LastHeartbeat = time.Now()
+       agentInfo.Status = AgentStatusOnline
+
+       return nil
+}
+
+// GetAgent retrieves agent information by primary IP + port + role + labels.
+func (ar *AgentRegistry) GetAgent(ip string, port int, role string, labels 
map[string]string) (*AgentInfo, error) {
+       ar.mu.RLock()
+       defer ar.mu.RUnlock()
+
+       identity := AgentIdentity{
+               IP:     ip,
+               Port:   port,
+               Role:   role,
+               Labels: labels,
+       }
+
+       for _, agentInfo := range ar.agents {
+               if ar.matchesIdentity(agentInfo.AgentIdentity, identity) {
+                       return agentInfo, nil
+               }
+       }
+
+       return nil, fmt.Errorf("agent not found")
+}
+
+// GetAgentByID retrieves agent information by unique agent ID.
+func (ar *AgentRegistry) GetAgentByID(agentID string) (*AgentInfo, error) {
+       ar.mu.RLock()
+       defer ar.mu.RUnlock()
+
+       agentInfo, exists := ar.agents[agentID]
+       if !exists {
+               return nil, fmt.Errorf("agent %s not found", agentID)
+       }
+
+       return agentInfo, nil
+}
+
+// ListAgents returns a list of all registered agents.
+func (ar *AgentRegistry) ListAgents() []*AgentInfo {
+       ar.mu.RLock()
+       defer ar.mu.RUnlock()
+
+       agents := make([]*AgentInfo, 0, len(ar.agents))
+       for _, agentInfo := range ar.agents {
+               agents = append(agents, agentInfo)
+       }
+
+       return agents
+}
+
+// ListAgentsByRole returns agents filtered by role.
+func (ar *AgentRegistry) ListAgentsByRole(role string) []*AgentInfo {
+       ar.mu.RLock()
+       defer ar.mu.RUnlock()
+
+       agents := make([]*AgentInfo, 0)
+       for _, agentInfo := range ar.agents {
+               if agentInfo.NodeRole == role {
+                       agents = append(agents, agentInfo)
+               }
+       }
+
+       return agents
+}
+
+// CheckAgentHealth periodically checks agent health based on heartbeat 
timeout.
+func (ar *AgentRegistry) CheckAgentHealth() error {
+       ar.mu.Lock()
+       defer ar.mu.Unlock()
+
+       now := time.Now()
+       agentsToUnregister := make([]string, 0)
+
+       for agentID, agentInfo := range ar.agents {
+               timeSinceHeartbeat := now.Sub(agentInfo.LastHeartbeat)
+
+               if timeSinceHeartbeat > ar.heartbeatTimeout {
+                       if agentInfo.Status == AgentStatusOnline {
+                               agentInfo.Status = AgentStatusOffline
+                               ar.logger.Warn().
+                                       Str("agent_id", agentID).
+                                       Dur("time_since_heartbeat", 
timeSinceHeartbeat).
+                                       Msg("Agent marked as offline due to 
heartbeat timeout")
+                       }
+
+                       if timeSinceHeartbeat > ar.cleanupTimeout {
+                               agentsToUnregister = append(agentsToUnregister, 
agentID)
+                       }
+               }
+       }
+
+       ar.mu.Unlock()
+       for _, agentID := range agentsToUnregister {
+               if unregisterErr := ar.UnregisterAgent(agentID); unregisterErr 
!= nil {
+                       ar.logger.Error().
+                               Err(unregisterErr).
+                               Str("agent_id", agentID).
+                               Msg("Failed to unregister agent during health 
check")
+               }
+       }
+       ar.mu.Lock()

Review Comment:
   The mutex is unlocked at line 265, then locked again at line 274, but 
UnregisterAgent (called at line 267) also acquires the same mutex. This creates 
a potential race condition where the agents map could be modified between 
unlock and lock, causing the iteration or cleanup to operate on stale data. 
Consider holding the lock throughout the cleanup operation or using a different 
synchronization strategy.



##########
fodc/agent/internal/cmd/root.go:
##########
@@ -164,6 +189,40 @@ func runFODC(_ *cobra.Command, _ []string) error {
 
        stopCh := wd.Serve()
 
+       var proxyClient *proxy.Client
+       if proxyAddr != "" && nodeIP != "" && nodePort > 0 && nodeRole != "" {
+               labelsMap := proxy.ParseLabels(nodeLabels)
+               proxyClient = proxy.NewClient(
+                       proxyAddr,
+                       nodeIP,
+                       nodePort,
+                       nodeRole,
+                       labelsMap,
+                       heartbeatInterval,
+                       reconnectInterval,
+                       fr,
+                       log,
+               )
+
+               proxyCtx, proxyCancel := context.WithCancel(ctx)
+               defer proxyCancel()
+
+               go func() {
+                       if startErr := proxyClient.Start(proxyCtx); startErr != 
nil {
+                               log.Error().Err(startErr).Msg("Proxy client 
error")
+                       }
+               }()
+
+               log.Info().
+                       Str("proxy_addr", proxyAddr).
+                       Str("node_ip", nodeIP).
+                       Int("node_port", nodePort).
+                       Str("node_role", nodeRole).
+                       Msg("Proxy client started")

Review Comment:
   The ParseLabels function is called but not defined in this file, and there's 
no error handling for potentially invalid label formats. Consider validating 
the parsed labels or handling parsing errors to provide clear feedback to users.
   ```suggestion
        parseProxyLabels := func(labels string) (map[string]string, error) {
                if strings.TrimSpace(labels) == "" {
                        return map[string]string{}, nil
                }
   
                segments := strings.Split(labels, ",")
                for _, seg := range segments {
                        s := strings.TrimSpace(seg)
                        if s == "" {
                                continue
                        }
                        if !strings.Contains(s, "=") {
                                return nil, fmt.Errorf("invalid label %q: must 
be in key=value format", s)
                        }
                }
   
                return proxy.ParseLabels(labels), nil
        }
   
        var proxyClient *proxy.Client
        if proxyAddr != "" && nodeIP != "" && nodePort > 0 && nodeRole != "" {
                labelsMap, labelsErr := parseProxyLabels(nodeLabels)
                if labelsErr != nil {
                        log.Error().Err(labelsErr).Msg("Invalid node labels; 
proxy client not started")
                } else {
                        proxyClient = proxy.NewClient(
                                proxyAddr,
                                nodeIP,
                                nodePort,
                                nodeRole,
                                labelsMap,
                                heartbeatInterval,
                                reconnectInterval,
                                fr,
                                log,
                        )
   
                        proxyCtx, proxyCancel := context.WithCancel(ctx)
                        defer proxyCancel()
   
                        go func() {
                                if startErr := proxyClient.Start(proxyCtx); 
startErr != nil {
                                        log.Error().Err(startErr).Msg("Proxy 
client error")
                                }
                        }()
   
                        log.Info().
                                Str("proxy_addr", proxyAddr).
                                Str("node_ip", nodeIP).
                                Int("node_port", nodePort).
                                Str("node_role", nodeRole).
                                Msg("Proxy client started")
                }
   ```



##########
fodc/proxy/internal/api/server.go:
##########
@@ -0,0 +1,391 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package api provides functionality for the API server.
+package api
+
+import (
+       "context"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "sort"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/apache/skywalking-banyandb/fodc/proxy/internal/metrics"
+       "github.com/apache/skywalking-banyandb/fodc/proxy/internal/registry"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+)
+
+// Server exposes REST and Prometheus-style endpoints for external consumption.
+type Server struct {
+       metricsAggregator *metrics.Aggregator
+       registry          *registry.AgentRegistry
+       server            *http.Server
+       logger            *logger.Logger
+       startTime         time.Time
+}
+
+// NewServer creates a new Server instance.
+func NewServer(metricsAggregator *metrics.Aggregator, registry *registry.AgentRegistry, logger *logger.Logger) *Server {
+       return &Server{
+               metricsAggregator: metricsAggregator,
+               registry:          registry,
+               logger:            logger,
+               startTime:         time.Now(),
+       }
+}
+
+// Start starts the HTTP server.
+func (s *Server) Start(listenAddr string, readTimeout, writeTimeout time.Duration) error {
+       mux := http.NewServeMux()
+
+       mux.HandleFunc("/metrics", s.handleMetrics)
+       mux.HandleFunc("/metrics-windows", s.handleMetricsWindows)
+       mux.HandleFunc("/health", s.handleHealth)
+
+       s.server = &http.Server{
+               Addr:         listenAddr,
+               Handler:      mux,
+               ReadTimeout:  readTimeout,
+               WriteTimeout: writeTimeout,
+       }
+
+       s.logger.Info().Str("addr", listenAddr).Msg("Starting HTTP API server")
+
+       go func() {
+               if serveErr := s.server.ListenAndServe(); serveErr != nil && serveErr != http.ErrServerClosed {
+                       s.logger.Error().Err(serveErr).Msg("HTTP server error")
+               }
+       }()
+
+       return nil
+}
+
+// Stop gracefully stops the HTTP server.
+func (s *Server) Stop() error {
+       if s.server == nil {
+               return nil
+       }
+
+       ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+       defer cancel()
+
+       return s.server.Shutdown(ctx)
+}
+
+// handleMetrics handles GET /metrics endpoint.
+func (s *Server) handleMetrics(w http.ResponseWriter, r *http.Request) {
+       if r.Method != http.MethodGet {
+               http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+               return
+       }
+
+       filter := &metrics.Filter{
+               Role:    r.URL.Query().Get("role"),
+               Address: r.URL.Query().Get("address"),
+       }
+
+       ctx := r.Context()
+       aggregatedMetrics, collectErr := s.metricsAggregator.GetLatestMetrics(ctx)
+       if collectErr != nil {
+               s.logger.Error().Err(collectErr).Msg("Failed to collect metrics")
+               http.Error(w, "Failed to collect metrics", http.StatusInternalServerError)
+               return
+       }
+
+       if filter.Role != "" || filter.Address != "" {
+               filteredMetrics := make([]*metrics.AggregatedMetric, 0)
+               for _, metric := range aggregatedMetrics {
+                       if filter.Role != "" && metric.NodeRole != filter.Role {
+                               continue
+                       }
+                       if filter.Address != "" {
+                               addressMatch := strings.Contains(metric.Labels["ip"], filter.Address) ||
+                                       strings.Contains(fmt.Sprintf("%s:%s", metric.Labels["ip"], metric.Labels["port"]), filter.Address)
+                               if !addressMatch {
+                                       continue
+                               }
+                       }
+                       filteredMetrics = append(filteredMetrics, metric)
+               }
+               aggregatedMetrics = filteredMetrics
+       }
+
+       prometheusText := s.formatPrometheusText(aggregatedMetrics)
+
+       w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
+       w.WriteHeader(http.StatusOK)
+       _, _ = w.Write([]byte(prometheusText))
+}
+
+// handleMetricsWindows handles GET /metrics-windows endpoint.
+func (s *Server) handleMetricsWindows(w http.ResponseWriter, r *http.Request) {
+       if r.Method != http.MethodGet {
+               http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+               return
+       }
+
+       var startTime, endTime time.Time
+       var parseErr error
+
+       startTimeStr := r.URL.Query().Get("start_time")
+       if startTimeStr != "" {
+               startTime, parseErr = time.Parse(time.RFC3339, startTimeStr)
+               if parseErr != nil {
+                       http.Error(w, fmt.Sprintf("Invalid start_time format: %v", parseErr), http.StatusBadRequest)
+                       return
+               }
+       }
+
+       endTimeStr := r.URL.Query().Get("end_time")
+       if endTimeStr != "" {
+               endTime, parseErr = time.Parse(time.RFC3339, endTimeStr)
+               if parseErr != nil {
+                       http.Error(w, fmt.Sprintf("Invalid end_time format: %v", parseErr), http.StatusBadRequest)
+                       return
+               }
+       }
+
+       filter := &metrics.Filter{
+               Role:    r.URL.Query().Get("role"),
+               Address: r.URL.Query().Get("address"),
+       }
+
+       ctx := r.Context()
+       var aggregatedMetrics []*metrics.AggregatedMetric
+       var collectErr error
+
+       if startTimeStr != "" && endTimeStr != "" {
+               aggregatedMetrics, collectErr = s.metricsAggregator.GetMetricsWindow(ctx, startTime, endTime, filter)
+       } else {
+               aggregatedMetrics, collectErr = s.metricsAggregator.GetLatestMetrics(ctx)
+       }
+
+       if collectErr != nil {
+               s.logger.Error().Err(collectErr).Msg("Failed to collect metrics")
+               http.Error(w, "Failed to collect metrics", http.StatusInternalServerError)
+               return
+       }
+
+       if filter.Role != "" || filter.Address != "" {
+               filteredMetrics := make([]*metrics.AggregatedMetric, 0)
+               for _, metric := range aggregatedMetrics {
+                       if filter.Role != "" && metric.NodeRole != filter.Role {
+                               continue
+                       }
+                       if filter.Address != "" {
+                               addressMatch := strings.Contains(metric.Labels["ip"], filter.Address) ||
+                                       strings.Contains(fmt.Sprintf("%s:%s", metric.Labels["ip"], metric.Labels["port"]), filter.Address)
+                               if !addressMatch {
+                                       continue
+                               }
+                       }
+                       filteredMetrics = append(filteredMetrics, metric)
+               }
+               aggregatedMetrics = filteredMetrics
+       }
+
+       response := s.formatMetricsWindowJSON(aggregatedMetrics)
+
+       w.Header().Set("Content-Type", "application/json")
+       w.WriteHeader(http.StatusOK)
+       if encodeErr := json.NewEncoder(w).Encode(response); encodeErr != nil {
+               s.logger.Error().Err(encodeErr).Msg("Failed to encode JSON response")
+       }
+}
+
+// handleHealth handles GET /health endpoint.
+func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
+       if r.Method != http.MethodGet {
+               http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+               return
+       }
+
+       agents := s.registry.ListAgents()
+       agentsOnline := 0
+       for _, agentInfo := range agents {
+               if agentInfo.Status == registry.AgentStatusOnline {
+                       agentsOnline++
+               }
+       }
+
+       response := map[string]interface{}{
+               "status":         "healthy",
+               "agents_online":  agentsOnline,
+               "agents_total":   len(agents),
+               "uptime_seconds": int(time.Since(s.startTime).Seconds()),
+       }
+
+       w.Header().Set("Content-Type", "application/json")
+       w.WriteHeader(http.StatusOK)
+       if encodeErr := json.NewEncoder(w).Encode(response); encodeErr != nil {
+               s.logger.Error().Err(encodeErr).Msg("Failed to encode JSON response")
+       }
+}
+
+// formatPrometheusText formats aggregated metrics as Prometheus text format.
+func (s *Server) formatPrometheusText(aggregatedMetrics []*metrics.AggregatedMetric) string {
+       if len(aggregatedMetrics) == 0 {
+               return ""
+       }
+
+       metricMap := make(map[string]*metricGroup)
+       for _, metric := range aggregatedMetrics {
+               key := metric.Name
+               group, exists := metricMap[key]
+               if !exists {
+                       group = &metricGroup{
+                               name:        metric.Name,
+                               description: metric.Description,
+                               metrics:     make([]*metrics.AggregatedMetric, 0),
+                       }
+                       metricMap[key] = group
+               }
+               group.metrics = append(group.metrics, metric)
+       }
+
+       var builder strings.Builder
+       metricNames := make([]string, 0, len(metricMap))
+       for name := range metricMap {
+               metricNames = append(metricNames, name)
+       }
+       sort.Strings(metricNames)
+
+       for _, name := range metricNames {
+               group := metricMap[name]
+               if group.description != "" {
+                       builder.WriteString(fmt.Sprintf("# HELP %s %s\n", group.name, group.description))
+               }
+               builder.WriteString(fmt.Sprintf("# TYPE %s gauge\n", group.name))
+
+               for _, metric := range group.metrics {
+                       labelParts := make([]string, 0, len(metric.Labels))
+                       for key, value := range metric.Labels {
+                               labelParts = append(labelParts, fmt.Sprintf(`%s="%s"`, key, value))
+                       }
+                       sort.Strings(labelParts)
+
+                       labelStr := ""
+                       if len(labelParts) > 0 {
+                               labelStr = "{" + strings.Join(labelParts, ",") + "}"
+                       }
+
+                       builder.WriteString(fmt.Sprintf("%s%s %s\n", group.name, labelStr, formatFloat(metric.Value)))
+               }
+       }
+
+       return builder.String()
+}
+
+// formatMetricsWindowJSON formats aggregated metrics as JSON for metrics-windows endpoint.
+func (s *Server) formatMetricsWindowJSON(aggregatedMetrics []*metrics.AggregatedMetric) []map[string]interface{} {
+       metricMap := make(map[string]*timeSeriesMetric)
+
+       for _, metric := range aggregatedMetrics {
+               key := s.getMetricKey(metric)
+               tsMetric, exists := metricMap[key]
+               if !exists {
+                       tsMetric = &timeSeriesMetric{
+                               name:        metric.Name,
+                               description: metric.Description,
+                               labels:      make(map[string]string),
+                               agentID:     metric.AgentID,
+                               ip:          metric.Labels["ip"],
+                               port:        metric.Labels["port"],
+                               data:        make([]map[string]interface{}, 0),
+                       }
+
+                       for key, value := range metric.Labels {
+                               if key != "ip" && key != "port" {
+                                       tsMetric.labels[key] = value
+                               }
+                       }
+
+                       metricMap[key] = tsMetric
+               }
+
+               dataPoint := map[string]interface{}{
+                       "timestamp": metric.Timestamp.Format(time.RFC3339),
+                       "value":     metric.Value,
+               }
+               tsMetric.data = append(tsMetric.data, dataPoint)
+       }
+
+       result := make([]map[string]interface{}, 0, len(metricMap))
+       for _, tsMetric := range metricMap {
+               sort.Slice(tsMetric.data, func(i, j int) bool {
+                       timeI, _ := time.Parse(time.RFC3339, tsMetric.data[i]["timestamp"].(string))
+                       timeJ, _ := time.Parse(time.RFC3339, tsMetric.data[j]["timestamp"].(string))

Review Comment:
   Parse errors are silently ignored with the blank identifier. If timestamp 
parsing fails, the sort comparison will use zero-valued times, leading to 
incorrect ordering. Consider handling parse errors appropriately or logging a 
warning.
   ```suggestion
                        tsI, okI := tsMetric.data[i]["timestamp"].(string)
                        tsJ, okJ := tsMetric.data[j]["timestamp"].(string)
                        if !okI || !okJ {
                                if s.logger != nil {
                                        s.logger.Warnf("unexpected non-string timestamp when sorting metrics: %v, %v", tsMetric.data[i]["timestamp"], tsMetric.data[j]["timestamp"])
                                }
                                return tsI < tsJ
                        }
   
                        timeI, errI := time.Parse(time.RFC3339, tsI)
                        timeJ, errJ := time.Parse(time.RFC3339, tsJ)
                        if errI != nil || errJ != nil {
                                if s.logger != nil {
                                        s.logger.Warnf("failed to parse metric timestamps %q or %q: %v, %v", tsI, tsJ, errI, errJ)
                                }
                                return tsI < tsJ
                        }
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to