profile
viewpoint

started hajimehoshi/ebiten

started time in 10 days

started certonid/certonid

started time in 10 days

Pull request review comment envoyproxy/envoy

Lrs example

+#!/usr/bin/env bash++counter=1+while [ $counter -le 50 ]+do +  # generate random Port number to send requests +  port1=80+  port2=81+  range=$(($port2-$port1+1))+  PORT=$RANDOM+  let "PORT %= $range"+  PORT=$(($PORT+$port1))

this can be simplified a bit. something like:

ports=("80" "81")
port=${ports[$RANDOM % ${#ports[@]} ]}
kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+module github.com/envoyproxy/envoy/examples/load-reporting-service++go 1.13++require (+	github.com/envoyproxy/go-control-plane v0.9.0+	github.com/golang/protobuf v1.3.2+	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e

go mod tidy to remove this and a couple other unused dependencies

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package main++import (+	"log"+	"net"+	"time"++	"github.com/envoyproxy/envoy/examples/load-reporting-service/server"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"google.golang.org/grpc"+)++func main() {+	// This is how often Envoy will send the load report+	const StatsFrequencyInSeconds = 2++	// Listening on port 18000+	address := ":18000"+	lis, err := net.Listen("tcp", address)+	if err != nil {+		panic(err)+	}++	grpcServer := grpc.NewServer()+	xdsServer := server.NewServer()+	gcpLoadStats.RegisterLoadReportingServiceServer(grpcServer, xdsServer)+	startCollectingStats(xdsServer, "http_service", []string{"local_service"}, StatsFrequencyInSeconds)++	log.Printf("LRS Server is up and running on %s", address)+	err = grpcServer.Serve(lis)+	if err != nil {+		panic(err)+	}+}++func startCollectingStats(server server.Server, cluster string, upstreamClusters []string, frequency int64) {+	// Send LoadStatsResponse after 10 seconds to initiate the Load Reporting+	ticker := time.NewTicker(time.Duration(10) * time.Second)

tickers are useful for generating events at regular intervals. it seems like here you could simplify and use a time.Sleep if you just need to wait for 10s.

would another approach be to just start calling SendResponse and do that until one succeeds? that would allow you to skip the fixed delay, making things more responsive and reliable.

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()

can GetNode ever return nil?

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()+		nodeId := req.GetNode().GetId()++		s.mu.Lock()+		// Check whether any Node from Cluster has already connected or not.+		// If yes, Cluster should be present in the cache+		// If not, add Cluster <-> Node <-> Stream object in cache+		if _, exist := s.lrsCache[clusterName]; !exist {+			// Add all Nodes and its stream objects into the cache+			log.Printf("Adding new cluster to cache `%s` with node `%s`", clusterName, nodeId)+			s.lrsCache[clusterName] = make(map[string]NodeMetadata)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else if _, exist := s.lrsCache[clusterName][nodeId]; !exist {+			// Add 
remaining Nodes of a Cluster and its stream objects into the cache+			log.Printf("Adding new node `%s` to existing cluster `%s`", nodeId, clusterName)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else {+			// After Load Report is enabled, log the Load Report stats received+			for i := 0; i < len(req.ClusterStats); i++ {+				if len(req.ClusterStats[i].UpstreamLocalityStats) > 0 {+					log.Printf("Got stats from cluster `%s` node `%s` - %s", req.Node.Cluster, req.Node.Id, req.GetClusterStats()[i])+				}+			}+		}+		s.mu.Unlock()+	}+}++// Initialize Load Reporting for a given cluster to a list of UpStreamClusters+func (s *server) SendResponse(cluster string, upstreamClusters []string, frequency int64) {+	s.mu.Lock()+	// Check whether any Node from given Cluster is connected or not.+	clusterDetails, exist := s.lrsCache[cluster]+	s.mu.Unlock()+	if !exist {+		log.Printf("Cannot send response as cluster `%s` because is not connected", cluster)+		return+	}++	// To enable Load Report, send LoadStatsResponse to all Nodes within a Cluster+	for nodeId, nodeDetails := range clusterDetails {

golint: rename to nodeID

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()+		nodeId := req.GetNode().GetId()++		s.mu.Lock()+		// Check whether any Node from Cluster has already connected or not.+		// If yes, Cluster should be present in the cache+		// If not, add Cluster <-> Node <-> Stream object in cache+		if _, exist := s.lrsCache[clusterName]; !exist {+			// Add all Nodes and its stream objects into the cache+			log.Printf("Adding new cluster to cache `%s` with node `%s`", clusterName, nodeId)+			s.lrsCache[clusterName] = make(map[string]NodeMetadata)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else if _, exist := s.lrsCache[clusterName][nodeId]; !exist {+			// Add 
remaining Nodes of a Cluster and its stream objects into the cache+			log.Printf("Adding new node `%s` to existing cluster `%s`", nodeId, clusterName)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else {+			// After Load Report is enabled, log the Load Report stats received+			for i := 0; i < len(req.ClusterStats); i++ {

consider using a range here to iterate over ClusterStats. this will simplify the code within the for loop by avoiding indexing into the slice

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()+		nodeId := req.GetNode().GetId()

golint: rename to nodeID

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"

run goimports to cleanup ordering

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex

sync.RWMutex adds RLock and RUnlock for more granular reader/writer locking. However, only Lock/Unlock are ever used. Consider changing this to the simpler sync.Mutex

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()+		nodeId := req.GetNode().GetId()++		s.mu.Lock()+		// Check whether any Node from Cluster has already connected or not.+		// If yes, Cluster should be present in the cache+		// If not, add Cluster <-> Node <-> Stream object in cache+		if _, exist := s.lrsCache[clusterName]; !exist {+			// Add all Nodes and its stream objects into the cache+			log.Printf("Adding new cluster to cache `%s` with node `%s`", clusterName, nodeId)+			s.lrsCache[clusterName] = make(map[string]NodeMetadata)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else if _, exist := s.lrsCache[clusterName][nodeId]; !exist {+			// Add 
remaining Nodes of a Cluster and its stream objects into the cache+			log.Printf("Adding new node `%s` to existing cluster `%s`", nodeId, clusterName)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}

early return here as well

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()+		nodeId := req.GetNode().GetId()++		s.mu.Lock()+		// Check whether any Node from Cluster has already connected or not.+		// If yes, Cluster should be present in the cache+		// If not, add Cluster <-> Node <-> Stream object in cache+		if _, exist := s.lrsCache[clusterName]; !exist {+			// Add all Nodes and its stream objects into the cache+			log.Printf("Adding new cluster to cache `%s` with node `%s`", clusterName, nodeId)+			s.lrsCache[clusterName] = make(map[string]NodeMetadata)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}

early return to reduce branching complexity

kathan24

comment created time in 3 months

Pull request review comment envoyproxy/envoy

Lrs example

+package server++import (+	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"+	gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"+	"github.com/golang/protobuf/ptypes/duration"+	"google.golang.org/grpc"+	"log"+	"sync"+)++// Server handling Load Stats communication+type Server interface {+	gcpLoadStats.LoadReportingServiceServer+	SendResponse(cluster string, upstreamCluster []string, frequency int64)+}++func NewServer() Server {+	return &server{lrsCache: make(map[string]map[string]NodeMetadata)}+}++type server struct {+	// protects lrsCache+	mu sync.RWMutex++	// This cache stores stream objects (and Node data) for every node (within a cluster) upon connection+	lrsCache map[string]map[string]NodeMetadata+}++// Struct to hold stream object and node details+type NodeMetadata struct {+	stream stream+	node   *core.Node+}++type stream interface {+	grpc.ServerStream++	Send(*gcpLoadStats.LoadStatsResponse) error+	Recv() (*gcpLoadStats.LoadStatsRequest, error)+}++// Handles incoming stream connections and LoadStatsRequests+func (s *server) StreamLoadStats(stream gcpLoadStats.LoadReportingService_StreamLoadStatsServer) error {+	for {+		req, err := stream.Recv()+		// input stream ended or errored out+		if err != nil {+			return err+		}++		clusterName := req.GetNode().GetCluster()+		nodeId := req.GetNode().GetId()++		s.mu.Lock()+		// Check whether any Node from Cluster has already connected or not.+		// If yes, Cluster should be present in the cache+		// If not, add Cluster <-> Node <-> Stream object in cache+		if _, exist := s.lrsCache[clusterName]; !exist {+			// Add all Nodes and its stream objects into the cache+			log.Printf("Adding new cluster to cache `%s` with node `%s`", clusterName, nodeId)+			s.lrsCache[clusterName] = make(map[string]NodeMetadata)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else if _, exist := s.lrsCache[clusterName][nodeId]; !exist {+			// Add 
remaining Nodes of a Cluster and its stream objects into the cache+			log.Printf("Adding new node `%s` to existing cluster `%s`", nodeId, clusterName)+			s.lrsCache[clusterName][nodeId] = NodeMetadata{+				node:   req.GetNode(),+				stream: stream,+			}+		} else {+			// After Load Report is enabled, log the Load Report stats received+			for i := 0; i < len(req.ClusterStats); i++ {+				if len(req.ClusterStats[i].UpstreamLocalityStats) > 0 {+					log.Printf("Got stats from cluster `%s` node `%s` - %s", req.Node.Cluster, req.Node.Id, req.GetClusterStats()[i])+				}+			}+		}+		s.mu.Unlock()

move to the top of function with a defer

kathan24

comment created time in 3 months

more