Routes in a load-balance pool are no longer listed in ls-route and its API; the load balancer is listed instead. Improved context handling and graceful shutdown.

yusing 2024-10-14 09:28:54 +08:00
parent d3b8cb8cba
commit 99207ae606
14 changed files with 278 additions and 166 deletions
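In short, per-component shutdown hooks are replaced by a global task registry: long-lived goroutines register a common.Task and mark it Finished() on exit, so main only cancels the global context and waits. A minimal sketch of the new pattern (server startup elided; only the import path and the two common calls are taken from the diff below):

	package main

	import (
		"os"
		"os/signal"
		"syscall"
		"time"

		"github.com/yusing/go-proxy/internal/common"
	)

	func main() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)

		// ... start servers; each long-lived goroutine tracks itself
		// with common.NewTask(...) and defers task.Finished() ...

		<-sig
		common.CancelGlobalContext()              // every task's context is now done
		common.GlobalContextWait(3 * time.Second) // wait; on timeout, print the stuck task tree
	}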

==========

@@ -1,16 +1,12 @@
 package main

 import (
-	"context"
 	"encoding/json"
 	"io"
 	"log"
 	"net/http"
 	"os"
 	"os/signal"
-	"reflect"
-	"runtime"
-	"strings"
 	"syscall"
 	"time"
@@ -20,13 +16,10 @@ import (
 	"github.com/yusing/go-proxy/internal/api/v1/query"
 	"github.com/yusing/go-proxy/internal/common"
 	"github.com/yusing/go-proxy/internal/config"
-	"github.com/yusing/go-proxy/internal/docker"
-	"github.com/yusing/go-proxy/internal/docker/idlewatcher"
 	E "github.com/yusing/go-proxy/internal/error"
 	"github.com/yusing/go-proxy/internal/net/http/middleware"
 	R "github.com/yusing/go-proxy/internal/route"
 	"github.com/yusing/go-proxy/internal/server"
-	F "github.com/yusing/go-proxy/internal/utils/functional"
 	"github.com/yusing/go-proxy/pkg"
 )
@@ -39,7 +32,6 @@ func main() {
 	}
 	l := logrus.WithField("module", "main")
-	onShutdown := F.NewSlice[func()]()

 	if common.IsDebug {
 		logrus.SetLevel(logrus.DebugLevel)
@@ -127,9 +119,6 @@ func main() {
 	cfg.StartProxyProviders()
 	cfg.WatchChanges()

-	onShutdown.Add(docker.CloseAllClients)
-	onShutdown.Add(cfg.Dispose)
-
 	sig := make(chan os.Signal, 1)
 	signal.Notify(sig, syscall.SIGINT)
 	signal.Notify(sig, syscall.SIGTERM)
@@ -137,9 +126,7 @@ func main() {
 	autocert := cfg.GetAutoCertProvider()

 	if autocert != nil {
-		ctx, cancel := context.WithCancel(context.Background())
-		onShutdown.Add(cancel)
-		if err := autocert.Setup(ctx); err != nil {
+		if err := autocert.Setup(); err != nil {
 			l.Fatal(err)
 		}
 	} else {
@@ -164,55 +151,24 @@ func main() {
 	proxyServer.Start()
 	apiServer.Start()

-	onShutdown.Add(proxyServer.Stop)
-	onShutdown.Add(apiServer.Stop)
-
-	go idlewatcher.Start()
-	onShutdown.Add(idlewatcher.Stop)

 	// wait for signal
 	<-sig

 	// gracefully shutdown
 	logrus.Info("shutting down")
-	done := make(chan struct{}, 1)
-	currentIdx := 0
-	go func() {
-		onShutdown.ForEach(func(f func()) {
-			l.Debugf("waiting for %s to complete...", funcName(f))
-			f()
-			currentIdx++
-			l.Debugf("%s done", funcName(f))
-		})
-		close(done)
-	}()
-	timeout := time.After(time.Duration(cfg.Value().TimeoutShutdown) * time.Second)
-	select {
-	case <-done:
-		logrus.Info("shutdown complete")
-	case <-timeout:
-		logrus.Info("timeout waiting for shutdown")
-		for i := currentIdx; i < onShutdown.Size(); i++ {
-			l.Warnf("%s() is still running", funcName(onShutdown.Get(i)))
-		}
-	}
+	common.CancelGlobalContext()
+	common.GlobalContextWait(time.Second * time.Duration(cfg.Value().TimeoutShutdown))
 }

 func prepareDirectory(dir string) {
 	if _, err := os.Stat(dir); os.IsNotExist(err) {
-		if err = os.MkdirAll(dir, 0755); err != nil {
+		if err = os.MkdirAll(dir, 0o755); err != nil {
 			logrus.Fatalf("failed to create directory %s: %v", dir, err)
 		}
 	}
 }

-func funcName(f func()) string {
-	parts := strings.Split(runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(), "/go-proxy/")
-	return parts[len(parts)-1]
-}
-
 func printJSON(obj any) {
 	j, err := E.Check(json.MarshalIndent(obj, "", "  "))
 	if err != nil {

==========

@@ -1,7 +1,6 @@
 package autocert

 import (
-	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"os"
@@ -14,6 +13,7 @@ import (
 	"github.com/go-acme/lego/v4/challenge"
 	"github.com/go-acme/lego/v4/lego"
 	"github.com/go-acme/lego/v4/registration"
+	"github.com/yusing/go-proxy/internal/common"
 	E "github.com/yusing/go-proxy/internal/error"
 	"github.com/yusing/go-proxy/internal/types"
 	U "github.com/yusing/go-proxy/internal/utils"
@@ -136,20 +136,20 @@ func (p *Provider) ShouldRenewOn() time.Time {
 	panic("no certificate available")
 }

-func (p *Provider) ScheduleRenewal(ctx context.Context) {
+func (p *Provider) ScheduleRenewal() {
 	if p.GetName() == ProviderLocal {
 		return
 	}

+	logger.Debug("started renewal scheduler")
+	defer logger.Debug("renewal scheduler stopped")
+
 	ticker := time.NewTicker(5 * time.Second)
 	defer ticker.Stop()

+	task := common.NewTask("cert renew scheduler")
+	defer task.Finished()
+
 	for {
 		select {
-		case <-ctx.Done():
+		case <-task.Context().Done():
 			return
 		case <-ticker.C: // check every 5 seconds
 			if err := p.renewIfNeeded(); err.HasError() {

==========

@@ -1,13 +1,12 @@
 package autocert

 import (
-	"context"
 	"os"

 	E "github.com/yusing/go-proxy/internal/error"
 )

-func (p *Provider) Setup(ctx context.Context) (err E.NestedError) {
+func (p *Provider) Setup() (err E.NestedError) {
 	if err = p.LoadCert(); err != nil {
 		if !err.Is(os.ErrNotExist) { // ignore if cert doesn't exist
 			return err
@@ -18,7 +17,7 @@ func (p *Provider) Setup(ctx context.Context) (err E.NestedError) {
 		}
 	}

-	go p.ScheduleRenewal(ctx)
+	go p.ScheduleRenewal()

 	for _, expiry := range p.GetExpiries() {
 		logger.Infof("certificate expire on %s", expiry)

==========

internal/common/task.go (new file, 158 lines)

@@ -0,0 +1,158 @@
package common
import (
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/puzpuzpuz/xsync/v3"
"github.com/sirupsen/logrus"
)
var (
globalCtx, globalCtxCancel = context.WithCancel(context.Background())
globalCtxWg sync.WaitGroup
globalCtxTraceMap = xsync.NewMapOf[*task, struct{}]()
)
type (
Task interface {
Name() string
Context() context.Context
Subtask(usageFmt string, args ...interface{}) Task
SubtaskWithCancel(usageFmt string, args ...interface{}) (Task, context.CancelFunc)
Finished()
}
task struct {
ctx context.Context
subtasks []*task
name string
finished bool
mu sync.Mutex
}
)
func (t *task) Name() string {
return t.name
}
func (t *task) Context() context.Context {
return t.ctx
}
func (t *task) Finished() {
t.mu.Lock()
defer t.mu.Unlock()
if t.finished {
return
}
t.finished = true
if _, ok := globalCtxTraceMap.Load(t); ok {
globalCtxWg.Done()
globalCtxTraceMap.Delete(t)
}
}
func (t *task) Subtask(format string, args ...interface{}) Task {
if len(args) > 0 {
format = fmt.Sprintf(format, args...)
}
t.mu.Lock()
defer t.mu.Unlock()
sub := newSubTask(t.ctx, format)
t.subtasks = append(t.subtasks, sub)
return sub
}
func (t *task) SubtaskWithCancel(format string, args ...interface{}) (Task, context.CancelFunc) {
if len(args) > 0 {
format = fmt.Sprintf(format, args...)
}
t.mu.Lock()
defer t.mu.Unlock()
ctx, cancel := context.WithCancel(t.ctx)
sub := newSubTask(ctx, format)
t.subtasks = append(t.subtasks, sub)
return sub, cancel
}
func (t *task) Tree(prefix ...string) string {
var sb strings.Builder
var pre string
if len(prefix) > 0 {
pre = prefix[0]
}
sb.WriteString(pre)
sb.WriteString(t.Name() + "\n")
for _, sub := range t.subtasks {
if sub.finished {
continue
}
sb.WriteString(sub.Tree(pre + " "))
}
return sb.String()
}
func newSubTask(ctx context.Context, name string) *task {
t := &task{
ctx: ctx,
name: name,
}
globalCtxTraceMap.Store(t, struct{}{})
globalCtxWg.Add(1)
return t
}
func NewTask(format string, args ...interface{}) Task {
if len(args) > 0 {
format = fmt.Sprintf(format, args...)
}
return newSubTask(globalCtx, format)
}
func NewTaskWithCancel(format string, args ...interface{}) (Task, context.CancelFunc) {
subCtx, cancel := context.WithCancel(globalCtx)
if len(args) > 0 {
format = fmt.Sprintf(format, args...)
}
return newSubTask(subCtx, format), cancel
}
func GlobalTask(format string, args ...interface{}) Task {
if len(args) > 0 {
format = fmt.Sprintf(format, args...)
}
return &task{
ctx: globalCtx,
name: format,
}
}
func CancelGlobalContext() {
globalCtxCancel()
}
func GlobalContextWait(timeout time.Duration) {
done := make(chan struct{})
after := time.After(timeout)
go func() {
globalCtxWg.Wait()
close(done)
}()
for {
select {
case <-done:
return
case <-after:
logrus.Println("Timeout waiting for these tasks to finish:")
globalCtxTraceMap.Range(func(t *task, _ struct{}) bool {
logrus.Println(t.Tree())
return true
})
return
}
}
}
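
A minimal usage sketch of this API (the worker and its name are hypothetical; the pattern mirrors the cert renew scheduler elsewhere in this commit):

	package worker

	import (
		"time"

		"github.com/yusing/go-proxy/internal/common"
	)

	// Run is a hypothetical long-lived worker built on the new Task API.
	func Run() {
		task := common.NewTask("example worker") // registered in globalCtxTraceMap
		defer task.Finished()                    // must run, or GlobalContextWait times out and prints this task's tree

		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-task.Context().Done(): // closed by common.CancelGlobalContext()
				return
			case <-ticker.C:
				// ... do one unit of work ...
			}
		}
	}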

==========

@@ -1,7 +1,6 @@
 package config

 import (
-	"context"
 	"os"

 	"github.com/sirupsen/logrus"
@@ -26,8 +25,7 @@ type Config struct {
 	l logrus.FieldLogger

 	watcher       W.Watcher
-	watcherCtx    context.Context
-	watcherCancel context.CancelFunc
 	reloadReq     chan struct{}
 }
@@ -76,14 +74,6 @@ func (cfg *Config) GetAutoCertProvider() *autocert.Provider {
 	return cfg.autocertProvider
 }

-func (cfg *Config) Dispose() {
-	if cfg.watcherCancel != nil {
-		cfg.watcherCancel()
-		cfg.l.Debug("stopped watcher")
-	}
-	cfg.stopProviders()
-}
-
 func (cfg *Config) Reload() (err E.NestedError) {
 	cfg.stopProviders()
 	err = cfg.load()
@@ -96,11 +86,13 @@ func (cfg *Config) StartProxyProviders() {
 }

 func (cfg *Config) WatchChanges() {
-	cfg.watcherCtx, cfg.watcherCancel = context.WithCancel(context.Background())
+	task := common.NewTask("Config watcher")
+	defer task.Finished()
+
 	go func() {
 		for {
 			select {
-			case <-cfg.watcherCtx.Done():
+			case <-task.Context().Done():
 				return
 			case <-cfg.reloadReq:
 				if err := cfg.Reload(); err != nil {
@@ -110,10 +102,10 @@ func (cfg *Config) WatchChanges() {
 		}
 	}()
 	go func() {
-		eventCh, errCh := cfg.watcher.Events(cfg.watcherCtx)
+		eventCh, errCh := cfg.watcher.Events(task.Context())
 		for {
 			select {
-			case <-cfg.watcherCtx.Done():
+			case <-task.Context().Done():
 				return
 			case event := <-eventCh:
 				if event.Action == events.ActionFileDeleted || event.Action == events.ActionFileRenamed {

==========

@@ -97,23 +97,31 @@ func (cfg *Config) HomepageConfig() homepage.Config {
 	return hpCfg
 }

-func (cfg *Config) RoutesByAlias() map[string]U.SerializedObject {
+func (cfg *Config) RoutesByAlias(typeFilter ...R.RouteType) map[string]U.SerializedObject {
 	routes := make(map[string]U.SerializedObject)
-	cfg.forEachRoute(func(alias string, r *R.Route, p *PR.Provider) {
-		if !r.Started() {
-			return
-		}
-		obj, err := U.Serialize(r)
-		if err != nil {
-			cfg.l.Error(err)
-			return
-		}
-		obj["provider"] = p.GetName()
-		obj["type"] = string(r.Type)
-		obj["started"] = r.Started()
-		obj["raw"] = r.Entry
-		routes[alias] = obj
-	})
+	if len(typeFilter) == 0 {
+		typeFilter = []R.RouteType{R.RouteTypeReverseProxy, R.RouteTypeStream}
+	}
+	for _, t := range typeFilter {
+		switch t {
+		case R.RouteTypeReverseProxy:
+			R.GetReverseProxies().RangeAll(func(alias string, r *R.HTTPRoute) {
+				obj, err := U.Serialize(r)
+				if err != nil {
+					panic(err) // should not happen
+				}
+				routes[alias] = obj
+			})
+		case R.RouteTypeStream:
+			R.GetStreamProxies().RangeAll(func(alias string, r *R.StreamRoute) {
+				obj, err := U.Serialize(r)
+				if err != nil {
+					panic(err) // should not happen
+				}
+				routes[alias] = obj
+			})
+		}
+	}
 	return routes
 }
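With the variadic filter, callers of RoutesByAlias can ask for a single route type or accept the default of both. A sketch (cfg is an existing *Config and R aliases the internal route package, as in the diff above):

	all := cfg.RoutesByAlias()                             // default: reverse proxy + stream routes
	httpOnly := cfg.RoutesByAlias(R.RouteTypeReverseProxy) // reverse proxy routes only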

==========

@@ -1,9 +0,0 @@
-package route
-
-import (
-	"time"
-)
-
-const (
-	streamStopListenTimeout = 1 * time.Second
-)

==========

@@ -1,7 +1,6 @@
 package route

 import (
-	"context"
 	"errors"
 	"fmt"
 	"net/http"
@@ -10,6 +9,7 @@ import (
 	"github.com/sirupsen/logrus"

 	"github.com/yusing/go-proxy/internal/api/v1/errorpage"
+	"github.com/yusing/go-proxy/internal/common"
 	"github.com/yusing/go-proxy/internal/docker/idlewatcher"
 	E "github.com/yusing/go-proxy/internal/error"
 	gphttp "github.com/yusing/go-proxy/internal/net/http"
@@ -52,6 +52,10 @@ func (rp ReverseProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	rp.ReverseProxy.ServeHTTP(w, r)
 }

+func GetReverseProxies() F.Map[string, *HTTPRoute] {
+	return httpRoutes
+}
+
 func SetFindMuxDomains(domains []string) {
 	if len(domains) == 0 {
 		findMuxFunc = findMuxAnyDomain
@@ -91,8 +95,7 @@ func NewHTTPRoute(entry *P.ReverseProxyEntry) (*HTTPRoute, E.NestedError) {
 	}
 	if !entry.HealthCheck.Disabled {
 		r.healthMon = health.NewHTTPHealthMonitor(
-			context.Background(),
-			string(entry.Alias),
+			common.GlobalTask("Reverse proxy "+r.String()),
 			entry.URL,
 			entry.HealthCheck,
 		)

==========

@@ -5,14 +5,14 @@ import (
 	"errors"
 	"fmt"
 	"sync"
-	"sync/atomic"
-	"time"

 	"github.com/sirupsen/logrus"
+	"github.com/yusing/go-proxy/internal/common"
 	E "github.com/yusing/go-proxy/internal/error"
 	url "github.com/yusing/go-proxy/internal/net/types"
 	P "github.com/yusing/go-proxy/internal/proxy"
 	PT "github.com/yusing/go-proxy/internal/proxy/fields"
+	F "github.com/yusing/go-proxy/internal/utils/functional"
 	"github.com/yusing/go-proxy/internal/watcher/health"
 )
@@ -20,16 +20,18 @@ type StreamRoute struct {
 	*P.StreamEntry
 	StreamImpl `json:"-"`

+	HealthMon health.HealthMonitor `json:"health"`
+
 	url url.URL
-	healthMon health.HealthMonitor

 	wg     sync.WaitGroup
-	ctx    context.Context
+	task   common.Task
 	cancel context.CancelFunc
 	connCh chan any
-	started atomic.Bool
 	l      logrus.FieldLogger
+	mu     sync.Mutex
 }

 type StreamImpl interface {
@@ -40,6 +42,12 @@ type StreamImpl interface {
 	String() string
 }

+var streamRoutes = F.NewMapOf[string, *StreamRoute]()
+
+func GetStreamProxies() F.Map[string, *StreamRoute] {
+	return streamRoutes
+}
+
 func NewStreamRoute(entry *P.StreamEntry) (*StreamRoute, E.NestedError) {
 	// TODO: support non-coherent scheme
 	if !entry.Scheme.IsCoherent() {
@@ -60,9 +68,6 @@ func NewStreamRoute(entry *P.StreamEntry) (*StreamRoute, E.NestedError) {
 	} else {
 		base.StreamImpl = NewUDPRoute(base)
 	}
-	if !entry.Healthcheck.Disabled {
-		base.healthMon = health.NewRawHealthMonitor(base.ctx, string(entry.Alias), url, entry.Healthcheck)
-	}
 	base.l = logrus.WithField("route", base.StreamImpl)
 	return base, nil
 }
@@ -76,72 +81,71 @@ func (r *StreamRoute) URL() url.URL {
 }

 func (r *StreamRoute) Start() E.NestedError {
-	if r.Port.ProxyPort == PT.NoPort || r.started.Load() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.Port.ProxyPort == PT.NoPort || r.task != nil {
 		return nil
 	}
-	r.ctx, r.cancel = context.WithCancel(context.Background())
+	r.task, r.cancel = common.NewTaskWithCancel(r.String())
 	r.wg.Wait()
 	if err := r.Setup(); err != nil {
 		return E.FailWith("setup", err)
 	}
 	r.l.Infof("listening on port %d", r.Port.ListeningPort)
-	r.started.Store(true)
 	r.wg.Add(2)
-	go r.grAcceptConnections()
-	go r.grHandleConnections()
-	if r.healthMon != nil {
-		r.healthMon.Start()
+	go r.acceptConnections()
+	go r.handleConnections()
+	if !r.Healthcheck.Disabled {
+		r.HealthMon = health.NewRawHealthMonitor(r.task, r.URL(), r.Healthcheck)
+		r.HealthMon.Start()
 	}
+	streamRoutes.Store(string(r.Alias), r)
 	return nil
 }

 func (r *StreamRoute) Stop() E.NestedError {
-	if !r.started.Load() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.task == nil {
 		return nil
 	}
-	r.started.Store(false)
-	if r.healthMon != nil {
-		r.healthMon.Stop()
+	streamRoutes.Delete(string(r.Alias))
+	if r.HealthMon != nil {
+		r.HealthMon.Stop()
+		r.HealthMon = nil
 	}
 	r.cancel()
 	r.CloseListeners()
-	done := make(chan struct{}, 1)
-	go func() {
-		r.wg.Wait()
-		close(done)
-	}()
-	timeout := time.After(streamStopListenTimeout)
-	for {
-		select {
-		case <-done:
-			r.l.Debug("stopped listening")
-			return nil
-		case <-timeout:
-			return E.FailedWhy("stop", "timed out")
-		}
-	}
+	r.wg.Wait()
+	r.task.Finished()
+	r.task, r.cancel = nil, nil
+	return nil
 }

 func (r *StreamRoute) Started() bool {
-	return r.started.Load()
+	return r.task != nil
 }

-func (r *StreamRoute) grAcceptConnections() {
+func (r *StreamRoute) acceptConnections() {
 	defer r.wg.Done()
 	for {
 		select {
-		case <-r.ctx.Done():
+		case <-r.task.Context().Done():
 			return
 		default:
 			conn, err := r.Accept()
 			if err != nil {
 				select {
-				case <-r.ctx.Done():
+				case <-r.task.Context().Done():
 					return
 				default:
 					r.l.Error(err)
@@ -153,12 +157,12 @@ func (r *StreamRoute) grAcceptConnections() {
 	}
 }

-func (r *StreamRoute) grHandleConnections() {
+func (r *StreamRoute) handleConnections() {
 	defer r.wg.Done()
 	for {
 		select {
-		case <-r.ctx.Done():
+		case <-r.task.Context().Done():
 			return
 		case conn := <-r.connCh:
 			go func() {

==========

@@ -51,7 +51,7 @@ func (route *TCPRoute) Handle(c any) error {
 	defer clientConn.Close()

-	ctx, cancel := context.WithTimeout(route.ctx, tcpDialTimeout)
+	ctx, cancel := context.WithTimeout(route.task.Context(), tcpDialTimeout)
 	defer cancel()

 	serverAddr := fmt.Sprintf("%s:%v", route.Host, route.Port.ProxyPort)
@@ -64,7 +64,7 @@ func (route *TCPRoute) Handle(c any) error {
 	route.mu.Lock()
-	pipe := U.NewBidirectionalPipe(route.ctx, clientConn, serverConn)
+	pipe := U.NewBidirectionalPipe(route.task.Context(), clientConn, serverConn)
 	route.pipe = append(route.pipe, pipe)
 	route.mu.Unlock()

==========

@@ -93,7 +93,7 @@ func (route *UDPRoute) Accept() (any, error) {
 			key,
 			srcConn,
 			dstConn,
-			U.NewBidirectionalPipe(route.ctx, sourceRWCloser{in, dstConn}, sourceRWCloser{in, srcConn}),
+			U.NewBidirectionalPipe(route.task.Context(), sourceRWCloser{in, dstConn}, sourceRWCloser{in, srcConn}),
 		}
 		route.connMap.Store(key, conn)
 	}

==========

@@ -1,11 +1,11 @@
 package health

 import (
-	"context"
 	"crypto/tls"
 	"errors"
 	"net/http"

+	"github.com/yusing/go-proxy/internal/common"
 	"github.com/yusing/go-proxy/internal/net/types"
 )
@@ -15,9 +15,9 @@ type HTTPHealthMonitor struct {
 	pinger *http.Client
 }

-func NewHTTPHealthMonitor(ctx context.Context, name string, url types.URL, config HealthCheckConfig) HealthMonitor {
+func NewHTTPHealthMonitor(task common.Task, url types.URL, config HealthCheckConfig) HealthMonitor {
 	mon := new(HTTPHealthMonitor)
-	mon.monitor = newMonitor(ctx, name, url, &config, mon.checkHealth)
+	mon.monitor = newMonitor(task, url, &config, mon.checkHealth)
 	mon.pinger = &http.Client{Timeout: config.Timeout}
 	if config.UseGet {
 		mon.method = http.MethodGet
@@ -29,7 +29,7 @@ func NewHTTPHealthMonitor(ctx context.Context, name string, url types.URL, config HealthCheckConfig) HealthMonitor {
 func (mon *HTTPHealthMonitor) checkHealth() (healthy bool, detail string, err error) {
 	req, reqErr := http.NewRequestWithContext(
-		mon.ctx,
+		mon.task.Context(),
 		mon.method,
 		mon.URL.String(),
 		nil,

==========

@@ -7,6 +7,7 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/yusing/go-proxy/internal/common"
 	"github.com/yusing/go-proxy/internal/net/types"
 	F "github.com/yusing/go-proxy/internal/utils/functional"
 )
@@ -27,7 +28,7 @@ type (
 		healthy     atomic.Bool
 		checkHealth HealthCheckFunc

-		ctx    context.Context
+		task   common.Task
 		cancel context.CancelFunc
 		done   chan struct{}
@@ -37,22 +38,18 @@ type (

 var monMap = F.NewMapOf[string, HealthMonitor]()

-func newMonitor(parentCtx context.Context, name string, url types.URL, config *HealthCheckConfig, healthCheckFunc HealthCheckFunc) *monitor {
-	if parentCtx == nil {
-		parentCtx = context.Background()
-	}
-	ctx, cancel := context.WithCancel(parentCtx)
+func newMonitor(task common.Task, url types.URL, config *HealthCheckConfig, healthCheckFunc HealthCheckFunc) *monitor {
+	task, cancel := task.SubtaskWithCancel("Health monitor for %s", task.Name())
 	mon := &monitor{
-		Name:        name,
+		Name:        task.Name(),
 		URL:         url.JoinPath(config.Path),
 		Interval:    config.Interval,
 		checkHealth: healthCheckFunc,
-		ctx:    ctx,
+		task:   task,
 		cancel: cancel,
 		done:   make(chan struct{}),
 	}
 	mon.healthy.Store(true)
-	monMap.Store(name, mon)
 	return mon
 }
@@ -65,8 +62,12 @@ func IsHealthy(name string) (healthy bool, ok bool) {
 }

 func (mon *monitor) Start() {
+	defer monMap.Store(mon.Name, mon)
+	defer logger.Debugf("%s health monitor started", mon)
 	go func() {
 		defer close(mon.done)
+		defer mon.task.Finished()

 		ok := mon.checkUpdateHealth()
 		if !ok {
@@ -78,7 +79,7 @@ func (mon *monitor) Start() {
 		for {
 			select {
-			case <-mon.ctx.Done():
+			case <-mon.task.Context().Done():
 				return
 			case <-ticker.C:
 				ok = mon.checkUpdateHealth()
@@ -92,7 +93,7 @@ func (mon *monitor) Start() {
 }

 func (mon *monitor) Stop() {
-	defer logger.Debugf("health monitor %q stopped", mon)
+	defer logger.Debugf("%s health monitor stopped", mon)
 	monMap.Delete(mon.Name)

==========

@@ -1,9 +1,9 @@
 package health

 import (
-	"context"
 	"net"

+	"github.com/yusing/go-proxy/internal/common"
 	"github.com/yusing/go-proxy/internal/net/types"
 )
@@ -14,9 +14,9 @@ type (
 	}
 )

-func NewRawHealthMonitor(ctx context.Context, name string, url types.URL, config HealthCheckConfig) HealthMonitor {
+func NewRawHealthMonitor(task common.Task, url types.URL, config HealthCheckConfig) HealthMonitor {
 	mon := new(RawHealthMonitor)
-	mon.monitor = newMonitor(ctx, name, url, &config, mon.checkAvail)
+	mon.monitor = newMonitor(task, url, &config, mon.checkAvail)
 	mon.dialer = &net.Dialer{
 		Timeout:       config.Timeout,
 		FallbackDelay: -1,
@@ -25,7 +25,7 @@ func NewRawHealthMonitor(ctx context.Context, name string, url types.URL, config HealthCheckConfig) HealthMonitor {
 }

 func (mon *RawHealthMonitor) checkAvail() (avail bool, detail string, err error) {
-	conn, dialErr := mon.dialer.DialContext(mon.ctx, mon.URL.Scheme, mon.URL.Host)
+	conn, dialErr := mon.dialer.DialContext(mon.task.Context(), mon.URL.Scheme, mon.URL.Host)
 	if dialErr != nil {
 		detail = dialErr.Error()
 		/* trunk-ignore(golangci-lint/nilerr) */