Mirror of https://github.com/yusing/godoxy.git (synced 2025-07-21 20:04:03 +02:00)
refactor: clean up code and fix race condition in idlewatcher
Commit a7da8ffb90 (parent 95fe294f7d)
8 changed files with 264 additions and 196 deletions
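The race fix in one sentence: the watcher used to track container liveness in two independently mutated places, a ContainerRunning field on the shared idlewatcher Config and a ready atomic.Bool on the waker, so a reader could observe the pair mid-update; the refactor replaces both with a single containerState value that is swapped atomically as a whole, giving every reader a consistent snapshot (a standard-library sketch of the pattern follows the new state.go below). The commit also renames the error package alias E to gperr, moves the watcher registry to a plain map behind an RWMutex, and extracts the Docker helper calls into the new container.go.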
internal/docker/idlewatcher/container.go (new file, 52 lines)

@@ -0,0 +1,52 @@
+package idlewatcher
+
+import (
+	"context"
+	"errors"
+
+	"github.com/docker/docker/api/types/container"
+)
+
+type (
+	containerMeta struct {
+		ContainerID, ContainerName string
+	}
+	containerState struct {
+		running bool
+		ready   bool
+		err     error
+	}
+)
+
+func (w *Watcher) containerStop(ctx context.Context) error {
+	return w.client.ContainerStop(ctx, w.ContainerID, container.StopOptions{
+		Signal:  string(w.StopSignal),
+		Timeout: &w.StopTimeout,
+	})
+}
+
+func (w *Watcher) containerPause(ctx context.Context) error {
+	return w.client.ContainerPause(ctx, w.ContainerID)
+}
+
+func (w *Watcher) containerKill(ctx context.Context) error {
+	return w.client.ContainerKill(ctx, w.ContainerID, string(w.StopSignal))
+}
+
+func (w *Watcher) containerUnpause(ctx context.Context) error {
+	return w.client.ContainerUnpause(ctx, w.ContainerID)
+}
+
+func (w *Watcher) containerStart(ctx context.Context) error {
+	return w.client.ContainerStart(ctx, w.ContainerID, container.StartOptions{})
+}
+
+func (w *Watcher) containerStatus() (string, error) {
+	ctx, cancel := context.WithTimeoutCause(w.task.Context(), dockerReqTimeout, errors.New("docker request timeout"))
+	defer cancel()
+	json, err := w.client.ContainerInspect(ctx, w.ContainerID)
+	if err != nil {
+		return "", err
+	}
+	return json.State.Status, nil
+}
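One detail in containerStatus worth calling out: context.WithTimeoutCause (standard library, Go 1.21+) attaches a descriptive cause to the deadline, so a caller that inspects context.Cause sees "docker request timeout" instead of the bare context.DeadlineExceeded. A minimal, self-contained illustration:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeoutCause(context.Background(),
		10*time.Millisecond, errors.New("docker request timeout"))
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // docker request timeout
}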
(modified file, name not shown in this mirror: the loading-page template)

@@ -6,7 +6,7 @@ import (
 	"strings"
 	"text/template"

-	"github.com/yusing/go-proxy/internal/common"
+	"github.com/yusing/go-proxy/internal/net/gphttp/httpheaders"
 )

 type templateData struct {
@@ -23,11 +23,11 @@ func (w *Watcher) makeLoadingPageBody() []byte {
 	msg := w.ContainerName + " is starting..."

 	data := new(templateData)
-	data.CheckRedirectHeader = common.HeaderCheckRedirect
+	data.CheckRedirectHeader = httpheaders.HeaderGoDoxyCheckRedirect
 	data.Title = w.ContainerName
 	data.Message = strings.ReplaceAll(msg, " ", "&nbsp;")

-	buf := bytes.NewBuffer(make([]byte, len(loadingPage)+len(data.Title)+len(data.Message)+len(common.HeaderCheckRedirect)))
+	buf := bytes.NewBuffer(make([]byte, len(loadingPage)+len(data.Title)+len(data.Message)+len(httpheaders.HeaderGoDoxyCheckRedirect)))
 	err := loadingPageTmpl.Execute(buf, data)
 	if err != nil { // should never happen in production
 		panic(err)
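An aside on the buf line this hunk keeps (the pattern predates the commit): in Go, bytes.NewBuffer(make([]byte, n)) does not pre-allocate an empty buffer; it creates one already holding n zero bytes, and writes append after them. Capacity-only pre-allocation is spelled make([]byte, 0, n). A quick demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	filled := bytes.NewBuffer(make([]byte, 4))
	filled.WriteString("hi")
	fmt.Printf("%q\n", filled.String()) // "\x00\x00\x00\x00hi"

	empty := bytes.NewBuffer(make([]byte, 0, 4))
	empty.WriteString("hi")
	fmt.Printf("%q\n", empty.String()) // "hi"
}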
internal/docker/idlewatcher/state.go (new file, 39 lines)

@@ -0,0 +1,39 @@
+package idlewatcher
+
+func (w *Watcher) running() bool {
+	return w.state.Load().running
+}
+
+func (w *Watcher) ready() bool {
+	return w.state.Load().ready
+}
+
+func (w *Watcher) error() error {
+	return w.state.Load().err
+}
+
+func (w *Watcher) setReady() {
+	w.state.Store(&containerState{
+		running: true,
+		ready:   true,
+	})
+}
+
+func (w *Watcher) setStarting() {
+	w.state.Store(&containerState{
+		running: true,
+		ready:   false,
+	})
+}
+
+func (w *Watcher) setNapping() {
+	w.setError(nil)
+}
+
+func (w *Watcher) setError(err error) {
+	w.state.Store(&containerState{
+		running: false,
+		ready:   false,
+		err:     err,
+	})
+}
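This file is the heart of the race fix. running and ready are no longer separate fields mutated independently; every transition stores a fresh, immutable *containerState, and every read loads the whole triple at once, so no interleaving can expose running == false together with ready == true. The commit uses GoDoxy's internal utils/atomic.Value; the sketch below shows the same pattern with only the standard library (types and names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

type containerState struct {
	running bool
	ready   bool
	err     error
}

type watcher struct {
	state atomic.Pointer[containerState]
}

// setReady publishes a complete new snapshot in one atomic store.
func (w *watcher) setReady() {
	w.state.Store(&containerState{running: true, ready: true})
}

// status reads one coherent snapshot; no torn (running, ready, err) combos.
func (w *watcher) status() string {
	s := w.state.Load()
	switch {
	case s.err != nil:
		return "error"
	case s.ready:
		return "healthy"
	case s.running:
		return "starting"
	default:
		return "napping"
	}
}

func main() {
	w := &watcher{}
	w.state.Store(&containerState{}) // initial state: napping
	w.setReady()
	fmt.Println(w.status()) // healthy
}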
(modified file, name not shown in this mirror: the idlewatcher Config type and ValidateConfig, in the idlewatcher types package)

@@ -7,7 +7,7 @@ import (
 	"time"

 	"github.com/yusing/go-proxy/internal/docker"
-	E "github.com/yusing/go-proxy/internal/error"
+	"github.com/yusing/go-proxy/internal/gperr"
 )

 type (
@@ -18,11 +18,6 @@ type (
 		StopMethod    StopMethod `json:"stop_method,omitempty"`
 		StopSignal    Signal     `json:"stop_signal,omitempty"`
 		StartEndpoint string     `json:"start_endpoint,omitempty"` // Optional path that must be hit to start container
-
-		DockerHost       string `json:"docker_host,omitempty"`
-		ContainerName    string `json:"container_name,omitempty"`
-		ContainerID      string `json:"container_id,omitempty"`
-		ContainerRunning bool   `json:"container_running,omitempty"`
 	}
 	StopMethod string
 	Signal     string
@@ -40,28 +35,19 @@ var validSignals = map[string]struct{}{
 	"INT": {}, "TERM": {}, "HUP": {}, "QUIT": {},
 }

-func ValidateConfig(cont *docker.Container) (*Config, E.Error) {
-	if cont == nil {
+func ValidateConfig(cont *docker.Container) (*Config, gperr.Error) {
+	if cont == nil || cont.IdleTimeout == "" {
 		return nil, nil
 	}

-	if cont.IdleTimeout == "" {
-		return &Config{
-			DockerHost:       cont.DockerHost,
-			ContainerName:    cont.ContainerName,
-			ContainerID:      cont.ContainerID,
-			ContainerRunning: cont.Running,
-		}, nil
-	}
-
-	errs := E.NewBuilder("invalid idlewatcher config")
-	idleTimeout := E.Collect(errs, validateDurationPostitive, cont.IdleTimeout)
-	wakeTimeout := E.Collect(errs, validateDurationPostitive, cont.WakeTimeout)
-	stopTimeout := E.Collect(errs, validateDurationPostitive, cont.StopTimeout)
-	stopMethod := E.Collect(errs, validateStopMethod, cont.StopMethod)
-	signal := E.Collect(errs, validateSignal, cont.StopSignal)
-	startEndpoint := E.Collect(errs, validateStartEndpoint, cont.StartEndpoint)
+	errs := gperr.NewBuilder("invalid idlewatcher config")
+	idleTimeout := gperr.Collect(errs, validateDurationPostitive, cont.IdleTimeout)
+	wakeTimeout := gperr.Collect(errs, validateDurationPostitive, cont.WakeTimeout)
+	stopTimeout := gperr.Collect(errs, validateDurationPostitive, cont.StopTimeout)
+	stopMethod := gperr.Collect(errs, validateStopMethod, cont.StopMethod)
+	signal := gperr.Collect(errs, validateSignal, cont.StopSignal)
+	startEndpoint := gperr.Collect(errs, validateStartEndpoint, cont.StartEndpoint)

 	if errs.HasError() {
 		return nil, errs.Error()
@@ -74,11 +60,6 @@ func ValidateConfig(cont *docker.Container) (*Config, E.Error) {
 		StopMethod:    stopMethod,
 		StopSignal:    signal,
 		StartEndpoint: startEndpoint,
-
-		DockerHost:       cont.DockerHost,
-		ContainerName:    cont.ContainerName,
-		ContainerID:      cont.ContainerID,
-		ContainerRunning: cont.Running,
 	}, nil
 }
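ValidateConfig keeps the collect-style validation: each gperr.Collect call parses one value, records any error into the builder, and continues, so a single pass reports every config mistake at once. gperr is GoDoxy-internal; the sketch below approximates the idea with the standard library, and collect is an illustrative stand-in rather than the real API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// collect parses v, appends any parse error to errs, and returns the value.
// Validation keeps going so all problems surface together.
func collect[T any](errs *[]error, parse func(string) (T, error), v string) T {
	out, err := parse(v)
	if err != nil {
		*errs = append(*errs, err)
	}
	return out
}

func main() {
	var errs []error
	idle := collect(&errs, time.ParseDuration, "10m")
	wake := collect(&errs, time.ParseDuration, "not-a-duration")

	if err := errors.Join(errs...); err != nil {
		fmt.Println("invalid idlewatcher config:", err)
		return
	}
	fmt.Println(idle, wake)
}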
(modified file, name not shown in this mirror: the waker, which bridges health checking and the watcher)

@@ -1,14 +1,12 @@
 package idlewatcher

 import (
-	"sync/atomic"
 	"time"

-	"github.com/yusing/go-proxy/internal/common"
 	"github.com/yusing/go-proxy/internal/docker/idlewatcher/types"
-	E "github.com/yusing/go-proxy/internal/error"
+	"github.com/yusing/go-proxy/internal/gperr"
 	"github.com/yusing/go-proxy/internal/metrics"
-	"github.com/yusing/go-proxy/internal/net/http/reverseproxy"
+	"github.com/yusing/go-proxy/internal/net/gphttp/reverseproxy"
 	net "github.com/yusing/go-proxy/internal/net/types"
 	route "github.com/yusing/go-proxy/internal/route/types"
 	"github.com/yusing/go-proxy/internal/task"
@@ -26,8 +24,6 @@ type (
 		stream net.Stream
 		hc     health.HealthChecker
 		metric *metrics.Gauge
-
-		ready atomic.Bool
 	}
 )
@@ -38,7 +34,7 @@ const (

 // TODO: support stream

-func newWaker(parent task.Parent, route route.Route, rp *reverseproxy.ReverseProxy, stream net.Stream) (Waker, E.Error) {
+func newWaker(parent task.Parent, route route.Route, rp *reverseproxy.ReverseProxy, stream net.Stream) (Waker, gperr.Error) {
 	hcCfg := route.HealthCheckConfig()
 	hcCfg.Timeout = idleWakerCheckTimeout

@@ -46,13 +42,14 @@ func newWaker(parent task.Parent, route route.Route, rp *reverseproxy.ReverseProxy, stream net.Stream) (Waker, gperr.Error) {
 		rp:     rp,
 		stream: stream,
 	}
-	task := parent.Subtask("idlewatcher." + route.TargetName())
-	watcher, err := registerWatcher(task, route, waker)
+	watcher, err := registerWatcher(parent, route, waker)
 	if err != nil {
-		return nil, E.Errorf("register watcher: %w", err)
+		return nil, gperr.Errorf("register watcher: %w", err)
 	}

 	switch {
+	case route.IsAgent():
+		waker.hc = monitor.NewAgentProxiedMonitor(route.Agent(), hcCfg, monitor.AgentTargetFromURL(route.TargetURL()))
 	case rp != nil:
 		waker.hc = monitor.NewHTTPHealthChecker(route.TargetURL(), hcCfg)
 	case stream != nil:
@@ -61,26 +58,20 @@
 		panic("both nil")
 	}

-	if common.PrometheusEnabled {
-		m := metrics.GetServiceMetrics()
-		fqn := parent.Name() + "/" + route.TargetName()
-		waker.metric = m.HealthStatus.With(metrics.HealthMetricLabels(fqn))
-		waker.metric.Set(float64(watcher.Status()))
-	}
 	return watcher, nil
 }

 // lifetime should follow route provider.
-func NewHTTPWaker(parent task.Parent, route route.Route, rp *reverseproxy.ReverseProxy) (Waker, E.Error) {
+func NewHTTPWaker(parent task.Parent, route route.Route, rp *reverseproxy.ReverseProxy) (Waker, gperr.Error) {
 	return newWaker(parent, route, rp, nil)
 }

-func NewStreamWaker(parent task.Parent, route route.Route, stream net.Stream) (Waker, E.Error) {
+func NewStreamWaker(parent task.Parent, route route.Route, stream net.Stream) (Waker, gperr.Error) {
 	return newWaker(parent, route, nil, stream)
 }

 // Start implements health.HealthMonitor.
-func (w *Watcher) Start(parent task.Parent) E.Error {
+func (w *Watcher) Start(parent task.Parent) gperr.Error {
 	w.task.OnCancel("route_cleanup", func() {
 		parent.Finish(w.task.FinishCause())
 		if w.metric != nil {
@@ -124,33 +115,50 @@ func (w *Watcher) Latency() time.Duration {

 // Status implements health.HealthMonitor.
 func (w *Watcher) Status() health.Status {
-	status := w.getStatusUpdateReady()
-	if w.metric != nil {
-		w.metric.Set(float64(status))
-	}
-	return status
-}
-
-func (w *Watcher) getStatusUpdateReady() health.Status {
-	if !w.ContainerRunning {
-		return health.StatusNapping
-	}
-
-	if w.ready.Load() {
-		return health.StatusHealthy
-	}
-
-	result, err := w.hc.CheckHealth()
-	switch {
-	case err != nil:
-		w.ready.Store(false)
+	state := w.state.Load()
+	if state.err != nil {
 		return health.StatusError
-	case result.Healthy:
-		w.ready.Store(true)
+	}
+	if state.ready {
 		return health.StatusHealthy
-	default:
+	}
+	if state.running {
 		return health.StatusStarting
 	}
+	return health.StatusNapping
+}
+
+func (w *Watcher) checkUpdateState() (ready bool, err error) {
+	// already ready
+	if w.ready() {
+		return true, nil
+	}
+
+	if !w.running() {
+		return false, nil
+	}
+
+	if w.metric != nil {
+		defer w.metric.Set(float64(w.Status()))
+	}
+
+	// the new container info not yet updated
+	if w.hc.URL().Host == "" {
+		return false, nil
+	}
+
+	res, err := w.hc.CheckHealth()
+	if err != nil {
+		w.setError(err)
+		return false, err
+	}
+
+	if res.Healthy {
+		w.setReady()
+		return true, nil
+	}
+	w.setStarting()
+	return false, nil
 }

 // MarshalJSON implements health.HealthMonitor.
@@ -159,10 +167,15 @@ func (w *Watcher) MarshalJSON() ([]byte, error) {
 	if w.hc.URL().Port() != "0" {
 		url = w.hc.URL()
 	}
+	var detail string
+	if err := w.error(); err != nil {
+		detail = err.Error()
+	}
 	return (&monitor.JSONRepresentation{
 		Name:   w.Name(),
 		Status: w.Status(),
 		Config: w.hc.Config(),
 		URL:    url,
+		Detail: detail,
 	}).MarshalJSON()
 }
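The important shape here: Status() is now a pure read of the snapshot, and checkUpdateState() is the only place that actively probes the health checker and publishes the result. Both wake paths in the next two files drive it the same way: poll until ready, abort on probe error or cancellation. A self-contained sketch of that contract, with probe standing in for checkUpdateState and the tick interval chosen arbitrarily:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitReady polls probe until it reports ready, the probe fails,
// or ctx is cancelled (in which case the cancellation cause is returned).
func waitReady(ctx context.Context, probe func() (bool, error)) error {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return context.Cause(ctx)
		case <-ticker.C:
			ready, err := probe()
			if err != nil {
				return err
			}
			if ready {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeoutCause(context.Background(),
		time.Second, errors.New("wake timeout"))
	defer cancel()

	start := time.Now()
	err := waitReady(ctx, func() (bool, error) {
		// pretend the container becomes healthy after ~300ms
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println(err) // <nil>
}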
(modified file, name not shown in this mirror: the HTTP wake path)

@@ -7,9 +7,8 @@ import (
 	"strconv"
 	"time"

-	"github.com/yusing/go-proxy/internal/common"
-	gphttp "github.com/yusing/go-proxy/internal/net/http"
-	"github.com/yusing/go-proxy/internal/watcher/health"
+	gphttp "github.com/yusing/go-proxy/internal/net/gphttp"
+	"github.com/yusing/go-proxy/internal/net/gphttp/httpheaders"
 )

 type ForceCacheControl struct {
@@ -42,11 +41,25 @@ func (w *Watcher) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 	}
 }

+func (w *Watcher) cancelled(reqCtx context.Context, rw http.ResponseWriter) bool {
+	select {
+	case <-reqCtx.Done():
+		w.WakeDebug().Str("cause", context.Cause(reqCtx).Error()).Msg("canceled")
+		return true
+	case <-w.task.Context().Done():
+		w.WakeDebug().Str("cause", w.task.FinishCause().Error()).Msg("canceled")
+		http.Error(rw, "Service unavailable", http.StatusServiceUnavailable)
+		return true
+	default:
+		return false
+	}
+}
+
 func (w *Watcher) wakeFromHTTP(rw http.ResponseWriter, r *http.Request) (shouldNext bool) {
 	w.resetIdleTimer()

 	// pass through if container is already ready
-	if w.ready.Load() {
+	if w.ready() {
 		return true
 	}
@@ -56,14 +69,10 @@ func (w *Watcher) wakeFromHTTP(rw http.ResponseWriter, r *http.Request) (shouldNext bool) {
 		return false
 	}

-	if r.Body != nil {
-		defer r.Body.Close()
-	}
-
 	accept := gphttp.GetAccept(r.Header)
 	acceptHTML := (r.Method == http.MethodGet && accept.AcceptHTML() || r.RequestURI == "/" && accept.IsEmpty())

-	isCheckRedirect := r.Header.Get(common.HeaderCheckRedirect) != ""
+	isCheckRedirect := r.Header.Get(httpheaders.HeaderGoDoxyCheckRedirect) != ""
 	if !isCheckRedirect && acceptHTML {
 		// Send a loading response to the client
 		body := w.makeLoadingPageBody()
@@ -82,21 +91,7 @@ func (w *Watcher) wakeFromHTTP(rw http.ResponseWriter, r *http.Request) (shouldNext bool) {
 	ctx, cancel := context.WithTimeoutCause(r.Context(), w.WakeTimeout, errors.New("wake timeout"))
 	defer cancel()

-	checkCanceled := func() (canceled bool) {
-		select {
-		case <-ctx.Done():
-			w.WakeDebug().Str("cause", context.Cause(ctx).Error()).Msg("canceled")
-			return true
-		case <-w.task.Context().Done():
-			w.WakeDebug().Str("cause", w.task.FinishCause().Error()).Msg("canceled")
-			http.Error(rw, "Service unavailable", http.StatusServiceUnavailable)
-			return true
-		default:
-			return false
-		}
-	}
-
-	if checkCanceled() {
+	if w.cancelled(ctx, rw) {
 		return false
 	}
@@ -109,11 +104,16 @@ func (w *Watcher) wakeFromHTTP(rw http.ResponseWriter, r *http.Request) (shouldNext bool) {
 	}

 	for {
-		if checkCanceled() {
+		if w.cancelled(ctx, rw) {
 			return false
 		}

-		if w.Status() == health.StatusHealthy {
+		ready, err := w.checkUpdateState()
+		if err != nil {
+			http.Error(rw, "Error waking container", http.StatusInternalServerError)
+			return false
+		}
+		if ready {
 			w.resetIdleTimer()
 			if isCheckRedirect {
 				w.Debug().Msgf("redirecting to %s ...", w.hc.URL())
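The new cancelled helper deduplicates what used to be an inline closure: a non-blocking poll of two contexts via select with a default branch, distinguishing "the request died" from "the watcher itself is shutting down". In miniature (names illustrative):

package main

import (
	"context"
	"fmt"
)

// cancelled returns immediately: true if either context is done,
// false otherwise, thanks to the default branch.
func cancelled(req, task context.Context) bool {
	select {
	case <-req.Done():
		return true // client gave up or the wake timed out
	case <-task.Done():
		return true // the watcher itself is shutting down
	default:
		return false // neither: keep waiting
	}
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	taskCtx := context.Background()

	fmt.Println(cancelled(reqCtx, taskCtx)) // false
	cancel()
	fmt.Println(cancelled(reqCtx, taskCtx)) // true
}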
(modified file, name not shown in this mirror: the stream wake path)

@@ -8,7 +8,6 @@ import (
 	"time"

 	"github.com/yusing/go-proxy/internal/net/types"
-	"github.com/yusing/go-proxy/internal/watcher/health"
 )

 // Setup implements types.Stream.
@@ -50,7 +49,7 @@ func (w *Watcher) wakeFromStream() error {
 	w.resetIdleTimer()

 	// pass through if container is already ready
-	if w.ready.Load() {
+	if w.ready() {
 		return nil
 	}
@@ -78,7 +77,9 @@ func (w *Watcher) wakeFromStream() error {
 	default:
 	}

-	if w.Status() == health.StatusHealthy {
+	if ready, err := w.checkUpdateState(); err != nil {
+		return err
+	} else if ready {
 		w.resetIdleTimer()
 		w.Debug().Msg("container is ready, passing through to " + w.hc.URL().String())
 		return nil
(modified file, name not shown in this mirror: the watcher core, covering registration, Docker events, and the idle-timeout loop)

@@ -6,16 +6,15 @@ import (
 	"sync"
 	"time"

-	"github.com/docker/docker/api/types/container"
 	"github.com/rs/zerolog"
-	D "github.com/yusing/go-proxy/internal/docker"
+	"github.com/yusing/go-proxy/internal/docker"
 	idlewatcher "github.com/yusing/go-proxy/internal/docker/idlewatcher/types"
-	E "github.com/yusing/go-proxy/internal/error"
+	"github.com/yusing/go-proxy/internal/gperr"
 	"github.com/yusing/go-proxy/internal/logging"
 	route "github.com/yusing/go-proxy/internal/route/types"
 	"github.com/yusing/go-proxy/internal/task"
 	U "github.com/yusing/go-proxy/internal/utils"
-	F "github.com/yusing/go-proxy/internal/utils/functional"
+	"github.com/yusing/go-proxy/internal/utils/atomic"
 	"github.com/yusing/go-proxy/internal/watcher"
 	"github.com/yusing/go-proxy/internal/watcher/events"
 )
@@ -26,75 +25,89 @@ type (
 		zerolog.Logger

-		*idlewatcher.Config
 		*waker
+		*containerMeta
+		*idlewatcher.Config
+
+		client *docker.SharedClient
+		state  atomic.Value[*containerState]

-		client       *D.SharedClient
 		stopByMethod StopCallback // send a docker command w.r.t. `stop_method`
 		ticker       *time.Ticker
 		lastReset    time.Time
 		task         *task.Task
 	}

-	WakeDone     <-chan error
-	WakeFunc     func() WakeDone
 	StopCallback func() error
 )

 var (
-	watcherMap   = F.NewMapOf[string, *Watcher]()
-	watcherMapMu sync.Mutex
+	watcherMap   = make(map[string]*Watcher)
+	watcherMapMu sync.RWMutex

 	errShouldNotReachHere = errors.New("should not reach here")
 )

 const dockerReqTimeout = 3 * time.Second

-func registerWatcher(watcherTask *task.Task, route route.Route, waker *waker) (*Watcher, error) {
+func registerWatcher(parent task.Parent, route route.Route, waker *waker) (*Watcher, error) {
 	cfg := route.IdlewatcherConfig()
-
 	if cfg.IdleTimeout == 0 {
 		panic(errShouldNotReachHere)
 	}

-	key := cfg.ContainerID
-
-	if w, ok := watcherMap.Load(key); ok {
-		w.Config = cfg
-		w.waker = waker
-		w.resetIdleTimer()
-		watcherTask.Finish("used existing watcher")
-		return w, nil
-	}
-
-	client, err := D.ConnectClient(cfg.DockerHost)
-	if err != nil {
-		return nil, err
-	}
-
-	w := &Watcher{
-		Logger: logging.With().Str("name", cfg.ContainerName).Logger(),
-		Config: cfg,
-		waker:  waker,
-		client: client,
-		task:   watcherTask,
-		ticker: time.NewTicker(cfg.IdleTimeout),
-	}
-	w.stopByMethod = w.getStopCallback()
-	watcherMap.Store(key, w)
-
-	go func() {
-		cause := w.watchUntilDestroy()
-
-		watcherMap.Delete(w.ContainerID)
-
-		w.ticker.Stop()
-		w.client.Close()
-		w.task.Finish(cause)
-	}()
+	cont := route.ContainerInfo()
+	key := cont.ContainerID
+
+	watcherMapMu.Lock()
+	defer watcherMapMu.Unlock()
+
+	w, ok := watcherMap[key]
+	if !ok {
+		client, err := docker.NewClient(cont.DockerHost)
+		if err != nil {
+			return nil, err
+		}
+
+		w = &Watcher{
+			Logger: logging.With().Str("name", cont.ContainerName).Logger(),
+			client: client,
+			task:   parent.Subtask("idlewatcher." + cont.ContainerName),
+			ticker: time.NewTicker(cfg.IdleTimeout),
+		}
+	}
+
+	// FIXME: possible race condition here
+	w.waker = waker
+	w.containerMeta = &containerMeta{
+		ContainerID:   cont.ContainerID,
+		ContainerName: cont.ContainerName,
+	}
+	w.Config = cfg
+	w.ticker.Reset(cfg.IdleTimeout)
+
+	if cont.Running {
+		w.setStarting()
+	} else {
+		w.setNapping()
+	}
+
+	if !ok {
+		w.stopByMethod = w.getStopCallback()
+		watcherMap[key] = w
+
+		go func() {
+			cause := w.watchUntilDestroy()
+
+			watcherMapMu.Lock()
+			defer watcherMapMu.Unlock()
+			delete(watcherMap, key)
+
+			w.ticker.Stop()
+			w.client.Close()
+			w.task.Finish(cause)
+		}()
+	}

 	return w, nil
 }
@@ -118,45 +131,8 @@ func (w *Watcher) WakeError(err error) {
 	w.Err(err).Str("action", "wake").Msg("error")
 }

-func (w *Watcher) LogReason(action, reason string) {
-	w.Info().Str("reason", reason).Msg(action)
-}
-
-func (w *Watcher) containerStop(ctx context.Context) error {
-	return w.client.ContainerStop(ctx, w.ContainerID, container.StopOptions{
-		Signal:  string(w.StopSignal),
-		Timeout: &w.StopTimeout,
-	})
-}
-
-func (w *Watcher) containerPause(ctx context.Context) error {
-	return w.client.ContainerPause(ctx, w.ContainerID)
-}
-
-func (w *Watcher) containerKill(ctx context.Context) error {
-	return w.client.ContainerKill(ctx, w.ContainerID, string(w.StopSignal))
-}
-
-func (w *Watcher) containerUnpause(ctx context.Context) error {
-	return w.client.ContainerUnpause(ctx, w.ContainerID)
-}
-
-func (w *Watcher) containerStart(ctx context.Context) error {
-	return w.client.ContainerStart(ctx, w.ContainerID, container.StartOptions{})
-}
-
-func (w *Watcher) containerStatus() (string, error) {
-	ctx, cancel := context.WithTimeoutCause(w.task.Context(), dockerReqTimeout, errors.New("docker request timeout"))
-	defer cancel()
-	json, err := w.client.ContainerInspect(ctx, w.ContainerID)
-	if err != nil {
-		return "", err
-	}
-	return json.State.Status, nil
-}
-
 func (w *Watcher) wakeIfStopped() error {
-	if w.ContainerRunning {
+	if w.running() {
 		return nil
 	}
@@ -177,7 +153,7 @@ func (w *Watcher) wakeIfStopped() error {
 	case "running":
 		return nil
 	default:
-		return E.Errorf("unexpected container status: %s", status)
+		return gperr.Errorf("unexpected container status: %s", status)
 	}
 }
@@ -210,8 +186,8 @@ func (w *Watcher) expires() time.Time {
 	return w.lastReset.Add(w.IdleTimeout)
 }

-func (w *Watcher) getEventCh(dockerWatcher watcher.DockerWatcher) (eventCh <-chan events.Event, errCh <-chan E.Error) {
-	eventCh, errCh = dockerWatcher.EventsWithOptions(w.Task().Context(), watcher.DockerListOptions{
+func (w *Watcher) getEventCh(ctx context.Context, dockerWatcher *watcher.DockerWatcher) (eventCh <-chan events.Event, errCh <-chan gperr.Error) {
+	eventCh, errCh = dockerWatcher.EventsWithOptions(ctx, watcher.DockerListOptions{
 		Filters: watcher.NewDockerFilter(
 			watcher.DockerFilterContainer,
 			watcher.DockerFilterContainerNameID(w.ContainerID),
@@ -239,8 +215,11 @@
 // it exits only if the context is canceled, the container is destroyed,
 // errors occurred on docker client, or route provider died (mainly caused by config reload).
 func (w *Watcher) watchUntilDestroy() (returnCause error) {
+	eventCtx, eventCancel := context.WithCancel(w.task.Context())
+	defer eventCancel()
+
 	dockerWatcher := watcher.NewDockerWatcher(w.client.DaemonHost())
-	dockerEventCh, dockerEventErrCh := w.getEventCh(dockerWatcher)
+	dockerEventCh, dockerEventErrCh := w.getEventCh(eventCtx, dockerWatcher)

 	for {
 		select {
@@ -248,24 +227,23 @@ func (w *Watcher) watchUntilDestroy() (returnCause error) {
 			return w.task.FinishCause()
 		case err := <-dockerEventErrCh:
 			if !err.Is(context.Canceled) {
-				E.LogError("idlewatcher error", err, &w.Logger)
+				gperr.LogError("idlewatcher error", err, &w.Logger)
 			}
 			return err
 		case e := <-dockerEventCh:
 			switch {
 			case e.Action == events.ActionContainerDestroy:
-				w.ContainerRunning = false
-				w.ready.Store(false)
-				w.LogReason("watcher stopped", "container destroyed")
+				w.setError(errors.New("container destroyed"))
+				w.Info().Str("reason", "container destroyed").Msg("watcher stopped")
 				return errors.New("container destroyed")
 			// create / start / unpause
 			case e.Action.IsContainerWake():
-				w.ContainerRunning = true
+				w.setStarting()
 				w.resetIdleTimer()
 				w.Info().Msg("awaken")
 			case e.Action.IsContainerSleep(): // stop / pause / kil
-				w.ContainerRunning = false
-				w.ready.Store(false)
+				w.setNapping()
+				w.resetIdleTimer()
 				w.ticker.Stop()
 			default:
 				w.Error().Msg("unexpected docker event: " + e.String())
@@ -279,11 +257,15 @@ func (w *Watcher) watchUntilDestroy() (returnCause error) {
 			w.Debug().Msgf("id changed %s -> %s", w.ContainerID, e.ActorID)
 			w.ContainerID = e.ActorID
 			// recreate event stream
-			dockerEventCh, dockerEventErrCh = w.getEventCh(dockerWatcher)
+			eventCancel()
+
+			eventCtx, eventCancel = context.WithCancel(w.task.Context())
+			defer eventCancel()
+			dockerEventCh, dockerEventErrCh = w.getEventCh(eventCtx, dockerWatcher)
 			}
 		case <-w.ticker.C:
 			w.ticker.Stop()
-			if w.ContainerRunning {
+			if w.running() {
 				err := w.stopByMethod()
 				switch {
 				case errors.Is(err, context.Canceled):
@@ -294,7 +276,7 @@ func (w *Watcher) watchUntilDestroy() (returnCause error) {
 				}
 				w.Err(err).Msgf("container stop with method %q failed", w.StopMethod)
 			default:
-				w.LogReason("container stopped", "idle timeout")
+				w.Info().Str("reason", "idle timeout").Msg("container stopped")
 			}
 		}
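registerWatcher now follows a plain keyed-registry shape: look up under the lock, create the watcher and launch its supervising goroutine only on first registration, and let that goroutine remove the entry when the watcher dies; the field refresh between the two if !ok blocks is what the in-diff FIXME flags as still racy. A standard-library sketch of the registry shape (identifiers are illustrative, not GoDoxy's API):

package main

import (
	"fmt"
	"sync"
)

type watcher struct{ id string }

var (
	registry   = make(map[string]*watcher)
	registryMu sync.RWMutex
)

// register returns the existing watcher for id, or creates one and starts
// a goroutine that owns its lifetime and deregisters it on exit.
func register(id string, run func(*watcher)) *watcher {
	registryMu.Lock()
	defer registryMu.Unlock()

	w, ok := registry[id]
	if !ok {
		w = &watcher{id: id}
		registry[id] = w
		go func() {
			run(w) // blocks until the watcher's task finishes
			registryMu.Lock()
			defer registryMu.Unlock()
			delete(registry, id)
		}()
	}
	// An existing watcher would have its config refreshed here; doing so
	// without per-watcher synchronization is the race the FIXME notes.
	return w
}

func main() {
	done := make(chan struct{})
	w1 := register("abc123", func(*watcher) { <-done })
	w2 := register("abc123", func(*watcher) { <-done })
	fmt.Println(w1 == w2) // true: one watcher shared per container ID
	close(done)
}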