Mirror of https://github.com/yusing/godoxy.git, synced 2025-07-09 07:54:03 +02:00

Commit c0c61709ca (parent 56b778f19c)

    fixed idlewatcher panic, dashboard app name, route not removing on container destroy

10 changed files with 53 additions and 56 deletions
@@ -125,6 +125,7 @@ func newSubTask(ctx context.Context, name string) *task {
 	}
 	tasksMap.Store(t, struct{}{})
 	taskWg.Add(1)
+	logrus.Debugf("task %q started", name)
 	return t
 }

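For context, newSubTask registers every task in a global set and a WaitGroup so shutdown can wait for, and name, stragglers; the added Debugf pairs task starts with the existing lifecycle logging. A minimal sketch of that registry pattern (assumed shape, not godoxy's actual task package; the standard log package stands in for logrus):

// Minimal sketch of the subtask-registry pattern: every task is
// tracked in a sync.Map and a WaitGroup so graceful shutdown can
// wait for all of them and report any that never finish.
package task

import (
	"context"
	"log"
	"sync"
)

var (
	tasksMap sync.Map       // set of live *task, useful for debugging leaks
	taskWg   sync.WaitGroup // lets shutdown wait for all tasks
)

type task struct {
	ctx  context.Context
	name string
}

func newSubTask(ctx context.Context, name string) *task {
	t := &task{ctx: ctx, name: name}
	tasksMap.Store(t, struct{}{})
	taskWg.Add(1)
	log.Printf("task %q started", name)
	return t
}

// Finished must be called exactly once per task, or taskWg.Wait() hangs.
func (t *task) Finished() {
	tasksMap.Delete(t)
	taskWg.Done()
}
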
@@ -47,9 +47,6 @@ func (cfg *Config) HomepageConfig() homepage.Config {
 		item := entry.Homepage
 		if item == nil {
 			item = new(homepage.Item)
-		}
-
-		if !item.Show && item.IsEmpty() {
 			item.Show = true
 		}

@@ -7,6 +7,7 @@ import (
 	"time"

 	"github.com/sirupsen/logrus"
+	E "github.com/yusing/go-proxy/internal/error"
 	gphttp "github.com/yusing/go-proxy/internal/net/http"
 	"github.com/yusing/go-proxy/internal/net/types"
 	"github.com/yusing/go-proxy/internal/watcher/health"

@@ -125,7 +126,7 @@ func (w *Waker) wake(rw http.ResponseWriter, r *http.Request) (shouldNext bool)

 	select {
 	case <-w.task.Context().Done():
-		http.Error(rw, "Waking timed out", http.StatusGatewayTimeout)
+		http.Error(rw, "Service unavailable", http.StatusServiceUnavailable)
 		return
 	case <-ctx.Done():
 		http.Error(rw, "Waking timed out", http.StatusGatewayTimeout)

@@ -133,12 +134,11 @@ func (w *Waker) wake(rw http.ResponseWriter, r *http.Request) (shouldNext bool)
 	default:
 	}

-	// wake the container and reset idle timer
-	// also wait for another wake request
-	w.wakeCh <- struct{}{}
-	if <-w.wakeDone != nil {
-		http.Error(rw, "Error sending wake request", http.StatusInternalServerError)
+	w.l.Debug("wake signal received")
+	err := w.wakeIfStopped()
+	if err != nil {
+		w.l.Error(E.FailWith("wake", err))
+		http.Error(rw, "Error waking container", http.StatusInternalServerError)
 		return
 	}

@@ -153,6 +153,9 @@ func (w *Waker) wake(rw http.ResponseWriter, r *http.Request) (shouldNext bool)

 	for {
 		select {
+		case <-w.task.Context().Done():
+			http.Error(rw, "Service unavailable", http.StatusServiceUnavailable)
+			return
 		case <-ctx.Done():
 			http.Error(rw, "Waking timed out", http.StatusGatewayTimeout)
 			return

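Taken together, the waker hunks above are the idlewatcher panic fix: wake requests no longer hand off through the watcher-owned wakeCh/wakeDone channels (which, as the later hunks show, watchUntilCancel closed on teardown, so a request racing shutdown could send on a closed channel and panic). Each request now calls wakeIfStopped() directly and checks the watcher task's context first. A minimal, self-contained sketch of the reworked flow, with stand-in types rather than godoxy's:

// A sketch (not the project's actual types) of why the old
// channel-based handoff could panic: a wake request racing shutdown
// may send on a channel the watcher has already closed. The new flow
// checks for shutdown, then wakes synchronously.
package main

import (
	"context"
	"fmt"
)

type waker struct {
	ctx  context.Context // watcher task context; done on shutdown
	wake func() error    // stands in for wakeIfStopped()
}

// handle mirrors the new flow: refuse during shutdown, then wake
// directly instead of sending on a watcher-owned wakeCh.
func (w *waker) handle() (status int) {
	select {
	case <-w.ctx.Done():
		return 503 // Service unavailable: watcher is shutting down
	default:
	}
	if err := w.wake(); err != nil {
		return 500 // Error waking container
	}
	return 200
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	w := &waker{ctx: ctx, wake: func() error { return nil }}
	fmt.Println(w.handle()) // 200
	cancel()
	fmt.Println(w.handle()) // 503; no closed-channel send is possible
}
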
@@ -27,8 +27,6 @@ type (
 		ready        atomic.Bool  // whether the site is ready to accept connection
 		stopByMethod StopCallback // send a docker command w.r.t. `stop_method`

-		wakeCh   chan struct{}
-		wakeDone chan E.NestedError
 		ticker   *time.Ticker

 		task common.Task

@@ -84,8 +82,6 @@ func Register(entry *P.ReverseProxyEntry) (*Watcher, E.NestedError) {
 		ReverseProxyEntry: entry,
 		client:            client,
 		refCount:          U.NewRefCounter(),
-		wakeCh:            make(chan struct{}, 1),
-		wakeDone:          make(chan E.NestedError),
 		ticker:            time.NewTicker(entry.IdleTimeout),
 		l:                 logger.WithField("container", entry.ContainerName),
 	}

@@ -127,6 +123,9 @@ func (w *Watcher) containerStart() error {
 }

 func (w *Watcher) containerStatus() (string, E.NestedError) {
+	if !w.client.Connected() {
+		return "", E.Failure("docker client closed")
+	}
 	json, err := w.client.ContainerInspect(w.task.Context(), w.ContainerID)
 	if err != nil {
 		return "", E.FailWith("inspect container", err)

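The new Connected() guard likely closes another shutdown race: the deferred cleanup below calls w.client.Close(), so a late containerStatus() call could otherwise inspect through an already-closed client. A sketch of a close-aware wrapper, assuming a flag-based shape (names are assumptions, not the project's actual docker helper):

// Sketch of a close-aware client wrapper (assumed shape). Callers
// check Connected() before use instead of failing deep inside the
// SDK after Close() has run.
package docker

import "sync/atomic"

type SharedClient struct {
	closed atomic.Bool
	// ...the underlying *client.Client would live here
}

func (c *SharedClient) Connected() bool { return !c.closed.Load() }

func (c *SharedClient) Close() error {
	c.closed.Store(true)
	return nil // would also close the underlying client
}
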
@@ -201,10 +200,9 @@ func (w *Watcher) watchUntilCancel() {
 	})

 	defer func() {
+		w.cancel()
 		w.ticker.Stop()
 		w.client.Close()
-		close(w.wakeDone)
-		close(w.wakeCh)
 		watcherMap.Delete(w.ContainerID)
 		w.task.Finished()
 	}()

@@ -220,6 +218,7 @@ func (w *Watcher) watchUntilCancel() {
 		case err := <-dockerEventErrCh:
 			if err != nil && err.IsNot(context.Canceled) {
 				w.l.Error(E.FailWith("docker watcher", err))
+				return
 			}
 		case e := <-dockerEventCh:
 			switch {

@@ -227,27 +226,22 @@ func (w *Watcher) watchUntilCancel() {
 			case e.Action.IsContainerWake():
 				w.ContainerRunning = true
 				w.resetIdleTimer()
-				w.l.Info(e)
-			default: // stop / pause / kill
+				w.l.Info("container awaken")
+			case e.Action.IsContainerSleep(): // stop / pause / kill
 				w.ContainerRunning = false
 				w.ticker.Stop()
 				w.ready.Store(false)
-				w.l.Info(e)
+			default:
+				w.l.Errorf("unexpected docker event: %s", e)
 			}
 		case <-w.ticker.C:
 			w.l.Debug("idle timeout")
 			w.ticker.Stop()
 			if err := w.stopByMethod(); err != nil && err.IsNot(context.Canceled) {
 				w.l.Error(E.FailWith("stop", err).Extraf("stop method: %s", w.StopMethod))
+			} else {
+				w.l.Info("stopped by idle timeout")
 			}
-		case <-w.wakeCh:
-			w.l.Debug("wake signal received")
-			w.resetIdleTimer()
-			err := w.wakeIfStopped()
-			if err != nil {
-				w.l.Error(E.FailWith("wake", err))
-			}
-			w.wakeDone <- err
 		}
 	}
 }

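With the wake channel gone, the watch loop's only inputs are docker events and the idle ticker, and resetIdleTimer presumably maps onto restarting that ticker. A self-contained sketch of the idle-timeout pattern the watcher uses (illustrative names, not godoxy's):

// Idle-timeout sketch: a ticker counts down idleness, any "wake"
// resets it, and a fire means the workload should be stopped.
package main

import (
	"fmt"
	"time"
)

func main() {
	idle := 200 * time.Millisecond
	ticker := time.NewTicker(idle)
	defer ticker.Stop()

	wake := make(chan struct{})
	go func() { // simulate one wake event arriving before the timeout
		time.Sleep(100 * time.Millisecond)
		wake <- struct{}{}
	}()

	for {
		select {
		case <-wake:
			fmt.Println("wake: reset idle timer")
			ticker.Reset(idle)
		case <-ticker.C:
			fmt.Println("idle timeout: stop container")
			return
		}
	}
}
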
@@ -89,12 +89,6 @@ func (p *DockerProvider) shouldIgnore(container *D.Container) bool {
 }

 func (p *DockerProvider) OnEvent(event W.Event, oldRoutes R.Routes) (res EventResult) {
-	switch event.Action {
-	case events.ActionContainerStart, events.ActionContainerStop:
-		break
-	default:
-		return
-	}
 	b := E.NewBuilder("event %s error", event)
 	defer b.To(&res.err)

@@ -105,15 +99,26 @@ func (p *DockerProvider) OnEvent(event W.Event, oldRoutes R.Routes) (res EventResult) {
 			matches.Store(k, v)
 		}
 	})
+	//FIXME: docker event die stuck

 	var newRoutes R.Routes
 	var err E.NestedError

-	if matches.Size() == 0 { // id & container name changed
+	switch {
+	// id & container name changed
+	case matches.Size() == 0:
 		matches = oldRoutes
 		newRoutes, err = p.LoadRoutesImpl()
 		b.Add(err)
-	} else {
+	case event.Action == events.ActionContainerDestroy:
+		// stop all old routes
+		matches.RangeAllParallel(func(_ string, v *R.Route) {
+			oldRoutes.Delete(v.Entry.Alias)
+			b.Add(v.Stop())
+			res.nRemoved++
+		})
+		return
+	default:
 		cont, err := D.Inspect(p.dockerHost, event.ActorID)
 		if err != nil {
 			b.Add(E.FailWith("inspect container", err))

@@ -124,6 +129,7 @@ func (p *DockerProvider) OnEvent(event W.Event, oldRoutes R.Routes) (res EventResult) {
 		// stop all old routes
 		matches.RangeAllParallel(func(_ string, v *R.Route) {
 			b.Add(v.Stop())
+			res.nRemoved++
 		})
 		return
 	}

@@ -145,19 +151,13 @@ func (p *DockerProvider) OnEvent(event W.Event, oldRoutes R.Routes) (res EventResult) {
 	newRoutes.RangeAll(func(alias string, newRoute *R.Route) {
 		oldRoute, exists := oldRoutes.Load(alias)
 		if exists {
-			if err := oldRoute.Stop(); err != nil {
-				b.Add(err)
-			}
-		}
-		oldRoutes.Store(alias, newRoute)
-		if err := newRoute.Start(); err != nil {
-			b.Add(err)
-		}
-		if exists {
+			b.Add(oldRoute.Stop())
 			res.nReloaded++
 		} else {
 			res.nAdded++
 		}
+		b.Add(newRoute.Start())
+		oldRoutes.Store(alias, newRoute)
 	})

 	return

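These provider hunks are the core of the route-not-removing fix. Previously, destroy events never reached OnEvent at all: the watch filter list had no DockerFilterDestroy (see the watcher hunks below) and OnEvent returned early for anything but start/stop, so routes of destroyed containers lingered. The new ActionContainerDestroy case stops each matched route and deletes it from the table. A simplified, self-contained sketch of that branch (a plain map and slice stand in for R.Routes and the matcher; RangeAllParallel and the error builder are omitted):

// Destroy-handling sketch: matched routes are stopped and dropped
// from the route table instead of being re-inspected and restarted.
package main

import "fmt"

type Route struct{ Alias string }

func (r *Route) Stop() error { return nil }

func onDestroy(matches []*Route, routes map[string]*Route) (nRemoved int) {
	for _, v := range matches {
		delete(routes, v.Alias) // route table no longer serves this alias
		_ = v.Stop()            // shut down the proxy/health-check for it
		nRemoved++
	}
	return
}

func main() {
	routes := map[string]*Route{"app": {Alias: "app"}}
	n := onDestroy([]*Route{routes["app"]}, routes)
	fmt.Println(n, len(routes)) // 1 0
}
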
@@ -189,9 +189,11 @@ func (p *Provider) watchEvents() {
 		case <-p.watcherTask.Context().Done():
 			return
 		case event := <-events:
-			res := p.OnEvent(event, p.routes)
-			if res.nAdded+res.nRemoved+res.nReloaded > 0 {
+			task := p.watcherTask.Subtask("%s event %s", event.Type, event)
 			l.Infof("%s event %q", event.Type, event)
+			res := p.OnEvent(event, p.routes)
+			task.Finished()
+			if res.nAdded+res.nRemoved+res.nReloaded > 0 {
 				l.Infof("| %d NEW | %d REMOVED | %d RELOADED |", res.nAdded, res.nRemoved, res.nReloaded)
 			}
 			if res.err != nil {

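Each event is now bracketed in a subtask before handling, tying into the task registry from the first hunk: graceful shutdown waits on the task WaitGroup, so in-flight event handlers complete before teardown. A self-contained sketch of the mechanism (illustrative; a bare WaitGroup mirrors Subtask/Finished):

// Why each event gets a subtask: shutdown waits on the task
// WaitGroup, so in-flight handlers finish before teardown.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var taskWg sync.WaitGroup

	handle := func(event string) {
		taskWg.Add(1)       // plays the role of Subtask(...)
		defer taskWg.Done() // plays the role of task.Finished()
		time.Sleep(50 * time.Millisecond)
		fmt.Println("handled", event)
	}

	go handle("container destroy")

	time.Sleep(10 * time.Millisecond) // shutdown begins mid-handling
	taskWg.Wait()                     // graceful shutdown waits here
	fmt.Println("all tasks finished")
}
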
@@ -129,7 +129,7 @@ func (r *HTTPRoute) Start() E.NestedError {
 		r.handler = waker
 		r.HealthMon = waker
 	case !r.HealthCheck.Disabled:
-		r.HealthMon = health.NewHTTPHealthMonitor(common.GlobalTask("Reverse proxy "+r.String()), r.URL(), r.HealthCheck)
+		r.HealthMon = health.NewHTTPHealthMonitor(common.GlobalTask(r.String()), r.URL(), r.HealthCheck)
 	}

 	if r.handler == nil {

@@ -28,6 +28,7 @@ var (
 	DockerFilterStart   = filters.Arg("event", string(docker_events.ActionStart))
 	DockerFilterStop    = filters.Arg("event", string(docker_events.ActionStop))
 	DockerFilterDie     = filters.Arg("event", string(docker_events.ActionDie))
+	DockerFilterDestroy = filters.Arg("event", string(docker_events.ActionDestroy))
 	DockerFilterKill    = filters.Arg("event", string(docker_events.ActionKill))
 	DockerFilterPause   = filters.Arg("event", string(docker_events.ActionPause))
 	DockerFilterUnpause = filters.Arg("event", string(docker_events.ActionUnPause))

@@ -97,12 +98,8 @@ func (w DockerWatcher) EventsWithOptions(ctx context.Context, options DockerListOptions)

 	defer w.client.Close()

-	w.Debugf("client connected")
-
 	cEventCh, cErrCh := w.client.Events(ctx, options)

-	w.Debugf("watcher started")
-
 	for {
 		select {
 		case <-ctx.Done():

@@ -148,4 +145,5 @@ var optionsWatchAll = DockerListOptions{Filters: NewDockerFilter(
 	DockerFilterStart,
 	// DockerFilterStop,
 	DockerFilterDie,
+	DockerFilterDestroy,
 )}

@@ -32,6 +32,7 @@ const (
 	ActionContainerStop
 	ActionContainerPause
 	ActionContainerDie
+	ActionContainerDestroy

 	actionContainerWakeMask  = ActionContainerCreate | ActionContainerStart | ActionContainerUnpause
 	actionContainerSleepMask = ActionContainerKill | ActionContainerStop | ActionContainerPause | ActionContainerDie

@@ -51,6 +52,7 @@ var DockerEventMap = map[dockerEvents.Action]Action{
 	dockerEvents.ActionStop:    ActionContainerStop,
 	dockerEvents.ActionPause:   ActionContainerPause,
 	dockerEvents.ActionDie:     ActionContainerDie,
+	dockerEvents.ActionDestroy: ActionContainerDestroy,
 }

 var fileActionNameMap = map[Action]string{

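The Action constants read as bit flags, which is what makes IsContainerWake/IsContainerSleep simple mask tests; note the masks themselves are unchanged, so the new ActionContainerDestroy belongs to neither and falls through to the provider's explicit destroy case. A self-contained sketch assuming 1 << iota flags (the exact values are an assumption; the masks are copied from the hunk above):

// Bit-flag classification sketch: wake/sleep checks reduce to a
// bitwise AND against a mask.
package main

import "fmt"

type Action int

const (
	ActionContainerCreate Action = 1 << iota
	ActionContainerStart
	ActionContainerUnpause
	ActionContainerKill
	ActionContainerStop
	ActionContainerPause
	ActionContainerDie
	ActionContainerDestroy
)

const (
	actionContainerWakeMask  = ActionContainerCreate | ActionContainerStart | ActionContainerUnpause
	actionContainerSleepMask = ActionContainerKill | ActionContainerStop | ActionContainerPause | ActionContainerDie
)

func (a Action) IsContainerWake() bool  { return a&actionContainerWakeMask != 0 }
func (a Action) IsContainerSleep() bool { return a&actionContainerSleepMask != 0 }

func main() {
	fmt.Println(ActionContainerStart.IsContainerWake())   // true
	fmt.Println(ActionContainerDie.IsContainerSleep())    // true
	fmt.Println(ActionContainerDestroy.IsContainerWake()) // false: destroy is handled separately
}
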
@@ -127,7 +127,7 @@ func (mon *monitor) String() string {

 func (mon *monitor) MarshalJSON() ([]byte, error) {
 	return (&JSONRepresentation{
-		Name:    mon.Name(),
+		Name:    mon.service,
 		Config:  mon.config,
 		Status:  mon.status.Load(),
 		Started: mon.startTime,

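This is the dashboard app-name fix: together with the GlobalTask change in the HTTPRoute hunk, the JSON name now comes from the bare service field rather than Name(), which presumably reflected the monitor's task label (e.g. "Reverse proxy <name>"). A self-contained sketch of the effect (field shapes assumed, not godoxy's actual monitor type):

// Marshalling the service field directly keeps task labels out of
// the JSON the dashboard renders as the app name.
package main

import (
	"encoding/json"
	"fmt"
)

type JSONRepresentation struct {
	Name   string `json:"name"`
	Status string `json:"status"`
}

type monitor struct {
	service  string // bare service name, e.g. "myapp"
	taskName string // task label, e.g. "Reverse proxy myapp"
	status   string
}

func (mon *monitor) MarshalJSON() ([]byte, error) {
	return json.Marshal(&JSONRepresentation{
		Name:   mon.service, // was mon.Name(), which exposed the task label
		Status: mon.status,
	})
}

func main() {
	b, _ := json.Marshal(&monitor{service: "myapp", taskName: "Reverse proxy myapp", status: "healthy"})
	fmt.Println(string(b)) // {"name":"myapp","status":"healthy"}
}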