fix: high cpu usage

yusing 2025-01-22 05:44:04 +08:00
parent 3781bb93e1
commit b984386bab
12 changed files with 82 additions and 44 deletions

@@ -39,6 +39,7 @@ func init() {
 }

 func main() {
+    initProfiling()
     args := common.GetArgs()
     switch args.Command {
cmd/main_production.go (new file, +5)

@@ -0,0 +1,5 @@
+//go:build production
+
+package main
+
+func initProfiling() {}

cmd/main_prof.go (new file, +17)

@@ -0,0 +1,17 @@
+//go:build pprof
+
+package main
+
+import (
+    "log"
+    "net/http"
+    _ "net/http/pprof"
+    "runtime"
+)
+
+func initProfiling() {
+    runtime.GOMAXPROCS(2)
+    go func() {
+        log.Println(http.ListenAndServe(":7777", nil))
+    }()
+}

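With the pprof tag, net/http/pprof registers its handlers on the default mux and the goroutine above serves them on :7777, so profiles can be pulled over HTTP, for example go tool pprof http://localhost:7777/debug/pprof/profile for a 30-second CPU profile. A minimal sanity check of that endpoint, assuming the binary was built with -tags pprof and runs on the same host; the port comes from the file above, the paths from the standard pprof handlers:

// pprofcheck fetches a plain-text goroutine dump from the profiling listener.
package main

import (
    "io"
    "log"
    "net/http"
    "os"
)

func main() {
    resp, err := http.Get("http://localhost:7777/debug/pprof/goroutine?debug=1")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    // print the dump; other useful paths: /debug/pprof/heap, /debug/pprof/profile
    if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
        log.Fatal(err)
    }
}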
@@ -23,8 +23,9 @@ type logEntryRange struct {
 type memLogger struct {
     bytes.Buffer
-    sync.Mutex
-    connChans F.Map[chan *logEntryRange, struct{}]
+    sync.RWMutex
+    notifyLock sync.RWMutex
+    connChans  F.Map[chan *logEntryRange, struct{}]
 }

 const (
@@ -72,25 +73,28 @@ func MemLogger() io.Writer {
 }
 func (m *memLogger) Write(p []byte) (n int, err error) {
-    m.Lock()
+    m.RLock()
     if m.Len() > maxMemLogSize {
         m.Truncate(truncateSize)
     }
+    m.RUnlock()
-    pos := m.Buffer.Len()
     n = len(p)
+    m.Lock()
+    pos := m.Len()
     _, err = m.Buffer.Write(p)
+    m.Unlock()
     if err != nil {
-        m.Unlock()
         return
     }
     if m.connChans.Size() > 0 {
-        m.Unlock()
         timeout := time.NewTimer(1 * time.Second)
         defer timeout.Stop()
+        m.notifyLock.RLock()
+        defer m.notifyLock.RUnlock()
         m.connChans.Range(func(ch chan *logEntryRange, _ struct{}) bool {
             select {
             case ch <- &logEntryRange{pos, pos + n}:
@@ -102,8 +106,6 @@ func (m *memLogger) Write(p []byte) (n int, err error) {
         })
         return
     }
-    m.Unlock()
     return
 }
@@ -120,8 +122,11 @@ func (m *memLogger) ServeHTTP(config config.ConfigInstance, w http.ResponseWriter, r *http.Request) {
     /* trunk-ignore(golangci-lint/errcheck) */
     defer func() {
         _ = conn.CloseNow()
+        m.notifyLock.Lock()
         m.connChans.Delete(logCh)
         close(logCh)
+        m.notifyLock.Unlock()
     }()

     if err := m.wsInitial(r.Context(), conn); err != nil {
@@ -149,10 +154,10 @@ func (m *memLogger) wsStreamLog(ctx context.Context, conn *websocket.Conn, ch <-chan *logEntryRange) {
         case <-ctx.Done():
             return
         case logRange := <-ch:
-            m.Lock()
+            m.RLock()
             msg := m.Buffer.Bytes()[logRange.Start:logRange.End]
             err := m.writeBytes(ctx, conn, msg)
-            m.Unlock()
+            m.RUnlock()
             if err != nil {
                 return
             }

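The memlogger change splits one lock into two: the embedded RWMutex now guards only the buffer, a separate notifyLock guards the set of websocket subscriber channels, and notification sends are bounded by a one second timer so a stuck reader cannot hold up Write. A self-contained sketch of that two-lock layout (type and field names here are illustrative, not the project's):

package logbuffer

import (
    "bytes"
    "sync"
    "time"
)

// broadcastBuffer keeps the buffer lock and the subscriber lock separate so that
// notifying subscribers never blocks other writers on the buffer lock.
type broadcastBuffer struct {
    mu  sync.RWMutex // guards buf
    buf bytes.Buffer

    notifyMu sync.RWMutex // guards subs
    subs     map[chan [2]int]struct{}
}

func (b *broadcastBuffer) subscribe(ch chan [2]int) {
    b.notifyMu.Lock()
    defer b.notifyMu.Unlock()
    if b.subs == nil {
        b.subs = make(map[chan [2]int]struct{})
    }
    b.subs[ch] = struct{}{}
}

func (b *broadcastBuffer) Write(p []byte) (int, error) {
    b.mu.Lock()
    start := b.buf.Len()
    n, err := b.buf.Write(p)
    b.mu.Unlock() // buffer lock released before any notification
    if err != nil {
        return n, err
    }

    timeout := time.NewTimer(time.Second)
    defer timeout.Stop()

    b.notifyMu.RLock()
    defer b.notifyMu.RUnlock()
    for ch := range b.subs {
        select {
        case ch <- [2]int{start, start + n}: // byte range of the new entry
        case <-timeout.C: // drop the notification instead of blocking Write
            return n, nil
        }
    }
    return n, nil
}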
@@ -12,7 +12,6 @@ import (
     "github.com/yusing/go-proxy/internal/logging"
     "github.com/yusing/go-proxy/internal/task"
     U "github.com/yusing/go-proxy/internal/utils"
-    F "github.com/yusing/go-proxy/internal/utils/functional"
 )

 type (
@@ -27,7 +26,7 @@ type (
 )

 var (
-    clientMap   F.Map[string, *SharedClient] = F.NewMapOf[string, *SharedClient]()
+    clientMap   = make(map[string]*SharedClient, 5)
     clientMapMu sync.Mutex

     clientOptEnvHost = []client.Opt{
@@ -38,11 +37,14 @@ var (
 func init() {
     task.OnProgramExit("docker_clients_cleanup", func() {
-        clientMap.RangeAllParallel(func(_ string, c *SharedClient) {
+        clientMapMu.Lock()
+        defer clientMapMu.Unlock()
+        for _, c := range clientMap {
             if c.Connected() {
                 c.Client.Close()
             }
-        })
+        }
     })
 }
@@ -71,8 +73,7 @@ func ConnectClient(host string) (*SharedClient, error) {
     clientMapMu.Lock()
     defer clientMapMu.Unlock()

     // check if client exists
-    if client, ok := clientMap.Load(host); ok {
+    if client, ok := clientMap[host]; ok {
         client.refCount.Add()
         return client, nil
     }
@@ -123,11 +124,13 @@ func ConnectClient(host string) (*SharedClient, error) {
     }
     c.l.Trace().Msg("client connected")
-    clientMap.Store(host, c)
+    clientMap[host] = c
     go func() {
         <-c.refCount.Zero()
-        clientMap.Delete(c.key)
+        clientMapMu.Lock()
+        delete(clientMap, c.key)
+        clientMapMu.Unlock()
         if c.Connected() {
             c.Client.Close()

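With F.Map gone, clientMap is a plain map guarded by clientMapMu, and an entry is dropped once its reference count reaches zero. A rough sketch of that acquire/release bookkeeping, simplified and without the repo's refCount helper or the Docker client itself:

package dockershared

import "sync"

// sharedConn stands in for SharedClient; only what the sketch needs is shown.
type sharedConn struct {
    host string
    refs int
}

var (
    connsMu sync.Mutex
    conns   = make(map[string]*sharedConn, 5)
)

// acquire returns the shared entry for host, creating it on first use.
func acquire(host string) *sharedConn {
    connsMu.Lock()
    defer connsMu.Unlock()
    c, ok := conns[host]
    if !ok {
        c = &sharedConn{host: host}
        conns[host] = c
    }
    c.refs++
    return c
}

// release drops one reference and removes the entry when nobody uses it anymore.
func release(c *sharedConn) {
    connsMu.Lock()
    defer connsMu.Unlock()
    if c.refs--; c.refs == 0 {
        delete(conns, c.host)
    }
}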
@@ -42,7 +42,7 @@ const updateInterval = 2 * time.Hour
 var (
     iconsCache   *Cache
-    iconsCahceMu sync.Mutex
+    iconsCahceMu sync.RWMutex
     lastUpdate   time.Time
 )
@@ -71,14 +71,17 @@ func InitIconListCache() {
 }

 func ListAvailableIcons() (*Cache, error) {
-    iconsCahceMu.Lock()
-    defer iconsCahceMu.Unlock()
+    iconsCahceMu.RLock()
     if time.Since(lastUpdate) < updateInterval {
         if !iconsCache.needUpdate() {
+            iconsCahceMu.RUnlock()
             return iconsCache, nil
         }
     }
+    iconsCahceMu.RUnlock()
+    iconsCahceMu.Lock()
+    defer iconsCahceMu.Unlock()
     icons, err := fetchIconData()
     if err != nil {

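ListAvailableIcons now takes a read lock for the common case and only upgrades to the write lock when the cache is stale. The general double-checked shape of that pattern, with a re-check after the write lock is acquired since another goroutine may have refreshed the cache in between (names and the cache type here are illustrative):

package iconcache

import (
    "sync"
    "time"
)

var (
    cacheMu   sync.RWMutex
    cached    []string // stands in for *Cache
    expiresAt time.Time
)

// fetch stands in for the real fetchIconData call.
func fetch() ([]string, error) { return []string{"adguard-home.webp"}, nil }

func List() ([]string, error) {
    // Fast path: shared lock only, so concurrent readers do not serialize.
    cacheMu.RLock()
    if time.Now().Before(expiresAt) {
        defer cacheMu.RUnlock()
        return cached, nil
    }
    cacheMu.RUnlock()

    // Slow path: exclusive lock, then re-check before fetching.
    cacheMu.Lock()
    defer cacheMu.Unlock()
    if time.Now().Before(expiresAt) {
        return cached, nil
    }
    fresh, err := fetch()
    if err != nil {
        return nil, err
    }
    cached, expiresAt = fresh, time.Now().Add(2*time.Hour)
    return cached, nil
}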
@@ -19,8 +19,8 @@ type (
         io      AccessLogIO
         buf     bytes.Buffer // buffer for non-flushed log
-        bufMu   sync.Mutex // protect buf
-        bufPool sync.Pool  // buffer pool for formatting a single log line
+        bufMu   sync.RWMutex
+        bufPool sync.Pool // buffer pool for formatting a single log line

         flushThreshold int
@@ -123,10 +123,10 @@ func (l *AccessLogger) Flush(force bool) {
         return
     }
     if force || l.buf.Len() >= l.flushThreshold {
-        l.bufMu.Lock()
+        l.bufMu.RLock()
         l.write(l.buf.Bytes())
         l.buf.Reset()
-        l.bufMu.Unlock()
+        l.bufMu.RUnlock()
     }
 }

@@ -19,19 +19,12 @@ import (
 const errPagesBasePath = common.ErrorPagesBasePath

 var (
-    setupMu        sync.Mutex
+    setupOnce      sync.Once
     dirWatcher     W.Watcher
     fileContentMap = F.NewMapOf[string, []byte]()
 )

 func setup() {
-    setupMu.Lock()
-    defer setupMu.Unlock()
-
-    if dirWatcher != nil {
-        return
-    }
-
     t := task.RootTask("error_page", false)
     dirWatcher = W.NewDirectoryWatcher(t, errPagesBasePath)
     loadContent()
@@ -39,7 +32,7 @@ func setup() {
 }

 func GetStaticFile(filename string) ([]byte, bool) {
-    setup()
+    setupOnce.Do(setup)
     return fileContentMap.Load(filename)
 }

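Error page setup moves from a mutex plus a nil check to sync.Once, which keeps the once-only initialization but turns every call after the first into a cheap atomic check. The idiom in isolation, with placeholder names:

package errpages

import "sync"

var (
    initOnce sync.Once
    content  map[string][]byte
)

// initContent does the expensive one-time work (start watchers, read files, ...).
func initContent() {
    content = map[string][]byte{"404.html": []byte("not found")}
}

// Get is safe for concurrent use: initContent runs exactly once and every
// caller blocks until that first run has completed.
func Get(name string) ([]byte, bool) {
    initOnce.Do(initContent)
    b, ok := content[name]
    return b, ok
}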
@@ -49,8 +49,6 @@ func (rl *rateLimiter) newLimiter() *rate.Limiter {
 }

 func (rl *rateLimiter) limit(w http.ResponseWriter, r *http.Request) bool {
-    rl.mu.Lock()
-
     host, _, err := net.SplitHostPort(r.RemoteAddr)
     if err != nil {
         rl.AddTracef("unable to parse remote address %s", r.RemoteAddr)
@@ -58,12 +56,12 @@ func (rl *rateLimiter) limit(w http.ResponseWriter, r *http.Request) bool {
         return false
     }

+    rl.mu.Lock()
     limiter, ok := rl.requestMap[host]
     if !ok {
         limiter = rl.newLimiter()
         rl.requestMap[host] = limiter
     }
     rl.mu.Unlock()

     if limiter.Allow() {

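The limiter now parses the remote address before locking and holds rl.mu only around the per-host map access, releasing it before Allow. A self-contained sketch of that per-host limiter map, assuming golang.org/x/time/rate (which the *rate.Limiter return type above points to); rate.Limiter is itself safe for concurrent use, which is why Allow can run outside the lock:

package ratelimit

import (
    "net"
    "net/http"
    "sync"

    "golang.org/x/time/rate"
)

type perHostLimiter struct {
    mu    sync.Mutex
    hosts map[string]*rate.Limiter
    limit rate.Limit
    burst int
}

func newPerHostLimiter(l rate.Limit, burst int) *perHostLimiter {
    return &perHostLimiter{hosts: make(map[string]*rate.Limiter), limit: l, burst: burst}
}

// allow reports whether the request's host is still within its budget.
func (p *perHostLimiter) allow(r *http.Request) bool {
    host, _, err := net.SplitHostPort(r.RemoteAddr)
    if err != nil {
        return false // cannot attribute the request to a host
    }

    // Hold the lock only while touching the map.
    p.mu.Lock()
    lim, ok := p.hosts[host]
    if !ok {
        lim = rate.NewLimiter(p.limit, p.burst)
        p.hosts[host] = lim
    }
    p.mu.Unlock()

    return lim.Allow()
}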
@@ -172,7 +172,7 @@ func (e *RawEntry) Finalize() {
         }
     }

-    if e.Homepage == nil {
+    if e.Homepage.IsEmpty() {
         e.Homepage = homepage.NewItem(e.Alias)
     }

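Switching from e.Homepage == nil to e.Homepage.IsEmpty() only works if IsEmpty is safe on a nil receiver (or also covers a non-nil but empty item). The usual Go idiom for such a method, sketched with a stand-in type since the real homepage.Item is not shown in this diff:

package homepage

// Item is a stand-in for the real dashboard item type.
type Item struct {
    Name string
    Icon string
}

// IsEmpty reports whether the item carries no useful configuration.
// The nil check on the receiver makes it safe to call through a nil pointer,
// so callers can write item.IsEmpty() instead of item == nil || ... .
func (i *Item) IsEmpty() bool {
    return i == nil || (i.Name == "" && i.Icon == "")
}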
@@ -4,6 +4,7 @@ import (
     "context"
     "runtime/debug"
     "sync"
+    "sync/atomic"
     "time"

     "github.com/yusing/go-proxy/internal/common"
@@ -44,8 +45,11 @@ type (
         callbacks     map[*Callback]struct{}
         callbacksDone chan struct{}

-        finished       chan struct{}
-        finishedCalled bool
+        finished chan struct{}
+        // finishedCalled == 1 Finish has been called
+        // but does not mean that the task is finished yet
+        // this is used to avoid calling Finish twice
+        finishedCalled uint32

         mu sync.Mutex
@@ -93,13 +97,19 @@ func (t *Task) OnCancel(about string, fn func()) {
 // Finish cancel all subtasks and wait for them to finish,
 // then marks the task as finished, with the given reason (if any).
 func (t *Task) Finish(reason any) {
+    if atomic.LoadUint32(&t.finishedCalled) == 1 {
+        return
+    }
     t.mu.Lock()
-    if t.finishedCalled {
+    if t.finishedCalled == 1 {
         t.mu.Unlock()
         return
     }
-    t.finishedCalled = true
+    t.finishedCalled = 1
     t.mu.Unlock()
     t.finish(reason)
 }

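Task.Finish becomes idempotent through an atomic uint32: a lock-free fast path rejects repeat calls and the mutex-protected section re-checks the flag before doing the real work. The same guarantee can be written more compactly with a compare-and-swap; a sketch, not the project's code:

package taskutil

import "sync/atomic"

type onceTask struct {
    finishedCalled uint32
}

// finishOnce runs fn on the first call and is a no-op afterwards.
// CompareAndSwapUint32 flips the flag exactly once, so no mutex is needed
// for the guard itself.
func (t *onceTask) finishOnce(fn func()) {
    if !atomic.CompareAndSwapUint32(&t.finishedCalled, 0, 1) {
        return // Finish already called
    }
    fn()
}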
@@ -116,6 +116,9 @@ GoDoxy v0.8.2 expected changes
 ```
 - **new** Brand new rewritten WebUI
+  - View logs directly from WebUI
+  - Edit dashboard item config (overrides docker labels and include file)
+  - Health bubbles, latency, etc. rich info on dashboard items
 - **new** Support selfh.st icons: `@selfhst/<reference>.<format>` _(e.g. `@selfhst/adguard-home.webp`)_
   - also uses the display name on https://selfh.st/icons/ as default for our dashboard!
 - **new** GoDoxy server side favicon retrieving and caching