GoDoxy/internal/net/gphttp/accesslog/back_scanner.go
Yuzerion 57292f0fe8
feat: proxmox idlewatcher (#88)
* feat: idle sleep for proxmox LXCs

* refactor: replace deprecated docker api types

* chore(api): remove debug task list endpoint

* refactor: move servemux to gphttp/servemux; favicon.go to v1/favicon

* refactor: introduce Pool interface, move agent_pool to agent module

* refactor: simplify api code

* feat: introduce debug api

* refactor: remove net.URL and net.CIDR types, improved unmarshal handling

* chore: update Makefile for debug build tag, update README

* chore: add gperr.Unwrap method

* feat: relative time and duration formatting

* chore: add ROOT_DIR environment variable, refactor

* migration: move homepage override and icon cache to $BASE_DIR/data, add migration code

* fix: nil dereference on marshalling service health

* fix: wait for route deletion

* chore: enhance tasks debuggability

* feat: stdout access logger and MultiWriter

* fix(agent): remove agent properly on verify error

* fix(metrics): disk exclusion logic and added corresponding tests

* chore: update schema and prettify, fix package.json and Makefile

* fix: I/O buffer not being shrunk before putting back to pool

* feat: enhanced error handling module

* chore: deps upgrade

* feat: better value formatting and handling

---------

Co-authored-by: yusing <yusing@6uo.me>
2025-04-16 14:52:33 +08:00

104 lines
2.3 KiB
Go

package accesslog
import (
"bytes"
"io"
)
// BackScanner reads a file backward line by line, from the last line to the
// first. It is the reverse counterpart of bufio.Scanner for access-log files.
type BackScanner struct {
	file      supportRotate // source file; must support ReadAt and Seek
	chunkSize int           // number of bytes fetched per backward read step
	offset    int64         // file position of the first byte not yet read; moves toward 0
	buffer    []byte        // bytes read from the file but not yet emitted as lines
	line      []byte        // most recent line produced by Scan (no trailing '\n')
	err       error         // first non-EOF error encountered; sticky once set
	size      int64         // total file size captured at construction time
}
// defaultBackScanChunkSize is substituted when NewBackScanner receives a
// non-positive chunk size. Without this guard Scan would make no progress
// (newOffset == offset with a zero-length read) and spin forever.
const defaultBackScanChunkSize = 4096

// NewBackScanner creates a new Scanner to read the file backward.
// chunkSize determines the size of each read chunk from the end of the file;
// a non-positive value falls back to defaultBackScanChunkSize.
//
// If the initial seek fails, the returned scanner is inert: Scan reports
// false immediately and Err returns the seek error.
func NewBackScanner(file supportRotate, chunkSize int) *BackScanner {
	if chunkSize <= 0 {
		chunkSize = defaultBackScanChunkSize
	}
	// Seek to the end to learn the file size; scanning then walks backward
	// from this offset in chunkSize-byte steps.
	size, err := file.Seek(0, io.SeekEnd)
	if err != nil {
		return &BackScanner{err: err}
	}
	return &BackScanner{
		file:      file,
		chunkSize: chunkSize,
		offset:    size,
		size:      size,
	}
}
// Scan advances the scanner to the previous line, which will then be available
// via the Bytes method. It returns false when there are no more lines.
//
// Empty lines are skipped: Scan only returns true for lines containing at
// least one byte. The reported line never includes its trailing newline.
func (s *BackScanner) Scan() bool {
	if s.err != nil {
		// Errors are sticky: once a read fails the scanner stays stopped.
		return false
	}
	// Read chunks until a newline is found or the file is fully read
	for {
		// Check if there's a line in the buffer
		if idx := bytes.LastIndexByte(s.buffer, '\n'); idx >= 0 {
			// Everything after the last '\n' is the next line (scanning
			// backward). s.line aliases s.buffer's backing array; this is
			// safe because later reads build a fresh buffer via append on a
			// new chunk rather than mutating this array in place.
			s.line = s.buffer[idx+1:]
			s.buffer = s.buffer[:idx]
			if len(s.line) > 0 {
				return true
			}
			// Consecutive newlines produced an empty line: keep splitting
			// the buffered data.
			continue
		}
		for {
			if s.offset <= 0 {
				// No more data to read; check remaining buffer
				if len(s.buffer) > 0 {
					// The file's first line has no preceding '\n': emit
					// whatever is left, then the scanner is exhausted.
					s.line = s.buffer
					s.buffer = nil
					return true
				}
				return false
			}
			// Read the chunk that ends at the current offset. The final
			// (frontmost) chunk may be shorter than s.chunkSize.
			newOffset := max(0, s.offset-int64(s.chunkSize))
			chunkSize := s.offset - newOffset
			chunk := make([]byte, chunkSize)
			n, err := s.file.ReadAt(chunk, newOffset)
			if err != nil && err != io.EOF {
				// io.EOF with partial data is not fatal; any other error is.
				s.err = err
				return false
			}
			// Prepend the chunk to the buffer
			s.buffer = append(chunk[:n], s.buffer...)
			s.offset = newOffset
			// Check for newline in the updated buffer
			if idx := bytes.LastIndexByte(s.buffer, '\n'); idx >= 0 {
				s.line = s.buffer[idx+1:]
				s.buffer = s.buffer[:idx]
				if len(s.line) > 0 {
					return true
				}
				// Empty line: break back to the outer loop, which keeps
				// splitting the buffer before reading more data.
				break
			}
		}
	}
}
// Bytes returns the most recent line generated by a call to Scan.
// The slice may alias the scanner's internal buffer; treat it as valid only
// until the next call to Scan and copy it if it must be retained.
func (s *BackScanner) Bytes() []byte {
	return s.line
}
// FileSize returns the size of the file in bytes, as captured once in
// NewBackScanner. It is not refreshed if the file grows or is rotated
// after the scanner was created.
func (s *BackScanner) FileSize() int64 {
	return s.size
}
// Err returns the first non-EOF error encountered by the scanner, or nil.
// io.EOF from ReadAt is treated as normal end-of-data and is never reported;
// any other error permanently stops scanning (see Scan).
func (s *BackScanner) Err() error {
	return s.err
}