|
18 | 18 | package log |
19 | 19 |
|
20 | 20 | import ( |
| 21 | + "compress/gzip" |
21 | 22 | "errors" |
22 | 23 | "fmt" |
23 | 24 | "io" |
| 25 | + "io/ioutil" |
24 | 26 | "os" |
25 | 27 | "path/filepath" |
26 | 28 | "runtime/debug" |
| 29 | + "sort" |
| 30 | + "strconv" |
27 | 31 | "strings" |
28 | 32 | "sync" |
29 | 33 | "time" |
@@ -205,6 +209,7 @@ func (l *Logger) start() error { |
205 | 209 | l.create = time.Now() |
206 | 210 | } |
207 | 211 | l.writer = file |
| 212 | + l.mill() |
208 | 213 | l.once.Do(l.startRotate) // start rotate, only once |
209 | 214 | } |
210 | 215 | } |
@@ -247,6 +252,7 @@ func (l *Logger) handler() { |
247 | 252 | } |
248 | 253 | } |
249 | 254 | case buf := <-l.writeBufferChan: |
| 255 | + |
250 | 256 | l.Write(buf.Bytes()) |
251 | 257 | PutLogBuffer(buf) |
252 | 258 | } |
@@ -276,6 +282,7 @@ func (l *Logger) reopen() error { |
276 | 282 | if err := closer.Close(); err != nil { |
277 | 283 | fmt.Fprintf(os.Stderr, "logger %s close error when restart, error: %v", l.output, err) |
278 | 284 | } |
| 285 | + l.mill() |
279 | 286 | return l.start() |
280 | 287 | } |
281 | 288 | return ErrReopenUnsupported |
@@ -471,3 +478,203 @@ func parseSyslogAddress(location string) *syslogAddress { |
471 | 478 |
|
472 | 479 | return nil |
473 | 480 | } |
| 481 | + |
const (
	// compressSuffix is the file-name extension given to compressed
	// backup log files (e.g. "app.log.2021-01-01.gz").
	compressSuffix = ".gz"
)
| 485 | + |
| 486 | +// millRunOnce performs compression and removal of stale log files. |
| 487 | +// Log files are compressed if enabled via configuration and old log |
| 488 | +// files are removed, keeping at most l.MaxBackups files, as long as |
| 489 | +// none of them are older than MaxAge. |
| 490 | +func (l *Logger) millRunOnce() error { |
| 491 | + files, err := l.oldLogFiles() |
| 492 | + if err != nil { |
| 493 | + return err |
| 494 | + } |
| 495 | + |
| 496 | + compress, remove := l.screeningCompressFile(files) |
| 497 | + |
| 498 | + for _, f := range remove { |
| 499 | + _ = os.Remove(filepath.Join(l.dir(), f.FileName)) |
| 500 | + } |
| 501 | + |
| 502 | + for _, f := range compress { |
| 503 | + var fnCompress string |
| 504 | + fnCompress, err = l.findCompressFile(f.FileName) |
| 505 | + if err != nil { |
| 506 | + return err |
| 507 | + } |
| 508 | + errCompress := l.compressLogFile(f.FileName, fnCompress) |
| 509 | + if err != nil && errCompress != nil { |
| 510 | + err = errCompress |
| 511 | + } |
| 512 | + } |
| 513 | + |
| 514 | + return err |
| 515 | +} |
| 516 | + |
| 517 | +func (l *Logger) screeningCompressFile(files []LoggerInfo) (compress, remove []LoggerInfo) { |
| 518 | + resFiles, removeByMaxAge := l.screeningCompressFileByMaxAge(files) |
| 519 | + resFiles, remove = l.screeningCompressFileByMaxBackups(resFiles, removeByMaxAge) |
| 520 | + |
| 521 | + if l.roller.Compress { |
| 522 | + for i := range resFiles { |
| 523 | + if !strings.HasSuffix(resFiles[i].FileName, compressSuffix) { |
| 524 | + compress = append(compress, resFiles[i]) |
| 525 | + } |
| 526 | + } |
| 527 | + } |
| 528 | + return |
| 529 | +} |
| 530 | + |
| 531 | +func (l *Logger) screeningCompressFileByMaxAge(files []LoggerInfo) (resFiles, remove []LoggerInfo) { |
| 532 | + if l.roller.MaxAge > 0 { |
| 533 | + diff := time.Duration(int64(maxRotateHour*time.Hour) * int64(l.roller.MaxAge)) |
| 534 | + cutoff := time.Now().Add(-1 * diff) |
| 535 | + |
| 536 | + for i := range files { |
| 537 | + if files[i].CreateTime.Before(cutoff) { |
| 538 | + remove = append(remove, files[i]) |
| 539 | + } else { |
| 540 | + resFiles = append(resFiles, files[i]) |
| 541 | + } |
| 542 | + } |
| 543 | + } else { |
| 544 | + resFiles = files |
| 545 | + } |
| 546 | + return |
| 547 | +} |
| 548 | + |
| 549 | +func (l *Logger) screeningCompressFileByMaxBackups(files, remove []LoggerInfo) (resFiles, resRemove []LoggerInfo) { |
| 550 | + if l.roller.MaxBackups > 0 && l.roller.MaxBackups < len(files) { |
| 551 | + preserved := make(map[string]bool) |
| 552 | + |
| 553 | + for i := range files { |
| 554 | + // Only count the uncompressed log file or the |
| 555 | + // compressed log file, not both. |
| 556 | + fn := files[i].FileName |
| 557 | + |
| 558 | + preserved[strings.TrimSuffix(fn, compressSuffix)] = true |
| 559 | + |
| 560 | + if len(preserved) > l.roller.MaxBackups { |
| 561 | + remove = append(remove, files[i]) |
| 562 | + } else { |
| 563 | + resFiles = append(resFiles, files[i]) |
| 564 | + } |
| 565 | + } |
| 566 | + } else { |
| 567 | + resFiles = files |
| 568 | + } |
| 569 | + resRemove = remove |
| 570 | + return |
| 571 | +} |
| 572 | + |
| 573 | +func (l *Logger) findCompressFile(fileName string) (string, error) { |
| 574 | + num := 1 |
| 575 | + statName := fileName |
| 576 | + |
| 577 | + for i := 0; i < 10; i++ { |
| 578 | + if _, err := os.Stat(l.dir() + statName + compressSuffix); os.IsNotExist(err) { |
| 579 | + return statName + compressSuffix, nil |
| 580 | + } |
| 581 | + statName = fileName + "." + strconv.Itoa(num) |
| 582 | + num++ |
| 583 | + } |
| 584 | + return fileName, errors.New("findCompressFile failed") |
| 585 | +} |
| 586 | + |
| 587 | +func (l *Logger) mill() { |
| 588 | + if l.roller.MaxBackups != 0 || l.roller.MaxAge != 0 || l.roller.Compress { |
| 589 | + _ = l.millRunOnce() |
| 590 | + } |
| 591 | +} |
| 592 | + |
| 593 | +// oldLogFiles returns the list of backup log files stored in the same |
| 594 | +// directory as the current log file, sorted by ModTime |
| 595 | +func (l *Logger) oldLogFiles() ([]LoggerInfo, error) { |
| 596 | + files, err := ioutil.ReadDir(l.dir()) |
| 597 | + if err != nil { |
| 598 | + return nil, err |
| 599 | + } |
| 600 | + logFiles := []LoggerInfo{} |
| 601 | + |
| 602 | + for _, f := range files { |
| 603 | + if f.IsDir() { |
| 604 | + continue |
| 605 | + } |
| 606 | + if !strings.HasPrefix(f.Name(), filepath.Base(l.output)+".") { |
| 607 | + continue |
| 608 | + } |
| 609 | + //use modTime replace createTime |
| 610 | + logFiles = append(logFiles, LoggerInfo{*l.roller, f.Name(), f.ModTime()}) |
| 611 | + } |
| 612 | + sort.Sort(byFormatTime(logFiles)) |
| 613 | + |
| 614 | + return logFiles, nil |
| 615 | +} |
| 616 | + |
| 617 | +// dir returns the directory for the current filename. |
| 618 | +func (l *Logger) dir() string { |
| 619 | + return filepath.Dir(l.output) |
| 620 | +} |
| 621 | + |
| 622 | +// compressLogFile compresses the given log file, removing the |
| 623 | +// uncompressed log file if successful. |
| 624 | +func (l *Logger) compressLogFile(srcFile, dstFile string) error { |
| 625 | + f, err := os.Open(filepath.Join(l.dir(), filepath.Clean(srcFile))) |
| 626 | + if err != nil { |
| 627 | + return err |
| 628 | + } |
| 629 | + |
| 630 | + defer func() { |
| 631 | + _ = f.Close() |
| 632 | + }() |
| 633 | + |
| 634 | + gzf, err := os.OpenFile(filepath.Join(l.dir(), filepath.Clean(dstFile)), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) |
| 635 | + if err != nil { |
| 636 | + return err |
| 637 | + } |
| 638 | + |
| 639 | + defer func() { |
| 640 | + _ = gzf.Close() |
| 641 | + if err != nil { |
| 642 | + _ = os.Remove(filepath.Join(l.dir(), filepath.Clean(dstFile))) |
| 643 | + } |
| 644 | + }() |
| 645 | + |
| 646 | + gz := gzip.NewWriter(gzf) |
| 647 | + |
| 648 | + if _, err = io.Copy(gz, f); err != nil { |
| 649 | + return err |
| 650 | + } |
| 651 | + |
| 652 | + if err = gz.Close(); err != nil { |
| 653 | + return err |
| 654 | + } |
| 655 | + |
| 656 | + if err = gzf.Close(); err != nil { |
| 657 | + return err |
| 658 | + } |
| 659 | + |
| 660 | + if err = f.Close(); err != nil { |
| 661 | + return err |
| 662 | + } |
| 663 | + |
| 664 | + return os.Remove(filepath.Join(l.dir(), filepath.Clean(srcFile))) |
| 665 | +} |
| 666 | + |
| 667 | +// byFormatTime sorts by newest time formatted in the name. |
| 668 | +type byFormatTime []LoggerInfo |
| 669 | + |
| 670 | +func (b byFormatTime) Less(i, j int) bool { |
| 671 | + return b[i].CreateTime.After(b[j].CreateTime) |
| 672 | +} |
| 673 | + |
| 674 | +func (b byFormatTime) Swap(i, j int) { |
| 675 | + b[i], b[j] = b[j], b[i] |
| 676 | +} |
| 677 | + |
| 678 | +func (b byFormatTime) Len() int { |
| 679 | + return len(b) |
| 680 | +} |
0 commit comments