Bläddra i källkod

refactor(cli): use time.Duration for cleanup tasks

gudvinr 7 månader sedan
förälder
incheckning
983291c78b

+ 4 - 4
internal/cli/cleanup_tasks.go

@@ -14,15 +14,15 @@ import (
 )
 
 func runCleanupTasks(store *storage.Storage) {
-	nbSessions := store.CleanOldSessions(config.Opts.CleanupRemoveSessionsDays())
-	nbUserSessions := store.CleanOldUserSessions(config.Opts.CleanupRemoveSessionsDays())
+	nbSessions := store.CleanOldSessions(config.Opts.CleanupRemoveSessionsInterval())
+	nbUserSessions := store.CleanOldUserSessions(config.Opts.CleanupRemoveSessionsInterval())
 	slog.Info("Sessions cleanup completed",
 		slog.Int64("application_sessions_removed", nbSessions),
 		slog.Int64("user_sessions_removed", nbUserSessions),
 	)
 
 	startTime := time.Now()
-	if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, config.Opts.CleanupArchiveReadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil {
+	if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, config.Opts.CleanupArchiveReadInterval(), config.Opts.CleanupArchiveBatchSize()); err != nil {
 		slog.Error("Unable to archive read entries", slog.Any("error", err))
 	} else {
 		slog.Info("Archiving read entries completed",
@@ -35,7 +35,7 @@ func runCleanupTasks(store *storage.Storage) {
 	}
 
 	startTime = time.Now()
-	if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, config.Opts.CleanupArchiveUnreadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil {
+	if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, config.Opts.CleanupArchiveUnreadInterval(), config.Opts.CleanupArchiveBatchSize()); err != nil {
 		slog.Error("Unable to archive unread entries", slog.Any("error", err))
 	} else {
 		slog.Info("Archiving unread entries completed",

+ 48 - 8
internal/config/config_test.go

@@ -645,12 +645,22 @@ func TestDefaultCleanupArchiveReadDaysValue(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 60
-	result := opts.CleanupArchiveReadDays()
+	expected := 60 * 24 * time.Hour
+	result := opts.CleanupArchiveReadInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected CLEANUP_ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "CLEANUP_ARCHIVE_READ_DAYS"
+	})
+
+	expectedSerialized := 60
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %v instead of %v`, got, expectedSerialized)
+	}
 }
 
 func TestCleanupArchiveReadDays(t *testing.T) {
@@ -664,12 +674,22 @@ func TestCleanupArchiveReadDays(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 7
-	result := opts.CleanupArchiveReadDays()
+	expected := 7 * 24 * time.Hour
+	result := opts.CleanupArchiveReadInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected CLEANUP_ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "CLEANUP_ARCHIVE_READ_DAYS"
+	})
+
+	expectedSerialized := 7
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %v instead of %v`, got, expectedSerialized)
+	}
 }
 
 func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) {
@@ -681,12 +701,22 @@ func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 30
-	result := opts.CleanupRemoveSessionsDays()
+	expected := 30 * 24 * time.Hour
+	result := opts.CleanupRemoveSessionsInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected CLEANUP_REMOVE_SESSIONS_DAYS value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "CLEANUP_REMOVE_SESSIONS_DAYS"
+	})
+
+	expectedSerialized := 30
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %v instead of %v`, got, expectedSerialized)
+	}
 }
 
 func TestCleanupRemoveSessionsDays(t *testing.T) {
@@ -699,12 +729,22 @@ func TestCleanupRemoveSessionsDays(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 7
-	result := opts.CleanupRemoveSessionsDays()
+	expected := 7 * 24 * time.Hour
+	result := opts.CleanupRemoveSessionsInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected CLEANUP_REMOVE_SESSIONS_DAYS value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "CLEANUP_REMOVE_SESSIONS_DAYS"
+	})
+
+	expectedSerialized := 7
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %v instead of %v`, got, expectedSerialized)
+	}
 }
 
 func TestDefaultWorkerPoolSizeValue(t *testing.T) {

+ 21 - 21
internal/config/options.go

@@ -48,10 +48,10 @@ const (
 	defaultKeyFile                            = ""
 	defaultCertDomain                         = ""
 	defaultCleanupFrequency                   = 24 * time.Hour
-	defaultCleanupArchiveReadDays             = 60
-	defaultCleanupArchiveUnreadDays           = 180
+	defaultCleanupArchiveReadInterval         = 60 * 24 * time.Hour
+	defaultCleanupArchiveUnreadInterval       = 180 * 24 * time.Hour
 	defaultCleanupArchiveBatchSize            = 10000
-	defaultCleanupRemoveSessionsDays          = 30
+	defaultCleanupRemoveSessionsInterval      = 30 * 24 * time.Hour
 	defaultMediaProxyHTTPClientTimeout        = 120 * time.Second
 	defaultMediaProxyMode                     = "http-only"
 	defaultMediaResourceTypes                 = "image"
@@ -126,10 +126,10 @@ type options struct {
 	certDomain                         string
 	certKeyFile                        string
 	cleanupFrequencyInterval           time.Duration
-	cleanupArchiveReadDays             int
-	cleanupArchiveUnreadDays           int
+	cleanupArchiveReadInterval         time.Duration
+	cleanupArchiveUnreadInterval       time.Duration
 	cleanupArchiveBatchSize            int
-	cleanupRemoveSessionsDays          int
+	cleanupRemoveSessionsInterval      time.Duration
 	forceRefreshInterval               time.Duration
 	batchSize                          int
 	schedulerEntryFrequencyMinInterval time.Duration
@@ -210,10 +210,10 @@ func NewOptions() *options {
 		certDomain:                         defaultCertDomain,
 		certKeyFile:                        defaultKeyFile,
 		cleanupFrequencyInterval:           defaultCleanupFrequency,
-		cleanupArchiveReadDays:             defaultCleanupArchiveReadDays,
-		cleanupArchiveUnreadDays:           defaultCleanupArchiveUnreadDays,
+		cleanupArchiveReadInterval:         defaultCleanupArchiveReadInterval,
+		cleanupArchiveUnreadInterval:       defaultCleanupArchiveUnreadInterval,
 		cleanupArchiveBatchSize:            defaultCleanupArchiveBatchSize,
-		cleanupRemoveSessionsDays:          defaultCleanupRemoveSessionsDays,
+		cleanupRemoveSessionsInterval:      defaultCleanupRemoveSessionsInterval,
 		pollingFrequency:                   defaultPollingFrequency,
 		forceRefreshInterval:               defaultForceRefreshInterval,
 		batchSize:                          defaultBatchSize,
@@ -366,14 +366,14 @@ func (o *options) CleanupFrequency() time.Duration {
 	return o.cleanupFrequencyInterval
 }
 
-// CleanupArchiveReadDays returns the number of days after which marking read items as removed.
-func (o *options) CleanupArchiveReadDays() int {
-	return o.cleanupArchiveReadDays
+// CleanupArchiveReadInterval returns the interval after which read items are marked as removed.
+func (o *options) CleanupArchiveReadInterval() time.Duration {
+	return o.cleanupArchiveReadInterval
 }
 
-// CleanupArchiveUnreadDays returns the number of days after which marking unread items as removed.
-func (o *options) CleanupArchiveUnreadDays() int {
-	return o.cleanupArchiveUnreadDays
+// CleanupArchiveUnreadInterval returns the interval after which unread items are marked as removed.
+func (o *options) CleanupArchiveUnreadInterval() time.Duration {
+	return o.cleanupArchiveUnreadInterval
 }
 
 // CleanupArchiveBatchSize returns the number of entries to archive for each interval.
@@ -381,9 +381,9 @@ func (o *options) CleanupArchiveBatchSize() int {
 	return o.cleanupArchiveBatchSize
 }
 
-// CleanupRemoveSessionsDays returns the number of days after which to remove sessions.
-func (o *options) CleanupRemoveSessionsDays() int {
-	return o.cleanupRemoveSessionsDays
+// CleanupRemoveSessionsInterval returns the interval after which to remove sessions.
+func (o *options) CleanupRemoveSessionsInterval() time.Duration {
+	return o.cleanupRemoveSessionsInterval
 }
 
 // WorkerPoolSize returns the number of background worker.
@@ -723,9 +723,9 @@ func (o *options) SortedOptions(redactSecret bool) []*option {
 		"CERT_FILE":                              o.certFile,
 		"CLEANUP_FREQUENCY_HOURS":                int(o.cleanupFrequencyInterval.Hours()),
 		"CLEANUP_ARCHIVE_BATCH_SIZE":             o.cleanupArchiveBatchSize,
-		"CLEANUP_ARCHIVE_READ_DAYS":              o.cleanupArchiveReadDays,
-		"CLEANUP_ARCHIVE_UNREAD_DAYS":            o.cleanupArchiveUnreadDays,
-		"CLEANUP_REMOVE_SESSIONS_DAYS":           o.cleanupRemoveSessionsDays,
+		"CLEANUP_ARCHIVE_READ_DAYS":              int(o.cleanupArchiveReadInterval.Hours() / 24),
+		"CLEANUP_ARCHIVE_UNREAD_DAYS":            int(o.cleanupArchiveUnreadInterval.Hours() / 24),
+		"CLEANUP_REMOVE_SESSIONS_DAYS":           int(o.cleanupRemoveSessionsInterval.Hours() / 24),
 		"CREATE_ADMIN":                           o.createAdmin,
 		"DATABASE_CONNECTION_LIFETIME":           o.databaseConnectionLifetime,
 		"DATABASE_MAX_CONNS":                     o.databaseMaxConns,

+ 3 - 3
internal/config/parser.go

@@ -129,13 +129,13 @@ func (p *parser) parseLines(lines []string) (err error) {
 		case "CLEANUP_FREQUENCY_HOURS":
 			p.opts.cleanupFrequencyInterval = parseInterval(value, time.Hour, defaultCleanupFrequency)
 		case "CLEANUP_ARCHIVE_READ_DAYS":
-			p.opts.cleanupArchiveReadDays = parseInt(value, defaultCleanupArchiveReadDays)
+			p.opts.cleanupArchiveReadInterval = parseInterval(value, 24*time.Hour, defaultCleanupArchiveReadInterval)
 		case "CLEANUP_ARCHIVE_UNREAD_DAYS":
-			p.opts.cleanupArchiveUnreadDays = parseInt(value, defaultCleanupArchiveUnreadDays)
+			p.opts.cleanupArchiveUnreadInterval = parseInterval(value, 24*time.Hour, defaultCleanupArchiveUnreadInterval)
 		case "CLEANUP_ARCHIVE_BATCH_SIZE":
 			p.opts.cleanupArchiveBatchSize = parseInt(value, defaultCleanupArchiveBatchSize)
 		case "CLEANUP_REMOVE_SESSIONS_DAYS":
-			p.opts.cleanupRemoveSessionsDays = parseInt(value, defaultCleanupRemoveSessionsDays)
+			p.opts.cleanupRemoveSessionsInterval = parseInterval(value, 24*time.Hour, defaultCleanupRemoveSessionsInterval)
 		case "WORKER_POOL_SIZE":
 			p.opts.workerPoolSize = parseInt(value, defaultWorkerPoolSize)
 		case "FORCE_REFRESH_INTERVAL":

+ 1 - 1
internal/http/cookie/cookie.go

@@ -24,7 +24,7 @@ func New(name, value string, isHTTPS bool, path string) *http.Cookie {
 		Path:     basePath(path),
 		Secure:   isHTTPS,
 		HttpOnly: true,
-		Expires:  time.Now().Add(time.Duration(config.Opts.CleanupRemoveSessionsDays()) * 24 * time.Hour),
+		Expires:  time.Now().Add(config.Opts.CleanupRemoveSessionsInterval()),
 		SameSite: http.SameSiteLaxMode,
 	}
 }

+ 5 - 3
internal/storage/entry.go

@@ -392,9 +392,9 @@ func (s *Storage) RefreshFeedEntries(userID, feedID int64, entries model.Entries
 	return newEntries, nil
 }
 
-// ArchiveEntries changes the status of entries to "removed" after the given number of days.
-func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error) {
-	if days < 0 || limit <= 0 {
+// ArchiveEntries changes the status of entries to "removed" after the given interval (clamped to a 24h minimum).
+func (s *Storage) ArchiveEntries(status string, interval time.Duration, limit int) (int64, error) {
+	if interval < 0 || limit <= 0 {
 		return 0, nil
 	}
 
@@ -419,6 +419,8 @@ func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error)
 				)
 	`
 
+	days := max(int(interval/(24*time.Hour)), 1)
+
 	result, err := s.db.Exec(query, model.EntryStatusRemoved, status, fmt.Sprintf("%d days", days), limit)
 	if err != nil {
 		return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err)

+ 6 - 2
internal/storage/session.go

@@ -7,6 +7,7 @@ import (
 	"crypto/rand"
 	"database/sql"
 	"fmt"
+	"time"
 
 	"miniflux.app/v2/internal/model"
 )
@@ -122,14 +123,17 @@ func (s *Storage) FlushAllSessions() (err error) {
 	return nil
 }
 
-// CleanOldSessions removes sessions older than specified days.
-func (s *Storage) CleanOldSessions(days int) int64 {
+// CleanOldSessions removes sessions older than the given interval (clamped to a 24h minimum).
+func (s *Storage) CleanOldSessions(interval time.Duration) int64 {
 	query := `
 		DELETE FROM
 			sessions
 		WHERE
 			created_at < now() - $1::interval
 	`
+
+	days := max(int(interval/(24*time.Hour)), 1)
+
 	result, err := s.db.Exec(query, fmt.Sprintf("%d days", days))
 	if err != nil {
 		return 0

+ 6 - 3
internal/storage/user_session.go

@@ -7,6 +7,7 @@ import (
 	"crypto/rand"
 	"database/sql"
 	"fmt"
+	"time"
 
 	"miniflux.app/v2/internal/model"
 )
@@ -43,7 +44,6 @@ func (s *Storage) UserSessions(userID int64) (model.UserSessions, error) {
 			&session.UserAgent,
 			&session.IP,
 		)
-
 		if err != nil {
 			return nil, fmt.Errorf(`store: unable to fetch user session row: %v`, err)
 		}
@@ -164,14 +164,17 @@ func (s *Storage) RemoveUserSessionByID(userID, sessionID int64) error {
 	return nil
 }
 
-// CleanOldUserSessions removes user sessions older than specified days.
-func (s *Storage) CleanOldUserSessions(days int) int64 {
+// CleanOldUserSessions removes user sessions older than the given interval (clamped to a 24h minimum).
+func (s *Storage) CleanOldUserSessions(interval time.Duration) int64 {
 	query := `
 		DELETE FROM
 			user_sessions
 		WHERE
 			created_at < now() - $1::interval
 	`
+
+	days := max(int(interval/(24*time.Hour)), 1)
+
 	result, err := s.db.Exec(query, fmt.Sprintf("%d days", days))
 	if err != nil {
 		return 0