
Make the number of days to archive read items configurable

Jebbs · 9 months ago · commit 87648490fd
4 changed files with 28 additions and 9 deletions
  1. config/config.go (+6 −0)
  2. config/config_test.go (+13 −0)
  3. service/scheduler/scheduler.go (+3 −3)
  4. storage/entry.go (+6 −6)

+ 6 - 0
config/config.go

@@ -21,6 +21,7 @@ const (
 	defaultBatchSize          = 10
 	defaultDatabaseMaxConns   = 20
 	defaultDatabaseMinConns   = 1
+	defaultArchiveReadDays    = 60
 	defaultListenAddr         = "127.0.0.1:8080"
 	defaultCertFile           = ""
 	defaultKeyFile            = ""
@@ -224,6 +225,11 @@ func (c *Config) HasSchedulerService() bool {
 	return !getBooleanValue("DISABLE_SCHEDULER_SERVICE")
 }
 
+// ArchiveReadDays returns the number of days after which read items are marked as removed.
+func (c *Config) ArchiveReadDays() int {
+	return getIntValue("ARCHIVE_READ_DAYS", defaultArchiveReadDays)
+}
+
 // NewConfig returns a new Config.
 func NewConfig() *Config {
 	cfg := &Config{

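The getIntValue helper is not part of this diff; below is a minimal sketch of what such a lookup typically looks like, assuming it reads the value from the process environment (as the os.Setenv call in the test below suggests) and falls back to the default on a missing or malformed value. It needs the os and strconv imports.

// Hypothetical sketch of an environment lookup helper; the real
// getIntValue used by the project is not shown in this commit.
func getIntValue(key string, fallback int) int {
	raw := os.Getenv(key)
	if raw == "" {
		return fallback
	}

	value, err := strconv.Atoi(raw)
	if err != nil {
		return fallback
	}
	return value
}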
+ 13 - 0
config/config_test.go

@@ -708,6 +708,19 @@ func TestDisableSchedulerService(t *testing.T) {
 	}
 }
 
+func TestArchiveReadDays(t *testing.T) {
+	os.Clearenv()
+	os.Setenv("ARCHIVE_READ_DAYS", "7")
+
+	cfg := NewConfig()
+	expected := 7
+	result := cfg.ArchiveReadDays()
+
+	if result != expected {
+		t.Fatalf(`Unexpected ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
+	}
+}
+
 func TestRunMigrationsWhenUnset(t *testing.T) {
 	os.Clearenv()
 

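A hypothetical companion test, not in this commit, could cover the 60-day default when ARCHIVE_READ_DAYS is unset, following the same pattern as the test above:

func TestDefaultArchiveReadDays(t *testing.T) {
	// Hypothetical test: with no ARCHIVE_READ_DAYS in the environment,
	// ArchiveReadDays should return defaultArchiveReadDays (60).
	os.Clearenv()

	cfg := NewConfig()
	expected := 60
	result := cfg.ArchiveReadDays()

	if result != expected {
		t.Fatalf(`Unexpected ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
	}
}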
+ 3 - 3
service/scheduler/scheduler.go

@@ -17,7 +17,7 @@ import (
 func Serve(cfg *config.Config, store *storage.Storage, pool *worker.Pool) {
 	logger.Info(`Starting scheduler...`)
 	go feedScheduler(store, pool, cfg.PollingFrequency(), cfg.BatchSize())
-	go cleanupScheduler(store, cfg.CleanupFrequency())
+	go cleanupScheduler(store, cfg.CleanupFrequency(), cfg.ArchiveReadDays())
 }
 
 func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSize int) {
@@ -33,14 +33,14 @@ func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSi
 	}
 }
 
-func cleanupScheduler(store *storage.Storage, frequency int) {
+func cleanupScheduler(store *storage.Storage, frequency, archiveDays int) {
 	c := time.Tick(time.Duration(frequency) * time.Hour)
 	for range c {
 		nbSessions := store.CleanOldSessions()
 		nbUserSessions := store.CleanOldUserSessions()
 		logger.Info("[Scheduler:Cleanup] Cleaned %d sessions and %d user sessions", nbSessions, nbUserSessions)
 
-		if err := store.ArchiveEntries(); err != nil {
+		if err := store.ArchiveEntries(archiveDays); err != nil {
 			logger.Error("[Scheduler:Cleanup] %v", err)
 		}
 	}

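One side note: time.Tick provides no way to stop the underlying ticker (and before Go 1.23 the ticker was never garbage collected). A sketch of the same loop written with time.NewTicker instead, not part of this commit:

// Hypothetical variant of cleanupScheduler using time.NewTicker so the
// ticker can be stopped; the cleanup work itself is unchanged.
func cleanupScheduler(store *storage.Storage, frequency, archiveDays int) {
	ticker := time.NewTicker(time.Duration(frequency) * time.Hour)
	defer ticker.Stop()

	for range ticker.C {
		nbSessions := store.CleanOldSessions()
		nbUserSessions := store.CleanOldUserSessions()
		logger.Info("[Scheduler:Cleanup] Cleaned %d sessions and %d user sessions", nbSessions, nbUserSessions)

		if err := store.ArchiveEntries(archiveDays); err != nil {
			logger.Error("[Scheduler:Cleanup] %v", err)
		}
	}
}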
+ 6 - 6
storage/entry.go

@@ -186,12 +186,12 @@ func (s *Storage) UpdateEntries(userID, feedID int64, entries model.Entries, upd
 	return nil
 }
 
-// ArchiveEntries changes the status of read items to "removed" after 60 days.
-func (s *Storage) ArchiveEntries() error {
-	query := `
-		UPDATE entries SET status='removed'
-		WHERE id=ANY(SELECT id FROM entries WHERE status='read' AND starred is false AND published_at < now () - '60 days'::interval LIMIT 5000)
-	`
+// ArchiveEntries changes the status of read items to "removed" after the given number of days.
+func (s *Storage) ArchiveEntries(days int) error {
+	query := fmt.Sprintf(`
+			UPDATE entries SET status='removed'
+			WHERE id=ANY(SELECT id FROM entries WHERE status='read' AND starred is false AND published_at < now () - '%d days'::interval LIMIT 5000)
+		`, days)
 	if _, err := s.db.Exec(query); err != nil {
 		return fmt.Errorf("unable to archive read entries: %v", err)
 	}