Add option to archive unread entries

parent df7a6e18fd
commit 13c89c29c5

6 changed files with 44 additions and 66 deletions

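In short, `Storage.ArchiveEntries` goes from `ArchiveEntries(days int) error` to `ArchiveEntries(status string, days int) (int64, error)`, and a new `CLEANUP_ARCHIVE_UNREAD_DAYS` option (default 180 days) is threaded through the config constants, parser, scheduler, and man page. A minimal caller sketch using only identifiers that appear in this diff (the helper name `archiveUnread` is hypothetical):

    package main

    import (
    	"miniflux.app/logger"
    	"miniflux.app/model"
    	"miniflux.app/storage"
    )

    // archiveUnread mirrors what cleanupScheduler now does for unread
    // entries: archive anything older than the cutoff and log the row
    // count returned by the new two-value API.
    func archiveUnread(store *storage.Storage, archiveUnreadDays int) {
    	rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, archiveUnreadDays)
    	if err != nil {
    		logger.Error("[Scheduler:ArchiveUnreadEntries] %v", err)
    		return
    	}
    	logger.Info("[Scheduler:ArchiveUnreadEntries] %d entries changed", rowsAffected)
    }
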
@@ -480,24 +480,6 @@ func TestCleanupFrequencyHours(t *testing.T) {
 	}
 }

-func TestDeprecatedCleanupFrequencyHoursVar(t *testing.T) {
-	os.Clearenv()
-	os.Setenv("CLEANUP_FREQUENCY", "42")
-
-	parser := NewParser()
-	opts, err := parser.ParseEnvironmentVariables()
-	if err != nil {
-		t.Fatalf(`Parsing failure: %v`, err)
-	}
-
-	expected := 42
-	result := opts.CleanupFrequencyHours()
-
-	if result != expected {
-		t.Fatalf(`Unexpected CLEANUP_FREQUENCY value, got %v instead of %v`, result, expected)
-	}
-}
-
 func TestDefaultCleanupArchiveReadDaysValue(t *testing.T) {
 	os.Clearenv()

@@ -534,24 +516,6 @@ func TestCleanupArchiveReadDays(t *testing.T) {
 	}
 }

-func TestDeprecatedCleanupArchiveReadDaysVar(t *testing.T) {
-	os.Clearenv()
-	os.Setenv("ARCHIVE_READ_DAYS", "7")
-
-	parser := NewParser()
-	opts, err := parser.ParseEnvironmentVariables()
-	if err != nil {
-		t.Fatalf(`Parsing failure: %v`, err)
-	}
-
-	expected := 7
-	result := opts.CleanupArchiveReadDays()
-
-	if result != expected {
-		t.Fatalf(`Unexpected ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
-	}
-}
-
 func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) {
 	os.Clearenv()

@@ -36,6 +36,7 @@ const (
 	defaultCertCache                 = "/tmp/cert_cache"
 	defaultCleanupFrequencyHours     = 24
 	defaultCleanupArchiveReadDays    = 60
+	defaultCleanupArchiveUnreadDays  = 180
 	defaultCleanupRemoveSessionsDays = 30
 	defaultProxyImages               = "http-only"
 	defaultCreateAdmin               = false

@@ -79,6 +80,7 @@ type Options struct {
 	certKeyFile               string
 	cleanupFrequencyHours     int
 	cleanupArchiveReadDays    int
+	cleanupArchiveUnreadDays  int
 	cleanupRemoveSessionsDays int
 	pollingFrequency          int
 	batchSize                 int

@@ -129,6 +131,7 @@ func NewOptions() *Options {
 		certKeyFile:               defaultKeyFile,
 		cleanupFrequencyHours:     defaultCleanupFrequencyHours,
 		cleanupArchiveReadDays:    defaultCleanupArchiveReadDays,
+		cleanupArchiveUnreadDays:  defaultCleanupArchiveUnreadDays,
 		cleanupRemoveSessionsDays: defaultCleanupRemoveSessionsDays,
 		pollingFrequency:          defaultPollingFrequency,
 		batchSize:                 defaultBatchSize,

@@ -245,6 +248,11 @@ func (o *Options) CleanupArchiveReadDays() int {
 	return o.cleanupArchiveReadDays
 }

+// CleanupArchiveUnreadDays returns the number of days after which marking unread items as removed.
+func (o *Options) CleanupArchiveUnreadDays() int {
+	return o.cleanupArchiveUnreadDays
+}
+
 // CleanupRemoveSessionsDays returns the number of days after which to remove sessions.
 func (o *Options) CleanupRemoveSessionsDays() int {
 	return o.cleanupRemoveSessionsDays

@@ -412,6 +420,7 @@ func (o *Options) String() string {
 	builder.WriteString(fmt.Sprintf("CERT_CACHE: %v\n", o.certCache))
 	builder.WriteString(fmt.Sprintf("CLEANUP_FREQUENCY_HOURS: %v\n", o.cleanupFrequencyHours))
 	builder.WriteString(fmt.Sprintf("CLEANUP_ARCHIVE_READ_DAYS: %v\n", o.cleanupArchiveReadDays))
+	builder.WriteString(fmt.Sprintf("CLEANUP_ARCHIVE_UNREAD_DAYS: %v\n", o.cleanupArchiveUnreadDays))
 	builder.WriteString(fmt.Sprintf("CLEANUP_REMOVE_SESSIONS_DAYS: %v\n", o.cleanupRemoveSessionsDays))
 	builder.WriteString(fmt.Sprintf("WORKER_POOL_SIZE: %v\n", o.workerPoolSize))
 	builder.WriteString(fmt.Sprintf("POLLING_FREQUENCY: %v\n", o.pollingFrequency))

@@ -15,8 +15,6 @@ import (
 	"os"
 	"strconv"
 	"strings"
-
-	"miniflux.app/logger"
 )

 // Parser handles configuration parsing.

@@ -118,24 +116,10 @@ func (p *Parser) parseLines(lines []string) (err error) {
 			p.opts.cleanupFrequencyHours = parseInt(value, defaultCleanupFrequencyHours)
 		case "CLEANUP_ARCHIVE_READ_DAYS":
 			p.opts.cleanupArchiveReadDays = parseInt(value, defaultCleanupArchiveReadDays)
+		case "CLEANUP_ARCHIVE_UNREAD_DAYS":
+			p.opts.cleanupArchiveUnreadDays = parseInt(value, defaultCleanupArchiveUnreadDays)
 		case "CLEANUP_REMOVE_SESSIONS_DAYS":
 			p.opts.cleanupRemoveSessionsDays = parseInt(value, defaultCleanupRemoveSessionsDays)
-		case "CLEANUP_FREQUENCY":
-			logger.Error("[Config] CLEANUP_FREQUENCY has been deprecated in favor of CLEANUP_FREQUENCY_HOURS.")
-
-			if p.opts.cleanupFrequencyHours != defaultCleanupFrequencyHours {
-				logger.Error("[Config] Ignoring CLEANUP_FREQUENCY as CLEANUP_FREQUENCY_HOURS is already specified.")
-			} else {
-				p.opts.cleanupFrequencyHours = parseInt(value, defaultCleanupFrequencyHours)
-			}
-		case "ARCHIVE_READ_DAYS":
-			logger.Error("[Config] ARCHIVE_READ_DAYS has been deprecated in favor of CLEANUP_ARCHIVE_READ_DAYS.")
-
-			if p.opts.cleanupArchiveReadDays != defaultCleanupArchiveReadDays {
-				logger.Error("[Config] Ignoring ARCHIVE_READ_DAYS as CLEANUP_ARCHIVE_READ_DAYS is already specified.")
-			} else {
-				p.opts.cleanupArchiveReadDays = parseInt(value, defaultCleanupArchiveReadDays)
-			}
 		case "WORKER_POOL_SIZE":
 			p.opts.workerPoolSize = parseInt(value, defaultWorkerPoolSize)
 		case "POLLING_FREQUENCY":

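Note that the new case reuses the package's `parseInt` fallback helper. Its body is not part of this diff; presumably it behaves roughly like the sketch below (an assumption, not the actual implementation):

    package config

    import "strconv"

    // parseInt is assumed to fall back to the default when the environment
    // variable is empty or not a valid integer.
    func parseInt(value string, fallback int) int {
    	if value == "" {
    		return fallback
    	}
    	v, err := strconv.Atoi(value)
    	if err != nil {
    		return fallback
    	}
    	return v
    }
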
@@ -158,6 +158,11 @@ Number of days after marking read items as removed\&.
 .br
 Default is 60 days\&.
 .TP
+.B CLEANUP_ARCHIVE_UNREAD_DAYS
+Number of days after marking unread items as removed\&.
+.br
+Default is 180 days\&.
+.TP
 .B CLEANUP_REMOVE_SESSIONS_DAYS
 Number of days after removing old sessions from the database\&.
 .br

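As the man page entry says, the knob is an ordinary environment variable. A test-style sketch of overriding the 180-day default, modeled on the existing parser tests in this repository (the helper `exampleOverride` is hypothetical):

    package config

    import "os"

    // exampleOverride sets the variable before parsing, so the parsed
    // options carry 90 instead of the 180-day default.
    func exampleOverride() (int, error) {
    	os.Clearenv()
    	os.Setenv("CLEANUP_ARCHIVE_UNREAD_DAYS", "90")

    	parser := NewParser()
    	opts, err := parser.ParseEnvironmentVariables()
    	if err != nil {
    		return 0, err
    	}
    	return opts.CleanupArchiveUnreadDays(), nil // 90
    }
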
@@ -9,6 +9,7 @@ import (

 	"miniflux.app/config"
 	"miniflux.app/logger"
+	"miniflux.app/model"
 	"miniflux.app/storage"
 	"miniflux.app/worker"
 )

@@ -28,6 +29,7 @@ func Serve(store *storage.Storage, pool *worker.Pool) {
 		store,
 		config.Opts.CleanupFrequencyHours(),
 		config.Opts.CleanupArchiveReadDays(),
+		config.Opts.CleanupArchiveUnreadDays(),
 		config.Opts.CleanupRemoveSessionsDays(),
 	)
 }

@@ -45,15 +47,23 @@ func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSi
 	}
 }

-func cleanupScheduler(store *storage.Storage, frequency int, archiveDays int, sessionsDays int) {
+func cleanupScheduler(store *storage.Storage, frequency, archiveReadDays, archiveUnreadDays, sessionsDays int) {
 	c := time.Tick(time.Duration(frequency) * time.Hour)
 	for range c {
 		nbSessions := store.CleanOldSessions(sessionsDays)
 		nbUserSessions := store.CleanOldUserSessions(sessionsDays)
 		logger.Info("[Scheduler:Cleanup] Cleaned %d sessions and %d user sessions", nbSessions, nbUserSessions)

-		if err := store.ArchiveEntries(archiveDays); err != nil {
-			logger.Error("[Scheduler:Cleanup] %v", err)
+		if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, archiveReadDays); err != nil {
+			logger.Error("[Scheduler:ArchiveReadEntries] %v", err)
+		} else {
+			logger.Info("[Scheduler:ArchiveReadEntries] %d entries changed", rowsAffected)
+		}
+
+		if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, archiveUnreadDays); err != nil {
+			logger.Error("[Scheduler:ArchiveUnreadEntries] %v", err)
+		} else {
+			logger.Info("[Scheduler:ArchiveUnreadEntries] %d entries changed", rowsAffected)
 		}
 	}
 }

@@ -209,26 +209,32 @@ func (s *Storage) UpdateEntries(userID, feedID int64, entries model.Entries, upd
 	return nil
 }

-// ArchiveEntries changes the status of read items to "removed" after specified days.
-func (s *Storage) ArchiveEntries(days int) error {
+// ArchiveEntries changes the status of entries to "removed" after the given number of days.
+func (s *Storage) ArchiveEntries(status string, days int) (int64, error) {
 	if days < 0 {
-		return nil
+		return 0, nil
 	}

-	before := time.Now().AddDate(0, 0, -days)
 	query := `
 		UPDATE
 			entries
 		SET
-			status=$1
+			status='removed'
 		WHERE
-			id=ANY(SELECT id FROM entries WHERE status=$2 AND starred is false AND share_code='' AND published_at < $3 LIMIT 5000)
+			id=ANY(SELECT id FROM entries WHERE status=$1 AND starred is false AND share_code='' AND published_at < now () - '%d days'::interval LIMIT 5000)
 	`
-	if _, err := s.db.Exec(query, model.EntryStatusRemoved, model.EntryStatusRead, before); err != nil {
-		return fmt.Errorf(`store: unable to archive read entries: %v`, err)
+
+	result, err := s.db.Exec(fmt.Sprintf(query, days), status)
+	if err != nil {
+		return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err)
 	}

-	return nil
+	count, err := result.RowsAffected()
+	if err != nil {
+		return 0, fmt.Errorf(`store: unable to get the number of rows affected: %v`, err)
+	}
+
+	return count, nil
 }

 // SetEntriesStatus update the status of the given list of entries.
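One design note on the rewritten query: the day count is spliced into the SQL with `fmt.Sprintf` to form an interval literal, while the entry status stays a `$1` bind parameter; this is safe only because `days` is an `int`, never user-supplied text. A tiny runnable illustration of the rendering:

    package main

    import "fmt"

    func main() {
    	// Renders the interval literal the storage layer builds for days = 180.
    	query := `published_at < now () - '%d days'::interval`
    	fmt.Println(fmt.Sprintf(query, 180))
    	// prints: published_at < now () - '180 days'::interval
    }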