// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package storage // import "miniflux.app/v2/internal/storage"

import (
	"database/sql"
	"errors"
	"fmt"
	"log/slog"
	"time"

	"miniflux.app/v2/internal/crypto"
	"miniflux.app/v2/internal/model"

	"github.com/lib/pq"
)

// CountAllEntries returns the number of entries for each status in the database.
func (s *Storage) CountAllEntries() map[string]int64 {
	rows, err := s.db.Query(`SELECT status, count(*) FROM entries GROUP BY status`)
	if err != nil {
		return nil
	}
	defer rows.Close()

	results := make(map[string]int64)
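	// Pre-populate each status with zero so that all three statuses are always
	// present in the returned map, even when no entry has that status.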
	results[model.EntryStatusUnread] = 0
	results[model.EntryStatusRead] = 0
	results[model.EntryStatusRemoved] = 0

	for rows.Next() {
		var status string
		var count int64

		if err := rows.Scan(&status, &count); err != nil {
			continue
		}

		results[status] = count
	}

	results["total"] = results[model.EntryStatusUnread] + results[model.EntryStatusRead] + results[model.EntryStatusRemoved]
	return results
}

// CountUnreadEntries returns the number of unread entries.
func (s *Storage) CountUnreadEntries(userID int64) int {
	builder := s.NewEntryQueryBuilder(userID)
	builder.WithStatus(model.EntryStatusUnread)
	builder.WithGloballyVisible()

	n, err := builder.CountEntries()
	if err != nil {
		slog.Error("Unable to count unread entries",
			slog.Int64("user_id", userID),
			slog.Any("error", err),
		)
		return 0
	}

	return n
}

// NewEntryQueryBuilder returns a new EntryQueryBuilder
func (s *Storage) NewEntryQueryBuilder(userID int64) *EntryQueryBuilder {
	return NewEntryQueryBuilder(s, userID)
}

// UpdateEntryContent updates entry content.
func (s *Storage) UpdateEntryContent(entry *model.Entry) error {
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}

	query := `
		UPDATE
			entries
		SET
			content=$1, reading_time=$2
		WHERE
			id=$3 AND user_id=$4
	`
	_, err = tx.Exec(query, entry.Content, entry.ReadingTime, entry.ID, entry.UserID)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to update content of entry #%d: %v`, entry.ID, err)
	}
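
	// Rebuild the full-text search vector for this entry: the title is indexed
	// with weight 'A' and the content with weight 'B', both truncated to the
	// first 500,000 characters.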
	query = `
		UPDATE
			entries
		SET
			document_vectors = setweight(to_tsvector(left(coalesce(title, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce(content, ''), 500000)), 'B')
		WHERE
			id=$1 AND user_id=$2
	`
	_, err = tx.Exec(query, entry.ID, entry.UserID)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to update the search vector of entry #%d: %v`, entry.ID, err)
	}

	return tx.Commit()
}

// createEntry adds a new entry.
func (s *Storage) createEntry(tx *sql.Tx, entry *model.Entry) error {
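	// The search vector (document_vectors) is computed at insert time from the
	// title ($1, weight 'A') and the content ($6, weight 'B'), so a new entry is
	// searchable as soon as it is stored.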
	query := `
		INSERT INTO entries
			(
				title,
				hash,
				url,
				comments_url,
				published_at,
				content,
				author,
				user_id,
				feed_id,
				reading_time,
				changed_at,
				document_vectors,
				tags
			)
		VALUES
			(
				$1,
				$2,
				$3,
				$4,
				$5,
				$6,
				$7,
				$8,
				$9,
				$10,
				now(),
				setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($6, ''), 500000)), 'B'),
				$11
			)
		RETURNING
			id, status, created_at, changed_at
	`
	err := tx.QueryRow(
		query,
		entry.Title,
		entry.Hash,
		entry.URL,
		entry.CommentsURL,
		entry.Date,
		entry.Content,
		entry.Author,
		entry.UserID,
		entry.FeedID,
		entry.ReadingTime,
		pq.Array(removeDuplicates(entry.Tags)),
	).Scan(
		&entry.ID,
		&entry.Status,
		&entry.CreatedAt,
		&entry.ChangedAt,
	)

	if err != nil {
		return fmt.Errorf(`store: unable to create entry %q (feed #%d): %v`, entry.URL, entry.FeedID, err)
	}

	for i := 0; i < len(entry.Enclosures); i++ {
		entry.Enclosures[i].EntryID = entry.ID
		entry.Enclosures[i].UserID = entry.UserID
		err := s.createEnclosure(tx, entry.Enclosures[i])
		if err != nil {
			return err
		}
	}

	return nil
}

// updateEntry updates an entry when a feed is refreshed.
// Note: we do not update the published date because some feeds do not contain any date,
// and it defaults to time.Now(), which could change the order of items on the history page.
func (s *Storage) updateEntry(tx *sql.Tx, entry *model.Entry) error {
	query := `
		UPDATE
			entries
		SET
			title=$1,
			url=$2,
			comments_url=$3,
			content=$4,
			author=$5,
			reading_time=$6,
			document_vectors = setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($4, ''), 500000)), 'B'),
			tags=$10
		WHERE
			user_id=$7 AND feed_id=$8 AND hash=$9
		RETURNING
			id
	`
	err := tx.QueryRow(
		query,
		entry.Title,
		entry.URL,
		entry.CommentsURL,
		entry.Content,
		entry.Author,
		entry.ReadingTime,
		entry.UserID,
		entry.FeedID,
		entry.Hash,
		pq.Array(removeDuplicates(entry.Tags)),
	).Scan(&entry.ID)

	if err != nil {
		return fmt.Errorf(`store: unable to update entry %q: %v`, entry.URL, err)
	}

	for _, enclosure := range entry.Enclosures {
		enclosure.UserID = entry.UserID
		enclosure.EntryID = entry.ID
	}

	return s.updateEnclosures(tx, entry)
}

// entryExists checks if an entry already exists based on its hash when refreshing a feed.
func (s *Storage) entryExists(tx *sql.Tx, entry *model.Entry) (bool, error) {
	var result bool

	// Note: This query uses entries_feed_id_hash_key index (filtering on user_id is not necessary).
	err := tx.QueryRow(`SELECT true FROM entries WHERE feed_id=$1 AND hash=$2`, entry.FeedID, entry.Hash).Scan(&result)

	if err != nil && err != sql.ErrNoRows {
		return result, fmt.Errorf(`store: unable to check if entry exists: %v`, err)
	}

	return result, nil
}

// GetReadTime fetches the read time of an entry based on its hash, and the feed ID and user ID from the feed.
// It's intended to be used on entry objects created by parsing a feed, as they don't contain much information.
// The feed parameter helps to scope the search to a specific user and feed in order to avoid hash collisions.
func (s *Storage) GetReadTime(entry *model.Entry, feed *model.Feed) int {
	var result int
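	// A failed scan (including sql.ErrNoRows when no matching entry exists) is
	// ignored, so the zero value 0 is returned in that case.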
	s.db.QueryRow(
		`SELECT reading_time FROM entries WHERE user_id=$1 AND feed_id=$2 AND hash=$3`,
		feed.UserID,
		feed.ID,
		entry.Hash,
	).Scan(&result)
	return result
}

// cleanupEntries deletes from the database the entries marked as "removed" that are no longer present in the feed.
func (s *Storage) cleanupEntries(feedID int64, entryHashes []string) error {
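	// $1 and $2 both receive the feed ID; only "removed" entries whose hash is
	// absent from the latest list of feed item hashes are deleted.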
	query := `
		DELETE FROM
			entries
		WHERE
			feed_id=$1
		AND
			id IN (SELECT id FROM entries WHERE feed_id=$2 AND status=$3 AND NOT (hash=ANY($4)))
	`
	if _, err := s.db.Exec(query, feedID, feedID, model.EntryStatusRemoved, pq.Array(entryHashes)); err != nil {
		return fmt.Errorf(`store: unable to cleanup entries: %v`, err)
	}

	return nil
}

// RefreshFeedEntries updates feed entries while refreshing a feed.
func (s *Storage) RefreshFeedEntries(userID, feedID int64, entries model.Entries, updateExistingEntries bool) (newEntries model.Entries, err error) {
	var entryHashes []string

	for _, entry := range entries {
		entry.UserID = userID
		entry.FeedID = feedID
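
		// Each entry is created or updated in its own short transaction.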
		tx, err := s.db.Begin()
		if err != nil {
			return nil, fmt.Errorf(`store: unable to start transaction: %v`, err)
		}

		entryExists, err := s.entryExists(tx, entry)
		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return nil, fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return nil, err
		}

		if entryExists {
			if updateExistingEntries {
				err = s.updateEntry(tx, entry)
			}
		} else {
			err = s.createEntry(tx, entry)
			if err == nil {
				newEntries = append(newEntries, entry)
			}
		}

		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return nil, fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return nil, err
		}

		if err := tx.Commit(); err != nil {
			return nil, fmt.Errorf(`store: unable to commit transaction: %v`, err)
		}

		entryHashes = append(entryHashes, entry.Hash)
	}
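
	// Cleanup of entries that disappeared from the feed happens in the background;
	// failures are only logged and do not fail the refresh.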
	go func() {
		if err := s.cleanupEntries(feedID, entryHashes); err != nil {
			slog.Error("Unable to cleanup entries",
				slog.Int64("user_id", userID),
				slog.Int64("feed_id", feedID),
				slog.Any("error", err),
			)
		}
	}()

	return newEntries, nil
}

// ArchiveEntries changes the status of entries to "removed" after the given number of days.
func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error) {
	if days < 0 || limit <= 0 {
		return 0, nil
	}

	query := `
		UPDATE
			entries
		SET
			status='removed'
		WHERE
			id=ANY(SELECT id FROM entries WHERE status=$1 AND starred is false AND share_code='' AND created_at < now() - '%d days'::interval ORDER BY created_at ASC LIMIT %d)
	`
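	// days and limit are interpolated with fmt.Sprintf into the interval literal and
	// the LIMIT clause; both are integers, while status is passed as a bound parameter.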
	result, err := s.db.Exec(fmt.Sprintf(query, days, limit), status)
	if err != nil {
		return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf(`store: unable to get the number of rows affected: %v`, err)
	}

	return count, nil
}

// SetEntriesStatus updates the status of the given list of entries.
func (s *Storage) SetEntriesStatus(userID int64, entryIDs []int64, status string) error {
	query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
	result, err := s.db.Exec(query, status, userID, pq.Array(entryIDs))
	if err != nil {
		return fmt.Errorf(`store: unable to update entries statuses %v: %v`, entryIDs, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}
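
// SetEntriesStatusCount updates the status of the given list of entries and returns
// the number of those entries that belong to globally visible feeds and categories.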
func (s *Storage) SetEntriesStatusCount(userID int64, entryIDs []int64, status string) (int, error) {
	if err := s.SetEntriesStatus(userID, entryIDs, status); err != nil {
		return 0, err
	}

	query := `
		SELECT count(*)
		FROM entries e
			JOIN feeds f ON (f.id = e.feed_id)
			JOIN categories c ON (c.id = f.category_id)
		WHERE e.user_id = $1
			AND e.id = ANY($2)
			AND NOT f.hide_globally
			AND NOT c.hide_globally
	`
	row := s.db.QueryRow(query, userID, pq.Array(entryIDs))
	visible := 0
	if err := row.Scan(&visible); err != nil {
		return 0, fmt.Errorf(`store: unable to query entries visibility %v: %v`, entryIDs, err)
	}

	return visible, nil
}

// SetEntriesBookmarkedState updates the bookmarked state for the given list of entries.
func (s *Storage) SetEntriesBookmarkedState(userID int64, entryIDs []int64, starred bool) error {
	query := `UPDATE entries SET starred=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
	result, err := s.db.Exec(query, starred, userID, pq.Array(entryIDs))
	if err != nil {
		return fmt.Errorf(`store: unable to update the bookmarked state %v: %v`, entryIDs, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}

// ToggleBookmark toggles entry bookmark value.
func (s *Storage) ToggleBookmark(userID int64, entryID int64) error {
	query := `UPDATE entries SET starred = NOT starred, changed_at=now() WHERE user_id=$1 AND id=$2`
	result, err := s.db.Exec(query, userID, entryID)
	if err != nil {
		return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}

// FlushHistory changes all entries with the status "read" to "removed", except starred and shared entries.
func (s *Storage) FlushHistory(userID int64) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2 AND status=$3 AND starred is false AND share_code=''
	`
	_, err := s.db.Exec(query, model.EntryStatusRemoved, userID, model.EntryStatusRead)
	if err != nil {
		return fmt.Errorf(`store: unable to flush history: %v`, err)
	}

	return nil
}

// MarkAllAsRead updates all user entries to the read status.
func (s *Storage) MarkAllAsRead(userID int64) error {
	query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND status=$3`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread)
	if err != nil {
		return fmt.Errorf(`store: unable to mark all entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked all entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// MarkGloballyVisibleFeedsAsRead marks all unread entries from globally visible feeds as read.
func (s *Storage) MarkGloballyVisibleFeedsAsRead(userID int64) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		FROM
			feeds
		WHERE
			entries.feed_id = feeds.id
			AND entries.user_id=$2
			AND entries.status=$3
			AND feeds.hide_globally=$4
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, false)
	if err != nil {
		return fmt.Errorf(`store: unable to mark globally visible feeds as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked globally visible feed entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// MarkFeedAsRead updates all feed entries to the read status.
func (s *Storage) MarkFeedAsRead(userID, feedID int64, before time.Time) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2 AND feed_id=$3 AND status=$4 AND published_at < $5
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, feedID, model.EntryStatusUnread, before)
	if err != nil {
		return fmt.Errorf(`store: unable to mark feed entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked feed entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("feed_id", feedID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// MarkCategoryAsRead updates all category entries to the read status.
func (s *Storage) MarkCategoryAsRead(userID, categoryID int64, before time.Time) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2
		AND
			status=$3
		AND
			published_at < $4
		AND
			feed_id IN (SELECT id FROM feeds WHERE user_id=$2 AND category_id=$5)
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, before, categoryID)
	if err != nil {
		return fmt.Errorf(`store: unable to mark category entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked category entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("category_id", categoryID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// EntryURLExists returns true if an entry with this URL already exists.
func (s *Storage) EntryURLExists(feedID int64, entryURL string) bool {
	var result bool
	query := `SELECT true FROM entries WHERE feed_id=$1 AND url=$2`
	s.db.QueryRow(query, feedID, entryURL).Scan(&result)
	return result
}

// EntryShareCode returns the share code of the provided entry.
// It generates a new one if not already defined.
func (s *Storage) EntryShareCode(userID int64, entryID int64) (shareCode string, err error) {
	query := `SELECT share_code FROM entries WHERE user_id=$1 AND id=$2`
	err = s.db.QueryRow(query, userID, entryID).Scan(&shareCode)
	if err != nil {
		err = fmt.Errorf(`store: unable to get share code for entry #%d: %v`, entryID, err)
		return
	}
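
	// Lazily generate and persist a share code the first time the entry is shared.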
	if shareCode == "" {
		shareCode = crypto.GenerateRandomStringHex(20)

		query = `UPDATE entries SET share_code = $1 WHERE user_id=$2 AND id=$3`
		_, err = s.db.Exec(query, shareCode, userID, entryID)
		if err != nil {
			err = fmt.Errorf(`store: unable to set share code for entry #%d: %v`, entryID, err)
			return
		}
	}

	return
}

// UnshareEntry removes the share code for the given entry.
func (s *Storage) UnshareEntry(userID int64, entryID int64) (err error) {
	query := `UPDATE entries SET share_code='' WHERE user_id=$1 AND id=$2`
	_, err = s.db.Exec(query, userID, entryID)
	if err != nil {
		err = fmt.Errorf(`store: unable to remove share code for entry #%d: %v`, entryID, err)
	}
	return
}

// removeDuplicates removes duplicate items from a slice while preserving the order of first occurrence.
func removeDuplicates[T string | int](sliceList []T) []T {
	allKeys := make(map[T]bool)
	list := []T{}
	for _, item := range sliceList {
		if _, value := allKeys[item]; !value {
			allKeys[item] = true
			list = append(list, item)
		}
	}
	return list
}