// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package storage // import "miniflux.app/v2/internal/storage"

import (
	"database/sql"
	"fmt"
	"strings"

	"miniflux.app/v2/internal/model"
	"miniflux.app/v2/internal/timezone"
)
// FeedQueryBuilder builds a SQL query to fetch feeds.
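//
// A minimal usage sketch (store and userID stand in for values supplied by
// the caller):
//
//	feeds, err := NewFeedQueryBuilder(store, userID).
//		WithCounters().
//		WithSorting("f.parsing_error_count", "DESC").
//		WithLimit(10).
//		GetFeeds()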
type FeedQueryBuilder struct {
	store             *Storage
	args              []interface{}
	conditions        []string
	sortExpressions   []string
	limit             int
	offset            int
	withCounters      bool
	counterJoinFeeds  bool
	counterArgs       []interface{}
	counterConditions []string
}

// NewFeedQueryBuilder returns a new FeedQueryBuilder.
func NewFeedQueryBuilder(store *Storage, userID int64) *FeedQueryBuilder {
	return &FeedQueryBuilder{
		store:             store,
		args:              []interface{}{userID},
		conditions:        []string{"f.user_id = $1"},
		counterArgs:       []interface{}{userID, model.EntryStatusRead, model.EntryStatusUnread},
		counterConditions: []string{"e.user_id = $1", "e.status IN ($2, $3)"},
	}
}

// WithCategoryID filters by category ID.
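// Because the feed query and the counter query keep separate argument slices,
// the next PostgreSQL placeholder index is derived from the length of the
// relevant slice.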
func (f *FeedQueryBuilder) WithCategoryID(categoryID int64) *FeedQueryBuilder {
	if categoryID > 0 {
		f.conditions = append(f.conditions, fmt.Sprintf("f.category_id = $%d", len(f.args)+1))
		f.args = append(f.args, categoryID)
		f.counterConditions = append(f.counterConditions, fmt.Sprintf("f.category_id = $%d", len(f.counterArgs)+1))
		f.counterArgs = append(f.counterArgs, categoryID)
		f.counterJoinFeeds = true
	}
	return f
}

// WithFeedID filters by feed ID.
func (f *FeedQueryBuilder) WithFeedID(feedID int64) *FeedQueryBuilder {
	if feedID > 0 {
		f.conditions = append(f.conditions, fmt.Sprintf("f.id = $%d", len(f.args)+1))
		f.args = append(f.args, feedID)
	}
	return f
}

// WithCounters makes the builder also fetch the read and unread entry
// counters for each feed.
func (f *FeedQueryBuilder) WithCounters() *FeedQueryBuilder {
	f.withCounters = true
	return f
}

// WithSorting adds a sort expression.
func (f *FeedQueryBuilder) WithSorting(column, direction string) *FeedQueryBuilder {
	f.sortExpressions = append(f.sortExpressions, fmt.Sprintf("%s %s", column, direction))
	return f
}

// WithLimit sets the limit.
func (f *FeedQueryBuilder) WithLimit(limit int) *FeedQueryBuilder {
	f.limit = limit
	return f
}

// WithOffset sets the offset.
func (f *FeedQueryBuilder) WithOffset(offset int) *FeedQueryBuilder {
	f.offset = offset
	return f
}

func (f *FeedQueryBuilder) buildCondition() string {
	return strings.Join(f.conditions, " AND ")
}

func (f *FeedQueryBuilder) buildCounterCondition() string {
	return strings.Join(f.counterConditions, " AND ")
}
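
// buildSorting renders the ORDER BY, LIMIT, and OFFSET clauses. When at least
// one sort expression is present, lower(f.title) is appended as a final
// tie-breaker.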
func (f *FeedQueryBuilder) buildSorting() string {
	var parts []string

	if len(f.sortExpressions) > 0 {
		parts = append(parts, fmt.Sprintf(`ORDER BY %s`, strings.Join(f.sortExpressions, ", ")))
	}

	if len(parts) > 0 {
		parts = append(parts, ", lower(f.title) ASC")
	}

	if f.limit > 0 {
		parts = append(parts, fmt.Sprintf(`LIMIT %d`, f.limit))
	}

	if f.offset > 0 {
		parts = append(parts, fmt.Sprintf(`OFFSET %d`, f.offset))
	}

	return strings.Join(parts, " ")
}

// GetFeed returns a single feed that matches the condition.
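// It returns a nil feed and a nil error when no feed matches.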
func (f *FeedQueryBuilder) GetFeed() (*model.Feed, error) {
	f.limit = 1
	feeds, err := f.GetFeeds()
	if err != nil {
		return nil, err
	}

	if len(feeds) != 1 {
		return nil, nil
	}

	return feeds[0], nil
}

// GetFeeds returns a list of feeds that match the condition.
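// When WithCounters was called, each returned feed also carries its read and
// unread entry counts.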
func (f *FeedQueryBuilder) GetFeeds() (model.Feeds, error) {
	var query = `
		SELECT
			f.id,
			f.feed_url,
			f.site_url,
			f.title,
			f.etag_header,
			f.last_modified_header,
			f.user_id,
			f.checked_at at time zone u.timezone,
			f.parsing_error_count,
			f.parsing_error_msg,
			f.scraper_rules,
			f.rewrite_rules,
			f.blocklist_rules,
			f.keeplist_rules,
			f.url_rewrite_rules,
			f.crawler,
			f.user_agent,
			f.cookie,
			f.username,
			f.password,
			f.ignore_http_cache,
			f.allow_self_signed_certificates,
			f.fetch_via_proxy,
			f.disabled,
			f.no_media_player,
			f.hide_globally,
			f.category_id,
			c.title as category_title,
			c.hide_globally as category_hidden,
			fi.icon_id,
			u.timezone,
			f.apprise_service_urls
		FROM
			feeds f
		LEFT JOIN
			categories c ON c.id=f.category_id
		LEFT JOIN
			feed_icons fi ON fi.feed_id=f.id
		LEFT JOIN
			users u ON u.id=f.user_id
		WHERE %s
		%s
	`
	query = fmt.Sprintf(query, f.buildCondition(), f.buildSorting())

	rows, err := f.store.db.Query(query, f.args...)
	if err != nil {
		return nil, fmt.Errorf(`store: unable to fetch feeds: %w`, err)
	}
	defer rows.Close()

	readCounters, unreadCounters, err := f.fetchFeedCounter()
	if err != nil {
		return nil, err
	}
	feeds := make(model.Feeds, 0)
	for rows.Next() {
		var feed model.Feed
		var iconID sql.NullInt64
		var tz string
		feed.Category = &model.Category{}

		err := rows.Scan(
			&feed.ID,
			&feed.FeedURL,
			&feed.SiteURL,
			&feed.Title,
			&feed.EtagHeader,
			&feed.LastModifiedHeader,
			&feed.UserID,
			&feed.CheckedAt,
			&feed.ParsingErrorCount,
			&feed.ParsingErrorMsg,
			&feed.ScraperRules,
			&feed.RewriteRules,
			&feed.BlocklistRules,
			&feed.KeeplistRules,
			&feed.UrlRewriteRules,
			&feed.Crawler,
			&feed.UserAgent,
			&feed.Cookie,
			&feed.Username,
			&feed.Password,
			&feed.IgnoreHTTPCache,
			&feed.AllowSelfSignedCertificates,
			&feed.FetchViaProxy,
			&feed.Disabled,
			&feed.NoMediaPlayer,
			&feed.HideGlobally,
			&feed.Category.ID,
			&feed.Category.Title,
			&feed.Category.HideGlobally,
			&iconID,
			&tz,
			&feed.AppriseServiceURLs,
		)
		if err != nil {
			return nil, fmt.Errorf(`store: unable to fetch feeds row: %w`, err)
		}

		if iconID.Valid {
			feed.Icon = &model.FeedIcon{FeedID: feed.ID, IconID: iconID.Int64}
		} else {
			feed.Icon = &model.FeedIcon{FeedID: feed.ID, IconID: 0}
		}

		if readCounters != nil {
			if count, found := readCounters[feed.ID]; found {
				feed.ReadCount = count
			}
		}

		if unreadCounters != nil {
			if count, found := unreadCounters[feed.ID]; found {
				feed.UnreadCount = count
			}
		}

		feed.CheckedAt = timezone.Convert(tz, feed.CheckedAt)
		feed.Category.UserID = feed.UserID
		feeds = append(feeds, &feed)
	}

	return feeds, nil
}
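
// fetchFeedCounter returns the number of read and unread entries per feed ID
// for the current user, or nil maps when counters were not requested.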
func (f *FeedQueryBuilder) fetchFeedCounter() (readCounters map[int64]int, unreadCounters map[int64]int, err error) {
	if !f.withCounters {
		return nil, nil, nil
	}

	query := `
		SELECT
			e.feed_id,
			e.status,
			count(*)
		FROM
			entries e
		%s
		WHERE
			%s
		GROUP BY
			e.feed_id, e.status
	`
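	// The entries table needs to be joined with feeds only when one of the
	// conditions references a feeds column (for example f.category_id).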
	join := ""
	if f.counterJoinFeeds {
		join = "LEFT JOIN feeds f ON f.id=e.feed_id"
	}
	query = fmt.Sprintf(query, join, f.buildCounterCondition())

	rows, err := f.store.db.Query(query, f.counterArgs...)
	if err != nil {
		return nil, nil, fmt.Errorf(`store: unable to fetch feed counts: %w`, err)
	}
	defer rows.Close()

	readCounters = make(map[int64]int)
	unreadCounters = make(map[int64]int)

	for rows.Next() {
		var feedID int64
		var status string
		var count int
		if err := rows.Scan(&feedID, &status, &count); err != nil {
			return nil, nil, fmt.Errorf(`store: unable to fetch feed counter row: %w`, err)
		}

		if status == model.EntryStatusRead {
			readCounters[feedID] = count
		} else if status == model.EntryStatusUnread {
			unreadCounters[feedID] = count
		}
	}

	return readCounters, unreadCounters, nil
}