Refactor manual entry scraper

parent 52de36b158
commit 311a133ab8

6 changed files with 34 additions and 19 deletions
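The reader/filter package becomes reader/processor: filter.Apply is renamed to processor.ProcessFeedEntries, and a new processor.ProcessEntryWebPage helper scrapes, rewrites and sanitizes a single entry. The UI fetchContent handler now delegates to this helper instead of calling scraper, rewrite and sanitizer directly. The crawler only replaces an entry's content when the scraper returns something, a debug line is added to the rewrite package, and a stray quote is dropped from a scraper log message.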
@@ -14,9 +14,9 @@ import (
 	"miniflux.app/logger"
 	"miniflux.app/model"
 	"miniflux.app/reader/browser"
-	"miniflux.app/reader/filter"
 	"miniflux.app/reader/icon"
 	"miniflux.app/reader/parser"
+	"miniflux.app/reader/processor"
 	"miniflux.app/storage"
 	"miniflux.app/timer"
 )
@@ -63,7 +63,7 @@ func (h *Handler) CreateFeed(userID, categoryID int64, url string, crawler bool,
 	subscription.WithClientResponse(response)
 	subscription.CheckedNow()
 
-	filter.Apply(h.store, subscription)
+	processor.ProcessFeedEntries(h.store, subscription)
 
 	if storeErr := h.store.CreateFeed(subscription); storeErr != nil {
 		return nil, storeErr
@@ -114,7 +114,7 @@ func (h *Handler) RefreshFeed(userID, feedID int64) error {
 	}
 
 	originalFeed.Entries = updatedFeed.Entries
-	filter.Apply(h.store, originalFeed)
+	processor.ProcessFeedEntries(h.store, originalFeed)
 
 	// We don't update existing entries when the crawler is enabled (we crawl only inexisting entries).
 	if storeErr := h.store.UpdateEntries(originalFeed.UserID, originalFeed.ID, originalFeed.Entries, !originalFeed.Crawler); storeErr != nil {
@@ -4,7 +4,7 @@
 
 /*
 
-Package filter applies a set of filters to feed entries.
+Package processor applies rules and sanitize content for feed entries.
 
 */
-package filter // import "miniflux.app/reader/filter"
+package processor // import "miniflux.app/reader/processor"
@@ -2,7 +2,7 @@
 // Use of this source code is governed by the Apache 2.0
 // license that can be found in the LICENSE file.
 
-package filter
+package processor
 
 import (
 	"miniflux.app/logger"
@@ -13,15 +13,15 @@ import (
 	"miniflux.app/storage"
 )
 
-// Apply executes all entry filters.
-func Apply(store *storage.Storage, feed *model.Feed) {
+// ProcessFeedEntries downloads original web page for entries and apply filters.
+func ProcessFeedEntries(store *storage.Storage, feed *model.Feed) {
 	for _, entry := range feed.Entries {
 		if feed.Crawler {
 			if !store.EntryURLExists(feed.UserID, entry.URL) {
 				content, err := scraper.Fetch(entry.URL, feed.ScraperRules, feed.UserAgent)
 				if err != nil {
-					logger.Error("Unable to crawl this entry: %q => %v", entry.URL, err)
-				} else {
+					logger.Error(`[Filter] Unable to crawl this entry: %q => %v`, entry.URL, err)
+				} else if content != "" {
 					// We replace the entry content only if the scraper doesn't return any error.
 					entry.Content = content
 				}
@@ -34,3 +34,20 @@ func Apply(store *storage.Storage, feed *model.Feed) {
 		entry.Content = sanitizer.Sanitize(entry.URL, entry.Content)
 	}
 }
+
+// ProcessEntryWebPage downloads the entry web page and apply rewrite rules.
+func ProcessEntryWebPage(entry *model.Entry) error {
+	content, err := scraper.Fetch(entry.URL, entry.Feed.ScraperRules, entry.Feed.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	content = rewrite.Rewriter(entry.URL, content, entry.Feed.RewriteRules)
+	content = sanitizer.Sanitize(entry.URL, content)
+
+	if content != "" {
+		entry.Content = content
+	}
+
+	return nil
+}
@@ -7,6 +7,7 @@ package rewrite // import "miniflux.app/reader/rewrite"
 import (
 	"strings"
 
+	"miniflux.app/logger"
 	"miniflux.app/url"
 )
 
@@ -20,6 +21,8 @@ func Rewriter(entryURL, entryContent, customRewriteRules string) string {
 	rules := strings.Split(rulesList, ",")
 	rules = append(rules, "add_pdf_download_link")
 
+	logger.Debug(`[Rewrite] Applying rules %v for %q`, rules, entryURL)
+
 	for _, rule := range rules {
 		switch strings.TrimSpace(rule) {
 		case "add_image_title":
@@ -54,7 +54,7 @@ func Fetch(websiteURL, rules, userAgent string) (string, error) {
 		logger.Debug(`[Scraper] Using rules %q for %q`, rules, websiteURL)
 		content, err = scrapContent(response.Body, rules)
 	} else {
-		logger.Debug(`[Scraper] Using readability for "%q`, websiteURL)
+		logger.Debug(`[Scraper] Using readability for %q`, websiteURL)
 		content, err = readability.ExtractContent(response.Body)
 	}
 
@@ -6,12 +6,11 @@ package ui // import "miniflux.app/ui"
 
 import (
 	"net/http"
 
 	"miniflux.app/http/request"
 	"miniflux.app/http/response/json"
 	"miniflux.app/model"
-	"miniflux.app/reader/rewrite"
-	"miniflux.app/reader/sanitizer"
-	"miniflux.app/reader/scraper"
+	"miniflux.app/reader/processor"
 )
 
 func (h *handler) fetchContent(w http.ResponseWriter, r *http.Request) {
@@ -31,15 +30,11 @@ func (h *handler) fetchContent(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	content, err := scraper.Fetch(entry.URL, entry.Feed.ScraperRules, entry.Feed.UserAgent)
-	if err != nil {
+	if err := processor.ProcessEntryWebPage(entry); err != nil {
 		json.ServerError(w, r, err)
 		return
 	}
 
-	content = rewrite.Rewriter(entry.URL, content, entry.Feed.RewriteRules)
-	entry.Content = sanitizer.Sanitize(entry.URL, content)
-
 	h.store.UpdateEntryContent(entry)
 
 	json.OK(w, r, map[string]string{"content": entry.Content})
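For context, here is a minimal sketch (not part of this diff) of how another call site could adopt the new processor API. The helper name, placement, and wiring are assumptions; only processor.ProcessEntryWebPage, model.Entry, storage.Storage, and UpdateEntryContent come from the change above.

package ui // hypothetical placement; the commit itself only touches fetchContent here

import (
	"miniflux.app/model"
	"miniflux.app/reader/processor"
	"miniflux.app/storage"
)

// refreshEntryContent is a sketch: the processor now owns scraping, rewriting
// and sanitizing, so a caller only handles the error and persists the result.
func refreshEntryContent(store *storage.Storage, entry *model.Entry) error {
	if err := processor.ProcessEntryWebPage(entry); err != nil {
		return err
	}

	// Same persistence call the fetchContent handler uses after processing.
	store.UpdateEntryContent(entry)
	return nil
}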