// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package scraper // import "miniflux.app/v2/internal/reader/scraper"
import (
	"errors"
	"fmt"
	"io"
	"log/slog"
	"strings"

	"miniflux.app/v2/internal/config"
	"miniflux.app/v2/internal/http/client"
	"miniflux.app/v2/internal/reader/readability"
	"miniflux.app/v2/internal/urllib"

	"github.com/PuerkitoBio/goquery"
)

// Fetch downloads a web page and returns its relevant content, extracted
// either with site-specific scraper rules or with the readability algorithm.
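//
// A minimal usage sketch from a caller's point of view (an illustration, not
// taken from the project's docs; it assumes config.Opts has already been
// initialized by the application bootstrap and that the URL serves HTML):
//
//	content, err := scraper.Fetch("https://example.org/article", "", "Mozilla/5.0", "", false, false)
//	if err != nil {
//		// handle the download or extraction error
//	}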
func Fetch(websiteURL, rules, userAgent string, cookie string, allowSelfSignedCertificates, useProxy bool) (string, error) {
	clt := client.NewClientWithConfig(websiteURL, config.Opts)
	clt.WithUserAgent(userAgent)
	clt.WithCookie(cookie)
	if useProxy {
		clt.WithProxy()
	}
	clt.AllowSelfSignedCertificates = allowSelfSignedCertificates

	response, err := clt.Get()
	if err != nil {
		return "", err
	}

	if response.HasServerFailure() {
		return "", errors.New("scraper: unable to download web page")
	}

	if !isAllowedContentType(response.ContentType) {
		return "", fmt.Errorf("scraper: this resource is not an HTML document (%s)", response.ContentType)
	}

	if err = response.EnsureUnicodeBody(); err != nil {
		return "", err
	}

	// The entry URL could redirect somewhere else, so use the effective URL
	// and only trust custom rules when the redirect stayed on the same domain.
	sameSite := urllib.Domain(websiteURL) == urllib.Domain(response.EffectiveURL)
	websiteURL = response.EffectiveURL

	if rules == "" {
		rules = getPredefinedScraperRules(websiteURL)
	}

	var content string
	if sameSite && rules != "" {
		slog.Debug("Extracting content with custom rules",
			"url", websiteURL,
			"rules", rules,
		)
		content, err = scrapContent(response.Body, rules)
	} else {
		slog.Debug("Extracting content with readability",
			"url", websiteURL,
		)
		content, err = readability.ExtractContent(response.Body)
	}

	if err != nil {
		return "", err
	}

	return content, nil
}
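
// scrapContent runs the given selector rules against the downloaded document
// and returns the outer HTML of every matching element, concatenated in
// document order.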
func scrapContent(page io.Reader, rules string) (string, error) {
	document, err := goquery.NewDocumentFromReader(page)
	if err != nil {
		return "", err
	}

	contents := ""
	document.Find(rules).Each(func(i int, s *goquery.Selection) {
		// The rendering error is deliberately ignored: a selection that
		// cannot be rendered contributes an empty string.
		content, _ := goquery.OuterHtml(s)
		contents += content
	})

	return contents, nil
}
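
// getPredefinedScraperRules returns the built-in scraper rules whose domain
// matches the given website URL, or an empty string when none apply.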
func getPredefinedScraperRules(websiteURL string) string {
	urlDomain := urllib.Domain(websiteURL)

	for domain, rules := range predefinedRules {
		if strings.Contains(urlDomain, domain) {
			return rules
		}
	}

	return ""
}
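
// isAllowedContentType reports whether the response Content-Type denotes a
// document the scraper can parse (HTML or XHTML).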
func isAllowedContentType(contentType string) bool {
	contentType = strings.ToLower(contentType)
	return strings.HasPrefix(contentType, "text/html") ||
		strings.HasPrefix(contentType, "application/xhtml+xml")
}