// Copyright 2017 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package scraper // import "miniflux.app/reader/scraper"

import (
	"bytes"
	"os"
	"strings"
	"testing"
)
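
// TestGetPredefinedRules verifies that getPredefinedScraperRules returns a
// rule for known domains and an empty string for domains without one.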
func TestGetPredefinedRules(t *testing.T) {
	if getPredefinedScraperRules("http://www.phoronix.com/") == "" {
		t.Error("Unable to find rule for phoronix.com")
	}

	if getPredefinedScraperRules("https://www.linux.com/") == "" {
		t.Error("Unable to find rule for linux.com")
	}

	if getPredefinedScraperRules("https://example.org/") != "" {
		t.Error("A rule not defined should not return anything")
	}
}
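
// TestWhitelistedContentTypes verifies that isAllowedContentType accepts HTML
// and XHTML content types, regardless of letter case or charset parameters,
// and rejects other media types.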
func TestWhitelistedContentTypes(t *testing.T) {
	scenarios := map[string]bool{
		"text/html":                            true,
		"TeXt/hTmL":                            true,
		"application/xhtml+xml":                true,
		"text/html; charset=utf-8":             true,
		"application/xhtml+xml; charset=utf-8": true,
		"text/css":                             false,
		"application/javascript":               false,
		"image/png":                            false,
		"application/pdf":                      false,
	}

	for inputValue, expectedResult := range scenarios {
		actualResult := isAllowedContentType(inputValue)
		if actualResult != expectedResult {
			t.Errorf(`Unexpected result for content type whitelist, got "%v" instead of "%v"`, actualResult, expectedResult)
		}
	}
}
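
// TestSelectorRules applies CSS selector rules to the HTML fixtures in
// testdata/ and compares the scraped content with the corresponding
// "-result" files.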
func TestSelectorRules(t *testing.T) {
	var ruleTestCases = map[string]string{
		"img.html":    "article > img",
		"iframe.html": "article > iframe",
		"p.html":      "article > p",
	}

	for filename, rule := range ruleTestCases {
		html, err := os.ReadFile("testdata/" + filename)
		if err != nil {
			t.Fatalf(`Unable to read file %q: %v`, filename, err)
		}

		actualResult, err := scrapContent(bytes.NewReader(html), rule)
		if err != nil {
			t.Fatalf(`Scraping error for %q - %q: %v`, filename, rule, err)
		}

		expectedResult, err := os.ReadFile("testdata/" + filename + "-result")
		if err != nil {
			t.Fatalf(`Unable to read file %q: %v`, filename, err)
		}

		if actualResult != strings.TrimSpace(string(expectedResult)) {
			t.Errorf(`Unexpected result for %q, got "%s" instead of "%s"`, rule, actualResult, expectedResult)
		}
	}
}