Commit 311a133: Refactor manual entry scraper

fguillot committed Dec 3, 2018
1 parent 52de36b
Showing 6 changed files with 34 additions and 19 deletions.
reader/feed/handler.go (3 additions, 3 deletions)

@@ -14,9 +14,9 @@ import (
 	"miniflux.app/logger"
 	"miniflux.app/model"
 	"miniflux.app/reader/browser"
-	"miniflux.app/reader/filter"
 	"miniflux.app/reader/icon"
 	"miniflux.app/reader/parser"
+	"miniflux.app/reader/processor"
 	"miniflux.app/storage"
 	"miniflux.app/timer"
 )
@@ -63,7 +63,7 @@ func (h *Handler) CreateFeed(userID, categoryID int64, url string, crawler bool,
 	subscription.WithClientResponse(response)
 	subscription.CheckedNow()

-	filter.Apply(h.store, subscription)
+	processor.ProcessFeedEntries(h.store, subscription)

 	if storeErr := h.store.CreateFeed(subscription); storeErr != nil {
 		return nil, storeErr
@@ -114,7 +114,7 @@ func (h *Handler) RefreshFeed(userID, feedID int64) error {
 	}

 	originalFeed.Entries = updatedFeed.Entries
-	filter.Apply(h.store, originalFeed)
+	processor.ProcessFeedEntries(h.store, originalFeed)

 	// We don't update existing entries when the crawler is enabled (we crawl only inexisting entries).
 	if storeErr := h.store.UpdateEntries(originalFeed.UserID, originalFeed.ID, originalFeed.Entries, !originalFeed.Crawler); storeErr != nil {
reader/filter/doc.go → reader/processor/doc.go (2 additions, 2 deletions)

@@ -4,7 +4,7 @@
 /*
-Package filter applies a set of filters to feed entries.
+Package processor applies rules and sanitize content for feed entries.
 */
-package filter // import "miniflux.app/reader/filter"
+package processor // import "miniflux.app/reader/processor"
reader/filter/filter.go → reader/processor/filter.go (22 additions, 5 deletions)

@@ -2,7 +2,7 @@
 // Use of this source code is governed by the Apache 2.0
 // license that can be found in the LICENSE file.

-package filter
+package processor

 import (
 	"miniflux.app/logger"
@@ -13,15 +13,15 @@ import (
 	"miniflux.app/storage"
 )

-// Apply executes all entry filters.
-func Apply(store *storage.Storage, feed *model.Feed) {
+// ProcessFeedEntries downloads original web page for entries and apply filters.
+func ProcessFeedEntries(store *storage.Storage, feed *model.Feed) {
 	for _, entry := range feed.Entries {
 		if feed.Crawler {
 			if !store.EntryURLExists(feed.UserID, entry.URL) {
 				content, err := scraper.Fetch(entry.URL, feed.ScraperRules, feed.UserAgent)
 				if err != nil {
-					logger.Error("Unable to crawl this entry: %q => %v", entry.URL, err)
-				} else {
+					logger.Error(`[Filter] Unable to crawl this entry: %q => %v`, entry.URL, err)
+				} else if content != "" {
 					// We replace the entry content only if the scraper doesn't return any error.
 					entry.Content = content
 				}
@@ -34,3 +34,20 @@ func Apply(store *storage.Storage, feed *model.Feed) {
 		entry.Content = sanitizer.Sanitize(entry.URL, entry.Content)
 	}
 }
+
+// ProcessEntryWebPage downloads the entry web page and apply rewrite rules.
+func ProcessEntryWebPage(entry *model.Entry) error {
+	content, err := scraper.Fetch(entry.URL, entry.Feed.ScraperRules, entry.Feed.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	content = rewrite.Rewriter(entry.URL, content, entry.Feed.RewriteRules)
+	content = sanitizer.Sanitize(entry.URL, content)
+
+	if content != "" {
+		entry.Content = content
+	}
+
+	return nil
+}
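The new ProcessEntryWebPage bundles the scrape, rewrite, and sanitize steps behind a single call. A minimal caller sketch, not part of the commit: the URL, the rule values, and the way the Feed association is populated are illustrative assumptions (in Miniflux the entry and its feed would come from storage).

package main

import (
	"fmt"
	"log"

	"miniflux.app/model"
	"miniflux.app/reader/processor"
)

func main() {
	// Hypothetical entry; field values are placeholders.
	entry := &model.Entry{
		URL: "https://example.org/article",
		Feed: &model.Feed{
			ScraperRules: "",                // empty: scraper falls back to readability
			RewriteRules: "add_image_title", // a rule name seen in the rewriter
		},
	}

	if err := processor.ProcessEntryWebPage(entry); err != nil {
		log.Fatalf("unable to fetch entry content: %v", err)
	}

	// entry.Content now holds the rewritten, sanitized page content;
	// if the pipeline produced an empty string, the original is kept.
	fmt.Println(entry.Content)
}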
reader/rewrite/rewriter.go (3 additions, 0 deletions)

@@ -7,6 +7,7 @@ package rewrite // import "miniflux.app/reader/rewrite"
 import (
 	"strings"

+	"miniflux.app/logger"
 	"miniflux.app/url"
 )
@@ -20,6 +21,8 @@ func Rewriter(entryURL, entryContent, customRewriteRules string) string {
 	rules := strings.Split(rulesList, ",")
 	rules = append(rules, "add_pdf_download_link")

+	logger.Debug(`[Rewrite] Applying rules %v for %q`, rules, entryURL)
+
 	for _, rule := range rules {
 		switch strings.TrimSpace(rule) {
 		case "add_image_title":
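The new debug line combines %v (default formatting of the rules slice) with %q (quoted URL). A standalone illustration of the resulting output, using plain fmt instead of Miniflux's logger; the rule names and URL are example values:

package main

import "fmt"

func main() {
	rules := []string{"add_image_title", "add_pdf_download_link"}
	entryURL := "https://example.org/post"

	// Mirrors the format string passed to logger.Debug above.
	fmt.Printf("[Rewrite] Applying rules %v for %q\n", rules, entryURL)
	// Output:
	// [Rewrite] Applying rules [add_image_title add_pdf_download_link] for "https://example.org/post"
}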
reader/scraper/scraper.go (1 addition, 1 deletion)

@@ -54,7 +54,7 @@ func Fetch(websiteURL, rules, userAgent string) (string, error) {
 		logger.Debug(`[Scraper] Using rules %q for %q`, rules, websiteURL)
 		content, err = scrapContent(response.Body, rules)
 	} else {
-		logger.Debug(`[Scraper] Using readability for "%q`, websiteURL)
+		logger.Debug(`[Scraper] Using readability for %q`, websiteURL)
 		content, err = readability.ExtractContent(response.Body)
 	}
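This one-line change removes a stray double quote from the format string. Since %q already wraps its argument in quotes, the old literal produced a doubled opening quote in log output, as this standalone fmt example shows (the URL is a placeholder):

package main

import "fmt"

func main() {
	websiteURL := "https://example.org"

	// Before: a literal `"` directly in front of the %q verb.
	fmt.Printf(`[Scraper] Using readability for "%q`+"\n", websiteURL)
	// [Scraper] Using readability for ""https://example.org"

	// After: %q alone supplies balanced quotes.
	fmt.Printf(`[Scraper] Using readability for %q`+"\n", websiteURL)
	// [Scraper] Using readability for "https://example.org"
}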
ui/entry_scraper.go (3 additions, 8 deletions)

@@ -6,12 +6,11 @@ package ui // import "miniflux.app/ui"
 import (
 	"net/http"
+
 	"miniflux.app/http/request"
 	"miniflux.app/http/response/json"
 	"miniflux.app/model"
-	"miniflux.app/reader/rewrite"
-	"miniflux.app/reader/sanitizer"
-	"miniflux.app/reader/scraper"
+	"miniflux.app/reader/processor"
 )

 func (h *handler) fetchContent(w http.ResponseWriter, r *http.Request) {
@@ -31,15 +30,11 @@ func (h *handler) fetchContent(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	content, err := scraper.Fetch(entry.URL, entry.Feed.ScraperRules, entry.Feed.UserAgent)
-	if err != nil {
+	if err := processor.ProcessEntryWebPage(entry); err != nil {
 		json.ServerError(w, r, err)
 		return
 	}

-	content = rewrite.Rewriter(entry.URL, content, entry.Feed.RewriteRules)
-
-	entry.Content = sanitizer.Sanitize(entry.URL, content)
 	h.store.UpdateEntryContent(entry)

 	json.OK(w, r, map[string]string{"content": entry.Content})
