Simplify feed entries filtering
- Rename processor package to filter
- Remove boilerplate code
fguillot committed Oct 15, 2018
1 parent 234b371 commit b8f874a
Showing 4 changed files with 45 additions and 86 deletions.
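
In short, the builder-style feed processor is replaced by a single function call. The comparison below is only a summary assembled from the handler.go diff that follows, not extra code introduced by the commit:

// Before: build and configure a processor for each feed
feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
feedProcessor.WithCrawler(crawler)
feedProcessor.Process()

// After: one call; per-feed settings are read from the feed itself
filter.Apply(h.store, subscription)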
20 changes: 7 additions & 13 deletions reader/feed/handler.go
@@ -14,9 +14,9 @@ import (
 	"miniflux.app/logger"
 	"miniflux.app/model"
 	"miniflux.app/reader/browser"
+	"miniflux.app/reader/filter"
 	"miniflux.app/reader/icon"
 	"miniflux.app/reader/parser"
-	"miniflux.app/reader/processor"
 	"miniflux.app/storage"
 	"miniflux.app/timer"
 )
@@ -63,9 +63,7 @@ func (h *Handler) CreateFeed(userID, categoryID int64, url string, crawler bool,
 	subscription.WithClientResponse(response)
 	subscription.CheckedNow()
 
-	feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
-	feedProcessor.WithCrawler(crawler)
-	feedProcessor.Process()
+	filter.Apply(h.store, subscription)
 
 	if storeErr := h.store.CreateFeed(subscription); storeErr != nil {
 		return nil, storeErr
@@ -108,22 +106,18 @@ func (h *Handler) RefreshFeed(userID, feedID int64) error {
 	if response.IsModified(originalFeed.EtagHeader, originalFeed.LastModifiedHeader) {
 		logger.Debug("[Handler:RefreshFeed] Feed #%d has been modified", feedID)
 
-		subscription, parseErr := parser.ParseFeed(response.String())
+		updatedFeed, parseErr := parser.ParseFeed(response.String())
 		if parseErr != nil {
 			originalFeed.WithError(parseErr.Localize(printer))
 			h.store.UpdateFeed(originalFeed)
 			return parseErr
 		}
 
-		feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
-		feedProcessor.WithScraperRules(originalFeed.ScraperRules)
-		feedProcessor.WithUserAgent(originalFeed.UserAgent)
-		feedProcessor.WithRewriteRules(originalFeed.RewriteRules)
-		feedProcessor.WithCrawler(originalFeed.Crawler)
-		feedProcessor.Process()
+		originalFeed.Entries = updatedFeed.Entries
+		filter.Apply(h.store, originalFeed)
 
-		// Note: We don't update existing entries when the crawler is enabled (we crawl only inexisting entries).
-		if storeErr := h.store.UpdateEntries(originalFeed.UserID, originalFeed.ID, subscription.Entries, !originalFeed.Crawler); storeErr != nil {
+		// We don't update existing entries when the crawler is enabled (we crawl only inexisting entries).
+		if storeErr := h.store.UpdateEntries(originalFeed.UserID, originalFeed.ID, originalFeed.Entries, !originalFeed.Crawler); storeErr != nil {
 			return storeErr
 		}
 
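Design note on RefreshFeed: the options that were previously handed to the processor through builder methods are now read by filter.Apply directly from the *model.Feed it receives. A rough mapping, using the exact fields visible in the diffs:

// Removed builder call                                    // Field read inside filter.Apply
feedProcessor.WithScraperRules(originalFeed.ScraperRules)  // feed.ScraperRules
feedProcessor.WithUserAgent(originalFeed.UserAgent)        // feed.UserAgent
feedProcessor.WithRewriteRules(originalFeed.RewriteRules)  // feed.RewriteRules
feedProcessor.WithCrawler(originalFeed.Crawler)            // feed.Crawler

The parsed entries are also copied onto originalFeed before filtering (originalFeed.Entries = updatedFeed.Entries), so UpdateEntries now takes originalFeed.Entries instead of the separately parsed subscription.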
4 changes: 2 additions & 2 deletions reader/processor/doc.go → reader/filter/doc.go
@@ -4,7 +4,7 @@
 
 /*
-Package processor handles the logic to manipulate feed contents.
+Package filter applies a set of filters to feed entries.
 */
-package processor // import "miniflux.app/reader/processor"
+package filter // import "miniflux.app/reader/filter"
36 changes: 36 additions & 0 deletions reader/filter/filter.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Frédéric Guillot. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package filter
+
+import (
+	"miniflux.app/logger"
+	"miniflux.app/model"
+	"miniflux.app/reader/rewrite"
+	"miniflux.app/reader/sanitizer"
+	"miniflux.app/reader/scraper"
+	"miniflux.app/storage"
+)
+
+// Apply executes all entry filters.
+func Apply(store *storage.Storage, feed *model.Feed) {
+	for _, entry := range feed.Entries {
+		if feed.Crawler {
+			if !store.EntryURLExists(feed.UserID, entry.URL) {
+				content, err := scraper.Fetch(entry.URL, feed.ScraperRules, feed.UserAgent)
+				if err != nil {
+					logger.Error("Unable to crawl this entry: %q => %v", entry.URL, err)
+				} else {
+					// We replace the entry content only if the scraper doesn't return any error.
+					entry.Content = content
+				}
+			}
+		}
+
+		entry.Content = rewrite.Rewriter(entry.URL, entry.Content, feed.RewriteRules)
+
+		// The sanitizer should always run at the end of the process to make sure unsafe HTML is filtered.
+		entry.Content = sanitizer.Sanitize(entry.URL, entry.Content)
+	}
+}
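
To make the ordering in Apply explicit: scraping (only when the crawler is enabled and the entry is new) may replace the content, rewrite rules run next, and the sanitizer always runs last. A minimal, hypothetical sketch of that pipeline for a single entry, using the same rewrite and sanitizer calls as filter.go above (the URL and HTML are made-up placeholder values, and the empty third argument stands in for a feed with no rewrite rules):

package main

import (
	"fmt"

	"miniflux.app/reader/rewrite"
	"miniflux.app/reader/sanitizer"
)

func main() {
	// Hypothetical entry values, for illustration only.
	entryURL := "https://example.org/post/1"
	content := `<p>Hello <script>alert(1)</script></p>`

	// Rewrite rules run before sanitization.
	content = rewrite.Rewriter(entryURL, content, "")

	// The sanitizer runs last so unsafe HTML cannot survive the pipeline.
	content = sanitizer.Sanitize(entryURL, content)

	fmt.Println(content)
}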
71 changes: 0 additions & 71 deletions reader/processor/processor.go

This file was deleted.
