processor.go

// Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
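
// Package processor applies filtering, scraping, rewrite, and
// sanitization steps to feed entries.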
package processor

import (
	"regexp"
	"time"

	"miniflux.app/config"
	"miniflux.app/logger"
	"miniflux.app/metric"
	"miniflux.app/model"
	"miniflux.app/reader/rewrite"
	"miniflux.app/reader/sanitizer"
	"miniflux.app/reader/scraper"
	"miniflux.app/storage"
)

// ProcessFeedEntries downloads the original web page for entries and applies filters.
func ProcessFeedEntries(store *storage.Storage, feed *model.Feed) {
	filterFeedEntries(feed)

	for _, entry := range feed.Entries {
		logger.Debug("[Feed #%d] Processing entry %s", feed.ID, entry.URL)
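
		// Scrape only entries that are not already in the database, so
		// known pages are not downloaded again on every refresh.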
		if feed.Crawler {
			if !store.EntryURLExists(feed.ID, entry.URL) {
				startTime := time.Now()
				content, scraperErr := scraper.Fetch(entry.URL, feed.ScraperRules, feed.UserAgent)
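
				// Record the scrape duration, labeled by outcome, when
				// the metrics collector is enabled.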
				if config.Opts.HasMetricsCollector() {
					status := "success"
					if scraperErr != nil {
						status = "error"
					}
					metric.ScraperRequestDuration.WithLabelValues(status).Observe(time.Since(startTime).Seconds())
				}

				if scraperErr != nil {
					logger.Error(`[Filter] Unable to crawl this entry: %q => %v`, entry.URL, scraperErr)
				} else if content != "" {
					// Replace the entry content only when the scraper
					// succeeded and returned a non-empty result.
					entry.Content = content
				}
			}
		}
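
		// Rewrite and sanitize run for every entry, whether or not the
		// crawler replaced its content above.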
		entry.Content = rewrite.Rewriter(entry.URL, entry.Content, feed.RewriteRules)

		// The sanitizer should always run at the end of the process
		// to make sure unsafe HTML is filtered.
		entry.Content = sanitizer.Sanitize(entry.URL, entry.Content)
	}
}

// filterFeedEntries filters feed entries based on regex rules:
// first only entries that match the keep list are kept, then
// entries that match the block list are removed.
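//
// For example, hypothetical rules such as KeeplistRules = `(?i)release`
// or BlocklistRules = `(?i)sponsored` are matched against each entry
// title using Go's regexp syntax.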
func filterFeedEntries(feed *model.Feed) {
	var filteredEntries []*model.Entry

	if len(feed.KeeplistRules) > 0 {
		for _, entry := range feed.Entries {
			match, _ := regexp.MatchString(feed.KeeplistRules, entry.Title)
			if match {
				filteredEntries = append(filteredEntries, entry)
			}
		}
	} else {
		filteredEntries = feed.Entries
	}
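
	// Remove blocked entries in place instead of allocating another
	// slice. A malformed pattern never matches: an invalid keep list
	// drops every entry, while an invalid block list keeps them all.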
	if len(feed.BlocklistRules) > 0 {
		k := 0
		for _, entry := range filteredEntries {
			match, _ := regexp.MatchString(feed.BlocklistRules, entry.Title)
			if !match {
				filteredEntries[k] = entry
				k++
			}
		}
		filteredEntries = filteredEntries[:k]
	}

	feed.Entries = filteredEntries
}

// ProcessEntryWebPage downloads the entry web page and applies rewrite rules.
func ProcessEntryWebPage(entry *model.Entry) error {
	startTime := time.Now()
	content, scraperErr := scraper.Fetch(entry.URL, entry.Feed.ScraperRules, entry.Feed.UserAgent)

	if config.Opts.HasMetricsCollector() {
		status := "success"
		if scraperErr != nil {
			status = "error"
		}
		metric.ScraperRequestDuration.WithLabelValues(status).Observe(time.Since(startTime).Seconds())
	}

	if scraperErr != nil {
		return scraperErr
	}
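
	// Same pipeline as ProcessFeedEntries: rewrite first, then sanitize
	// last so that unsafe HTML cannot slip through.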
	content = rewrite.Rewriter(entry.URL, content, entry.Feed.RewriteRules)
	content = sanitizer.Sanitize(entry.URL, content)

	if content != "" {
		entry.Content = content
	}

	return nil
}
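
// A minimal sketch of how filterFeedEntries behaves with hypothetical
// rules; illustrative only, not part of the original file:
//
//	feed := &model.Feed{
//		KeeplistRules: "(?i)release",
//		Entries: []*model.Entry{
//			{Title: "Release v1.0"},
//			{Title: "Weekly digest"},
//		},
//	}
//	filterFeedEntries(feed)
//	// feed.Entries now contains only the "Release v1.0" entry.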