processor.go

// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package processor

import (
	"log/slog"
	"regexp"
	"slices"
	"strings"
	"time"

	"miniflux.app/v2/internal/config"
	"miniflux.app/v2/internal/metric"
	"miniflux.app/v2/internal/model"
	"miniflux.app/v2/internal/reader/fetcher"
	"miniflux.app/v2/internal/reader/readingtime"
	"miniflux.app/v2/internal/reader/rewrite"
	"miniflux.app/v2/internal/reader/sanitizer"
	"miniflux.app/v2/internal/reader/scraper"
	"miniflux.app/v2/internal/reader/urlcleaner"
	"miniflux.app/v2/internal/storage"

	"github.com/tdewolff/minify/v2"
	"github.com/tdewolff/minify/v2/html"
)
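
// customReplaceRuleRegex matches the feed's URL rewrite rule of the form
// rewrite("search-regexp"|"replacement"), as used by rewriteEntryURL below.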
var customReplaceRuleRegex = regexp.MustCompile(`rewrite\("(.*)"\|"(.*)"\)`)

// ProcessFeedEntries downloads the original web page for each entry and applies filters.
func ProcessFeedEntries(store *storage.Storage, feed *model.Feed, user *model.User, forceRefresh bool) {
	var filteredEntries model.Entries

	// Process older entries first
	for i := len(feed.Entries) - 1; i >= 0; i-- {
		entry := feed.Entries[i]

		slog.Debug("Processing entry",
			slog.Int64("user_id", user.ID),
			slog.String("entry_url", entry.URL),
			slog.String("entry_hash", entry.Hash),
			slog.String("entry_title", entry.Title),
			slog.Int64("feed_id", feed.ID),
			slog.String("feed_url", feed.FeedURL),
		)

		if isBlockedEntry(feed, entry, user) || !isAllowedEntry(feed, entry, user) || !isRecentEntry(entry) {
			continue
		}
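
		// Strip known tracking parameters from the entry URL; the original URL is kept if cleaning fails.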
		if cleanedURL, err := urlcleaner.RemoveTrackingParameters(entry.URL); err == nil {
			entry.URL = cleanedURL
		}

		pageBaseURL := ""
		rewrittenURL := rewriteEntryURL(feed, entry)
		entry.URL = rewrittenURL
		entryIsNew := store.IsNewEntry(feed.ID, entry.Hash)
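
		// When the crawler is enabled, download the full web page for new entries
		// (or for all entries when a refresh is forced).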
		if feed.Crawler && (entryIsNew || forceRefresh) {
			slog.Debug("Scraping entry",
				slog.Int64("user_id", user.ID),
				slog.String("entry_url", entry.URL),
				slog.String("entry_hash", entry.Hash),
				slog.String("entry_title", entry.Title),
				slog.Int64("feed_id", feed.ID),
				slog.String("feed_url", feed.FeedURL),
				slog.Bool("entry_is_new", entryIsNew),
				slog.Bool("force_refresh", forceRefresh),
				slog.String("rewritten_url", rewrittenURL),
			)

			startTime := time.Now()

			requestBuilder := fetcher.NewRequestBuilder()
			requestBuilder.WithUserAgent(feed.UserAgent, config.Opts.HTTPClientUserAgent())
			requestBuilder.WithCookie(feed.Cookie)
			requestBuilder.WithTimeout(config.Opts.HTTPClientTimeout())
			requestBuilder.WithProxy(config.Opts.HTTPClientProxy())
			requestBuilder.UseProxy(feed.FetchViaProxy)
			requestBuilder.IgnoreTLSErrors(feed.AllowSelfSignedCertificates)
			requestBuilder.DisableHTTP2(feed.DisableHTTP2)

			scrapedPageBaseURL, extractedContent, scraperErr := scraper.ScrapeWebsite(
				requestBuilder,
				rewrittenURL,
				feed.ScraperRules,
			)

			if scrapedPageBaseURL != "" {
				pageBaseURL = scrapedPageBaseURL
			}

			if config.Opts.HasMetricsCollector() {
				status := "success"
				if scraperErr != nil {
					status = "error"
				}
				metric.ScraperRequestDuration.WithLabelValues(status).Observe(time.Since(startTime).Seconds())
			}

			if scraperErr != nil {
				slog.Warn("Unable to scrape entry",
					slog.Int64("user_id", user.ID),
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.Any("error", scraperErr),
				)
			} else if extractedContent != "" {
				// We replace the entry content only if the scraper doesn't return any error.
				entry.Content = minifyEntryContent(extractedContent)
			}
		}
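
		// Apply the feed's rewrite rules to the (possibly scraped) entry content.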
		rewrite.Rewriter(rewrittenURL, entry, feed.RewriteRules)

		if pageBaseURL == "" {
			pageBaseURL = rewrittenURL
		}

		// The sanitizer should always run at the end of the process to make sure unsafe HTML is filtered out.
		entry.Content = sanitizer.Sanitize(pageBaseURL, entry.Content)

		updateEntryReadingTime(store, feed, entry, entryIsNew, user)

		filteredEntries = append(filteredEntries, entry)
	}

	feed.Entries = filteredEntries
}

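// isBlockedEntry reports whether the entry matches one of the user's block filter
// rules ("<Field>=<regexp>" per line, where Field is one of EntryTitle, EntryURL,
// EntryCommentsURL, EntryContent, EntryAuthor, or EntryTag) or the feed's
// blocklist regular expression.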
func isBlockedEntry(feed *model.Feed, entry *model.Entry, user *model.User) bool {
	if user.BlockFilterEntryRules != "" {
		rules := strings.Split(user.BlockFilterEntryRules, "\n")
		for _, rule := range rules {
			parts := strings.SplitN(rule, "=", 2)
			if len(parts) < 2 {
				// Skip malformed rules without a "=" separator.
				continue
			}

			var match bool
			switch parts[0] {
			case "EntryTitle":
				match, _ = regexp.MatchString(parts[1], entry.Title)
			case "EntryURL":
				match, _ = regexp.MatchString(parts[1], entry.URL)
			case "EntryCommentsURL":
				match, _ = regexp.MatchString(parts[1], entry.CommentsURL)
			case "EntryContent":
				match, _ = regexp.MatchString(parts[1], entry.Content)
			case "EntryAuthor":
				match, _ = regexp.MatchString(parts[1], entry.Author)
			case "EntryTag":
				containsTag := slices.ContainsFunc(entry.Tags, func(tag string) bool {
					match, _ = regexp.MatchString(parts[1], tag)
					return match
				})
				if containsTag {
					match = true
				}
			}

			if match {
				slog.Debug("Blocking entry based on rule",
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.String("rule", rule),
				)
				return true
			}
		}
	}

	if feed.BlocklistRules == "" {
		return false
	}

	compiledBlocklist, err := regexp.Compile(feed.BlocklistRules)
	if err != nil {
		slog.Debug("Failed on regexp compilation",
			slog.String("pattern", feed.BlocklistRules),
			slog.Any("error", err),
		)
		return false
	}

	containsBlockedTag := slices.ContainsFunc(entry.Tags, func(tag string) bool {
		return compiledBlocklist.MatchString(tag)
	})

	if compiledBlocklist.MatchString(entry.URL) || compiledBlocklist.MatchString(entry.Title) || compiledBlocklist.MatchString(entry.Author) || containsBlockedTag {
		slog.Debug("Blocking entry based on rule",
			slog.String("entry_url", entry.URL),
			slog.Int64("feed_id", feed.ID),
			slog.String("feed_url", feed.FeedURL),
			slog.String("rule", feed.BlocklistRules),
		)
		return true
	}

	return false
}

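// isAllowedEntry reports whether the entry matches one of the user's keep filter
// rules (same "<Field>=<regexp>" format as the block rules) or the feed's keeplist
// regular expression. When no keep rules are configured, every entry is allowed.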
func isAllowedEntry(feed *model.Feed, entry *model.Entry, user *model.User) bool {
	if user.KeepFilterEntryRules != "" {
		rules := strings.Split(user.KeepFilterEntryRules, "\n")
		for _, rule := range rules {
			parts := strings.SplitN(rule, "=", 2)
			if len(parts) < 2 {
				// Skip malformed rules without a "=" separator.
				continue
			}

			var match bool
			switch parts[0] {
			case "EntryTitle":
				match, _ = regexp.MatchString(parts[1], entry.Title)
			case "EntryURL":
				match, _ = regexp.MatchString(parts[1], entry.URL)
			case "EntryCommentsURL":
				match, _ = regexp.MatchString(parts[1], entry.CommentsURL)
			case "EntryContent":
				match, _ = regexp.MatchString(parts[1], entry.Content)
			case "EntryAuthor":
				match, _ = regexp.MatchString(parts[1], entry.Author)
			case "EntryTag":
				containsTag := slices.ContainsFunc(entry.Tags, func(tag string) bool {
					match, _ = regexp.MatchString(parts[1], tag)
					return match
				})
				if containsTag {
					match = true
				}
			}

			if match {
				slog.Debug("Allowing entry based on rule",
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.String("rule", rule),
				)
				return true
			}
		}
		return false
	}

	if feed.KeeplistRules == "" {
		return true
	}

	compiledKeeplist, err := regexp.Compile(feed.KeeplistRules)
	if err != nil {
		slog.Debug("Failed on regexp compilation",
			slog.String("pattern", feed.KeeplistRules),
			slog.Any("error", err),
		)
		return false
	}

	containsAllowedTag := slices.ContainsFunc(entry.Tags, func(tag string) bool {
		return compiledKeeplist.MatchString(tag)
	})

	if compiledKeeplist.MatchString(entry.URL) || compiledKeeplist.MatchString(entry.Title) || compiledKeeplist.MatchString(entry.Author) || containsAllowedTag {
		slog.Debug("Allow entry based on rule",
			slog.String("entry_url", entry.URL),
			slog.Int64("feed_id", feed.ID),
			slog.String("feed_url", feed.FeedURL),
			slog.String("rule", feed.KeeplistRules),
		)
		return true
	}

	return false
}

// ProcessEntryWebPage downloads the entry's web page and applies rewrite rules.
func ProcessEntryWebPage(feed *model.Feed, entry *model.Entry, user *model.User) error {
	startTime := time.Now()
	rewrittenEntryURL := rewriteEntryURL(feed, entry)

	requestBuilder := fetcher.NewRequestBuilder()
	requestBuilder.WithUserAgent(feed.UserAgent, config.Opts.HTTPClientUserAgent())
	requestBuilder.WithCookie(feed.Cookie)
	requestBuilder.WithTimeout(config.Opts.HTTPClientTimeout())
	requestBuilder.WithProxy(config.Opts.HTTPClientProxy())
	requestBuilder.UseProxy(feed.FetchViaProxy)
	requestBuilder.IgnoreTLSErrors(feed.AllowSelfSignedCertificates)
	requestBuilder.DisableHTTP2(feed.DisableHTTP2)

	pageBaseURL, extractedContent, scraperErr := scraper.ScrapeWebsite(
		requestBuilder,
		rewrittenEntryURL,
		feed.ScraperRules,
	)

	if config.Opts.HasMetricsCollector() {
		status := "success"
		if scraperErr != nil {
			status = "error"
		}
		metric.ScraperRequestDuration.WithLabelValues(status).Observe(time.Since(startTime).Seconds())
	}

	if scraperErr != nil {
		return scraperErr
	}

	if extractedContent != "" {
		entry.Content = minifyEntryContent(extractedContent)
		if user.ShowReadingTime {
			entry.ReadingTime = readingtime.EstimateReadingTime(entry.Content, user.DefaultReadingSpeed, user.CJKReadingSpeed)
		}
	}

	rewrite.Rewriter(rewrittenEntryURL, entry, entry.Feed.RewriteRules)
	entry.Content = sanitizer.Sanitize(pageBaseURL, entry.Content)
	return nil
}

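// rewriteEntryURL applies the feed's URL rewrite rule, if any, in the form
// rewrite("search-regexp"|"replacement"). The original entry URL is returned
// when no rule is configured or when the rule cannot be compiled.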
func rewriteEntryURL(feed *model.Feed, entry *model.Entry) string {
	var rewrittenURL = entry.URL
	if feed.UrlRewriteRules != "" {
		parts := customReplaceRuleRegex.FindStringSubmatch(feed.UrlRewriteRules)

		if len(parts) >= 3 {
			re, err := regexp.Compile(parts[1])
			if err != nil {
				slog.Error("Failed on regexp compilation",
					slog.String("url_rewrite_rules", feed.UrlRewriteRules),
					slog.Any("error", err),
				)
				return rewrittenURL
			}
			rewrittenURL = re.ReplaceAllString(entry.URL, parts[2])

			slog.Debug("Rewriting entry URL",
				slog.String("original_entry_url", entry.URL),
				slog.String("rewritten_entry_url", rewrittenURL),
				slog.Int64("feed_id", feed.ID),
				slog.String("feed_url", feed.FeedURL),
			)
		} else {
			slog.Debug("Cannot find search and replace terms for replace rule",
				slog.String("original_entry_url", entry.URL),
				slog.String("rewritten_entry_url", rewrittenURL),
				slog.Int64("feed_id", feed.ID),
				slog.String("feed_url", feed.FeedURL),
				slog.String("url_rewrite_rules", feed.UrlRewriteRules),
			)
		}
	}

	return rewrittenURL
}

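// updateEntryReadingTime sets the entry reading time: for YouTube, Nebula, Odysee,
// and Bilibili entries it uses the video watch time (fetched for new entries, read
// from storage otherwise); for everything else it estimates from the entry content.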
func updateEntryReadingTime(store *storage.Storage, feed *model.Feed, entry *model.Entry, entryIsNew bool, user *model.User) {
	if !user.ShowReadingTime {
		slog.Debug("Skip reading time estimation for this user", slog.Int64("user_id", user.ID))
		return
	}

	if shouldFetchYouTubeWatchTime(entry) {
		if entryIsNew {
			watchTime, err := fetchYouTubeWatchTime(entry.URL)
			if err != nil {
				slog.Warn("Unable to fetch YouTube watch time",
					slog.Int64("user_id", user.ID),
					slog.Int64("entry_id", entry.ID),
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.Any("error", err),
				)
			}
			entry.ReadingTime = watchTime
		} else {
			entry.ReadingTime = store.GetReadTime(feed.ID, entry.Hash)
		}
	}

	if shouldFetchNebulaWatchTime(entry) {
		if entryIsNew {
			watchTime, err := fetchNebulaWatchTime(entry.URL)
			if err != nil {
				slog.Warn("Unable to fetch Nebula watch time",
					slog.Int64("user_id", user.ID),
					slog.Int64("entry_id", entry.ID),
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.Any("error", err),
				)
			}
			entry.ReadingTime = watchTime
		} else {
			entry.ReadingTime = store.GetReadTime(feed.ID, entry.Hash)
		}
	}

	if shouldFetchOdyseeWatchTime(entry) {
		if entryIsNew {
			watchTime, err := fetchOdyseeWatchTime(entry.URL)
			if err != nil {
				slog.Warn("Unable to fetch Odysee watch time",
					slog.Int64("user_id", user.ID),
					slog.Int64("entry_id", entry.ID),
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.Any("error", err),
				)
			}
			entry.ReadingTime = watchTime
		} else {
			entry.ReadingTime = store.GetReadTime(feed.ID, entry.Hash)
		}
	}

	if shouldFetchBilibiliWatchTime(entry) {
		if entryIsNew {
			watchTime, err := fetchBilibiliWatchTime(entry.URL)
			if err != nil {
				slog.Warn("Unable to fetch Bilibili watch time",
					slog.Int64("user_id", user.ID),
					slog.Int64("entry_id", entry.ID),
					slog.String("entry_url", entry.URL),
					slog.Int64("feed_id", feed.ID),
					slog.String("feed_url", feed.FeedURL),
					slog.Any("error", err),
				)
			}
			entry.ReadingTime = watchTime
		} else {
			entry.ReadingTime = store.GetReadTime(feed.ID, entry.Hash)
		}
	}

	// Fall back to a content-based estimate when no watch time is available
	// (fetch errors above and regular, non-video entries).
	if entry.ReadingTime == 0 {
		entry.ReadingTime = readingtime.EstimateReadingTime(entry.Content, user.DefaultReadingSpeed, user.CJKReadingSpeed)
	}
}

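// isRecentEntry reports whether the entry date is within the configured maximum
// age (config.Opts.FilterEntryMaxAgeDays); a value of 0 disables the age filter.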
func isRecentEntry(entry *model.Entry) bool {
	if config.Opts.FilterEntryMaxAgeDays() == 0 || entry.Date.After(time.Now().AddDate(0, 0, -config.Opts.FilterEntryMaxAgeDays())) {
		return true
	}
	return false
}

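// minifyEntryContent minifies scraped HTML while keeping end tags and attribute
// quotes so the markup is not broken; the original content is returned unchanged
// if minification fails.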
func minifyEntryContent(entryContent string) string {
	m := minify.New()

	// Options required to avoid breaking the HTML content.
	m.Add("text/html", &html.Minifier{
		KeepEndTags: true,
		KeepQuotes:  true,
	})

	if minifiedHTML, err := m.String("text/html", entryContent); err == nil {
		entryContent = minifiedHTML
	}

	return entryContent
}