readability.go 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374
  1. // SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
  2. // SPDX-License-Identifier: Apache-2.0
  3. package readability // import "miniflux.app/v2/internal/reader/readability"
  4. import (
  5. "fmt"
  6. "io"
  7. "log/slog"
  8. "regexp"
  9. "strings"
  10. "miniflux.app/v2/internal/urllib"
  11. "github.com/PuerkitoBio/goquery"
  12. "golang.org/x/net/html"
  13. )
const (
	// defaultTagsToScore lists the element selectors scored as potential
	// content containers by getCandidates.
	defaultTagsToScore = "section,h2,h3,h4,h5,h6,p,td,pre,div"
)

var (
	// divToPElementsRegexp matches the opening of block-level elements inside
	// a div; a div containing none of these is treated as a misused paragraph
	// by transformMisusedDivsIntoParagraphs.
	divToPElementsRegexp = regexp.MustCompile(`(?i)<(?:a|blockquote|dl|div|img|ol|p|pre|table|ul)[ />]`)

	// strongCandidates: class/id substrings that always mark an element for
	// removal (no known false positives, so maybeCandidate is not consulted).
	strongCandidates = [...]string{"popupbody", "-ad", "g-plus"}
	// maybeCandidate: substrings that veto a removal triggered by
	// unlikelyCandidate (likely false positives, e.g. "comment" in "article").
	maybeCandidate = [...]string{"and", "article", "body", "column", "main", "shadow"}
	// unlikelyCandidate: substrings that mark an element for removal unless a
	// maybeCandidate substring is also present.
	unlikelyCandidate = [...]string{"banner", "breadcrumbs", "combx", "comment", "community", "cover-wrap", "disqus", "extra", "foot", "header", "legends", "menu", "modal", "related", "remark", "replies", "rss", "shoutbox", "sidebar", "skyscraper", "social", "sponsor", "supplemental", "ad-break", "agegate", "pagination", "pager", "popup", "yom-remote"}
	// positiveKeywords raise an element's class/id weight (+25 in getWeight).
	positiveKeywords = [...]string{"article", "blog", "body", "content", "entry", "h-entry", "hentry", "main", "page", "pagination", "post", "story", "text"}
	// negativeKeywords lower an element's class/id weight (-25 in getWeight);
	// they are checked before positiveKeywords, so negative wins on overlap.
	negativeKeywords = [...]string{"author", "banner", "byline", "com-", "combx", "comment", "contact", "dateline", "foot", "hid", "masthead", "media", "meta", "modal", "outbrain", "promo", "related", "scroll", "share", "shopping", "shoutbox", "sidebar", "skyscraper", "sponsor", "tags", "tool", "widget", "writtenby"}
)
// candidate pairs a DOM selection with its readability score.
type candidate struct {
	// selection is the element this candidate wraps.
	selection *goquery.Selection
	// score is the content-likelihood score; higher means more article-like.
	score float32
}
  29. func (c *candidate) Node() *html.Node {
  30. if c.selection.Length() == 0 {
  31. return nil
  32. }
  33. return c.selection.Get(0)
  34. }
  35. func (c *candidate) String() string {
  36. node := c.Node()
  37. if node == nil {
  38. return fmt.Sprintf("empty => %f", c.score)
  39. }
  40. id, _ := c.selection.Attr("id")
  41. class, _ := c.selection.Attr("class")
  42. switch {
  43. case id != "" && class != "":
  44. return fmt.Sprintf("%s#%s.%s => %f", node.DataAtom, id, class, c.score)
  45. case id != "":
  46. return fmt.Sprintf("%s#%s => %f", node.DataAtom, id, c.score)
  47. case class != "":
  48. return fmt.Sprintf("%s.%s => %f", node.DataAtom, class, c.score)
  49. }
  50. return fmt.Sprintf("%s => %f", node.DataAtom, c.score)
  51. }
  52. type candidateList map[*html.Node]*candidate
  53. func (c candidateList) String() string {
  54. var output []string
  55. for _, candidate := range c {
  56. output = append(output, candidate.String())
  57. }
  58. return strings.Join(output, ", ")
  59. }
  60. // ExtractContent returns relevant content.
  61. func ExtractContent(page io.Reader) (baseURL string, extractedContent string, err error) {
  62. document, err := goquery.NewDocumentFromReader(page)
  63. if err != nil {
  64. return "", "", err
  65. }
  66. if hrefValue, exists := document.FindMatcher(goquery.Single("head base")).Attr("href"); exists {
  67. hrefValue = strings.TrimSpace(hrefValue)
  68. if urllib.IsAbsoluteURL(hrefValue) {
  69. baseURL = hrefValue
  70. }
  71. }
  72. document.Find("script,style").Remove()
  73. transformMisusedDivsIntoParagraphs(document)
  74. removeUnlikelyCandidates(document)
  75. candidates := getCandidates(document)
  76. topCandidate := getTopCandidate(document, candidates)
  77. slog.Debug("Readability parsing",
  78. slog.String("base_url", baseURL),
  79. slog.Any("candidates", candidates),
  80. slog.Any("topCandidate", topCandidate),
  81. )
  82. extractedContent = getArticle(topCandidate, candidates)
  83. return baseURL, extractedContent, nil
  84. }
  85. // Now that we have the top candidate, look through its siblings for content that might also be related.
  86. // Things like preambles, content split by ads that we removed, etc.
  87. func getArticle(topCandidate *candidate, candidates candidateList) string {
  88. var output strings.Builder
  89. output.WriteString("<div>")
  90. siblingScoreThreshold := max(10, topCandidate.score/5)
  91. topCandidate.selection.Siblings().Union(topCandidate.selection).Each(func(i int, s *goquery.Selection) {
  92. append := false
  93. tag := "div"
  94. node := s.Get(0)
  95. topNode := topCandidate.Node()
  96. if topNode != nil && node == topNode {
  97. append = true
  98. } else if c, ok := candidates[node]; ok && c.score >= siblingScoreThreshold {
  99. append = true
  100. } else if s.Is("p") {
  101. tag = node.Data
  102. linkDensity := getLinkDensity(s)
  103. content := s.Text()
  104. contentLength := len(content)
  105. if contentLength >= 80 {
  106. if linkDensity < .25 {
  107. append = true
  108. }
  109. } else {
  110. if linkDensity == 0 {
  111. if containsSentence(content) {
  112. append = true
  113. }
  114. }
  115. }
  116. }
  117. if append {
  118. html, _ := s.Html()
  119. output.WriteString("<" + tag + ">" + html + "</" + tag + ">")
  120. }
  121. })
  122. output.WriteString("</div>")
  123. return output.String()
  124. }
  125. func shouldRemoveCandidate(str string) bool {
  126. str = strings.ToLower(str)
  127. // Those candidates have no false-positives, no need to check against `maybeCandidate`
  128. for _, strongCandidate := range strongCandidates {
  129. if strings.Contains(str, strongCandidate) {
  130. return true
  131. }
  132. }
  133. for _, unlikelyCandidate := range unlikelyCandidate {
  134. if strings.Contains(str, unlikelyCandidate) {
  135. // Do we have a false positive?
  136. for _, maybe := range maybeCandidate {
  137. if strings.Contains(str, maybe) {
  138. return false
  139. }
  140. }
  141. // Nope, it's a true positive!
  142. return true
  143. }
  144. }
  145. return false
  146. }
  147. func removeUnlikelyCandidates(document *goquery.Document) {
  148. document.Find("*").Each(func(i int, s *goquery.Selection) {
  149. if s.Length() == 0 || s.Get(0).Data == "html" || s.Get(0).Data == "body" {
  150. return
  151. }
  152. // Don't remove elements within code blocks (pre or code tags)
  153. if s.Closest("pre, code").Length() > 0 {
  154. return
  155. }
  156. if class, ok := s.Attr("class"); ok && shouldRemoveCandidate(class) {
  157. s.Remove()
  158. } else if id, ok := s.Attr("id"); ok && shouldRemoveCandidate(id) {
  159. s.Remove()
  160. }
  161. })
  162. }
  163. func getTopCandidate(document *goquery.Document, candidates candidateList) *candidate {
  164. var best *candidate
  165. for _, c := range candidates {
  166. if best == nil {
  167. best = c
  168. } else if best.score < c.score {
  169. best = c
  170. }
  171. }
  172. if best == nil {
  173. best = &candidate{document.Find("body"), 0}
  174. }
  175. return best
  176. }
  177. // Loop through all paragraphs, and assign a score to them based on how content-y they look.
  178. // Then add their score to their parent node.
  179. // A score is determined by things like number of commas, class names, etc.
  180. // Maybe eventually link density.
  181. func getCandidates(document *goquery.Document) candidateList {
  182. candidates := make(candidateList)
  183. document.Find(defaultTagsToScore).Each(func(i int, s *goquery.Selection) {
  184. text := s.Text()
  185. // If this paragraph is less than 25 characters, don't even count it.
  186. if len(text) < 25 {
  187. return
  188. }
  189. parent := s.Parent()
  190. parentNode := parent.Get(0)
  191. grandParent := parent.Parent()
  192. var grandParentNode *html.Node
  193. if grandParent.Length() > 0 {
  194. grandParentNode = grandParent.Get(0)
  195. }
  196. if _, found := candidates[parentNode]; !found {
  197. candidates[parentNode] = scoreNode(parent)
  198. }
  199. if grandParentNode != nil {
  200. if _, found := candidates[grandParentNode]; !found {
  201. candidates[grandParentNode] = scoreNode(grandParent)
  202. }
  203. }
  204. // Add a point for the paragraph itself as a base.
  205. contentScore := float32(1.0)
  206. // Add points for any commas within this paragraph.
  207. contentScore += float32(strings.Count(text, ",") + 1)
  208. // For every 100 characters in this paragraph, add another point. Up to 3 points.
  209. contentScore += float32(min(len(text)/100.0, 3))
  210. candidates[parentNode].score += contentScore
  211. if grandParentNode != nil {
  212. candidates[grandParentNode].score += contentScore / 2.0
  213. }
  214. })
  215. // Scale the final candidates score based on link density. Good content
  216. // should have a relatively small link density (5% or less) and be mostly
  217. // unaffected by this operation
  218. for _, candidate := range candidates {
  219. candidate.score *= (1 - getLinkDensity(candidate.selection))
  220. }
  221. return candidates
  222. }
  223. func scoreNode(s *goquery.Selection) *candidate {
  224. c := &candidate{selection: s, score: 0}
  225. // Check if selection is empty to avoid panic
  226. if s.Length() == 0 {
  227. return c
  228. }
  229. switch s.Get(0).DataAtom.String() {
  230. case "div":
  231. c.score += 5
  232. case "pre", "td", "blockquote", "img":
  233. c.score += 3
  234. case "address", "ol", "ul", "dl", "dd", "dt", "li", "form":
  235. c.score -= 3
  236. case "h1", "h2", "h3", "h4", "h5", "h6", "th":
  237. c.score -= 5
  238. }
  239. c.score += getClassWeight(s)
  240. return c
  241. }
  242. // Get the density of links as a percentage of the content
  243. // This is the amount of text that is inside a link divided by the total text in the node.
  244. func getLinkDensity(s *goquery.Selection) float32 {
  245. var getLengthOfTextContent func(*html.Node) int
  246. getLengthOfTextContent = func(n *html.Node) int {
  247. total := 0
  248. if n.Type == html.TextNode {
  249. total += len(n.Data)
  250. }
  251. if n.FirstChild != nil {
  252. for c := n.FirstChild; c != nil; c = c.NextSibling {
  253. total += getLengthOfTextContent(c)
  254. }
  255. }
  256. return total
  257. }
  258. sum := 0
  259. for _, n := range s.Nodes {
  260. sum += getLengthOfTextContent(n)
  261. }
  262. if sum == 0 {
  263. return 0
  264. }
  265. // TODO: use something better than materializing the HTML.
  266. linkLength := len(s.Find("a").Text())
  267. return float32(linkLength) / float32(sum)
  268. }
  269. // Get an elements class/id weight. Uses regular expressions to tell if this
  270. // element looks good or bad.
  271. func getClassWeight(s *goquery.Selection) float32 {
  272. weight := 0
  273. if class, ok := s.Attr("class"); ok {
  274. weight += getWeight(class)
  275. }
  276. if id, ok := s.Attr("id"); ok {
  277. weight += getWeight(id)
  278. }
  279. return float32(weight)
  280. }
  281. func getWeight(s string) int {
  282. s = strings.ToLower(s)
  283. for _, keyword := range negativeKeywords {
  284. if strings.Contains(s, keyword) {
  285. return -25
  286. }
  287. }
  288. for _, keyword := range positiveKeywords {
  289. if strings.Contains(s, keyword) {
  290. return +25
  291. }
  292. }
  293. return 0
  294. }
  295. func transformMisusedDivsIntoParagraphs(document *goquery.Document) {
  296. document.Find("div").Each(func(i int, s *goquery.Selection) {
  297. html, _ := s.Html()
  298. if !divToPElementsRegexp.MatchString(html) {
  299. node := s.Get(0)
  300. node.Data = "p"
  301. }
  302. })
  303. }
  304. func containsSentence(content string) bool {
  305. return strings.HasSuffix(content, ".") || strings.Contains(content, ". ")
  306. }