batch.go 2.2 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091
  1. // SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
  2. // SPDX-License-Identifier: Apache-2.0
  3. package storage // import "miniflux.app/v2/internal/storage"
  4. import (
  5. "database/sql"
  6. "fmt"
  7. "strings"
  8. "miniflux.app/v2/internal/model"
  9. )
// BatchBuilder incrementally assembles a SQL query that selects a batch
// of feeds due for refresh. Conditions are ANDed together; each With*
// method appends a clause and returns the builder for chaining.
type BatchBuilder struct {
	db         *sql.DB
	args       []any    // query arguments, positionally matched to $N placeholders
	conditions []string // WHERE clauses joined with " AND "
	limit      int      // max rows to fetch; 0 means no LIMIT (and no ORDER BY)
}
  16. func (s *Storage) NewBatchBuilder() *BatchBuilder {
  17. return &BatchBuilder{
  18. db: s.db,
  19. }
  20. }
  21. func (b *BatchBuilder) WithBatchSize(batchSize int) *BatchBuilder {
  22. b.limit = batchSize
  23. return b
  24. }
  25. func (b *BatchBuilder) WithUserID(userID int64) *BatchBuilder {
  26. b.conditions = append(b.conditions, fmt.Sprintf("user_id = $%d", len(b.args)+1))
  27. b.args = append(b.args, userID)
  28. return b
  29. }
  30. func (b *BatchBuilder) WithCategoryID(categoryID int64) *BatchBuilder {
  31. b.conditions = append(b.conditions, fmt.Sprintf("category_id = $%d", len(b.args)+1))
  32. b.args = append(b.args, categoryID)
  33. return b
  34. }
  35. func (b *BatchBuilder) WithErrorLimit(limit int) *BatchBuilder {
  36. if limit > 0 {
  37. b.conditions = append(b.conditions, fmt.Sprintf("parsing_error_count < $%d", len(b.args)+1))
  38. b.args = append(b.args, limit)
  39. }
  40. return b
  41. }
  42. func (b *BatchBuilder) WithNextCheckExpired() *BatchBuilder {
  43. b.conditions = append(b.conditions, "next_check_at < now()")
  44. return b
  45. }
  46. func (b *BatchBuilder) WithoutDisabledFeeds() *BatchBuilder {
  47. b.conditions = append(b.conditions, "disabled is false")
  48. return b
  49. }
  50. func (b *BatchBuilder) FetchJobs() (jobs model.JobList, err error) {
  51. var parts []string
  52. parts = append(parts, `SELECT id, user_id FROM feeds`)
  53. if len(b.conditions) > 0 {
  54. parts = append(parts, fmt.Sprintf("WHERE %s", strings.Join(b.conditions, " AND ")))
  55. }
  56. if b.limit > 0 {
  57. parts = append(parts, fmt.Sprintf("ORDER BY next_check_at ASC LIMIT %d", b.limit))
  58. }
  59. query := strings.Join(parts, " ")
  60. rows, err := b.db.Query(query, b.args...)
  61. if err != nil {
  62. return nil, fmt.Errorf(`store: unable to fetch batch of jobs: %v`, err)
  63. }
  64. defer rows.Close()
  65. for rows.Next() {
  66. var job model.Job
  67. if err := rows.Scan(&job.FeedID, &job.UserID); err != nil {
  68. return nil, fmt.Errorf(`store: unable to fetch job: %v`, err)
  69. }
  70. jobs = append(jobs, job)
  71. }
  72. return jobs, nil
  73. }