mirror of
https://github.com/idanoo/autobrr
synced 2025-07-23 00:39:13 +00:00

* factor out test helpers
* refactor, add tests for animebytes
* revert test refactor
* better name
* change format, migrate some examples
* migrated remaining test cases
* add comment about `Test` vs `Tests`
* refactor
* reorder expectations to match vars
* generate
* turn on strict unmarshalling, remove old `Test` from schema
* start modifying actual definitions
* done with the As
* Bs
* C, D
* E, F
* G, H, I, ... L
* M, N
* O, P
* R
* bonus error. without this, pattern/vars disagreement can panic.
* S
* T, U
* X.. Now we know our ABCs next time won't you sing with meeeee
* fix another test
* another driveby change
* be less strict parsing custom definitions
* fix(definitions): load custom definitions

---------

Co-authored-by: ze0s <ze0s@riseup.net>
250 lines
5.9 KiB
Go
// Copyright (c) 2021 - 2023, Ludvig Lundgren and the autobrr contributors.
// SPDX-License-Identifier: GPL-2.0-or-later

package announce

import (
	"bytes"
	"net/url"
	"strings"
	"text/template"

	"github.com/autobrr/autobrr/internal/domain"
	"github.com/autobrr/autobrr/internal/indexer"
	"github.com/autobrr/autobrr/internal/release"
	"github.com/autobrr/autobrr/pkg/errors"

	"github.com/rs/zerolog"
)

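// Processor consumes raw announce lines from IRC channels and turns matching
// ones into releases for further processing.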
type Processor interface {
	AddLineToQueue(channel string, line string) error
}

type announceProcessor struct {
	log     zerolog.Logger
	indexer *domain.IndexerDefinition

	releaseSvc release.Service

	queues map[string]chan string
}

func NewAnnounceProcessor(log zerolog.Logger, releaseSvc release.Service, indexer *domain.IndexerDefinition) Processor {
	ap := &announceProcessor{
		log:        log.With().Str("module", "announce_processor").Logger(),
		releaseSvc: releaseSvc,
		indexer:    indexer,
	}

	// setup queues and consumers
	ap.setupQueues()
	ap.setupQueueConsumers()

	return ap
}

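// setupQueues creates one buffered line queue per (lower-cased) IRC channel
// in the indexer definition.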
func (a *announceProcessor) setupQueues() {
	queues := make(map[string]chan string)
	for _, channel := range a.indexer.IRC.Channels {
		channel = strings.ToLower(channel)

		queues[channel] = make(chan string, 128)
		a.log.Trace().Msgf("announce: setup queue: %v", channel)
	}

	a.queues = queues
}

func (a *announceProcessor) setupQueueConsumers() {
	for queueName, queue := range a.queues {
		go func(name string, q chan string) {
			a.log.Trace().Msgf("announce: setup queue consumer: %v", name)
			a.processQueue(q)
			a.log.Trace().Msgf("announce: queue consumer stopped: %v", name)
		}(queueName, queue)
	}
}

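// processQueue drains one channel's queue: it reads as many lines as the
// indexer definition's IRC.Parse.Lines expects, collects the regex captures
// into a shared vars map, builds a release and hands it to the release service.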
func (a *announceProcessor) processQueue(queue chan string) {
	for {
		tmpVars := map[string]string{}
		parseFailed := false
		//patternParsed := false

		for _, parseLine := range a.indexer.IRC.Parse.Lines {
			line, err := a.getNextLine(queue)
			if err != nil {
				a.log.Error().Err(err).Msg("could not get line from queue")
				return
			}
			a.log.Trace().Msgf("announce: process line: %v", line)

			// check should ignore

			match, err := indexer.ParseLine(&a.log, parseLine.Pattern, parseLine.Vars, tmpVars, line, parseLine.Ignore)
			if err != nil {
				a.log.Error().Err(err).Msgf("error parsing extract for line: %v", line)

				parseFailed = true
				break
			}

			if !match {
				a.log.Debug().Msgf("line not matching expected regex pattern: %v", line)
				parseFailed = true
				break
			}
		}

		if parseFailed {
			continue
		}

		rls := domain.NewRelease(a.indexer.Identifier)
		rls.Protocol = domain.ReleaseProtocol(a.indexer.Protocol)

		// on lines matched
		if err := a.onLinesMatched(a.indexer, tmpVars, rls); err != nil {
			a.log.Error().Err(err).Msg("error matching line")
			continue
		}

		// process release in a new goroutine
		go a.releaseSvc.Process(rls)
	}
}

func (a *announceProcessor) getNextLine(queue chan string) (string, error) {
	for {
		line, ok := <-queue
		if !ok {
			return "", errors.New("queue closed")
		}

		return line, nil
	}
}

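// AddLineToQueue queues a raw announce line for the given channel. It returns
// an error if no queue has been set up for that channel.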
func (a *announceProcessor) AddLineToQueue(channel string, line string) error {
	channel = strings.ToLower(channel)
	queue, ok := a.queues[channel]
	if !ok {
		return errors.New("no queue for channel (%v) found", channel)
	}

	queue <- line
	a.log.Trace().Msgf("announce: queued line: %v", line)

	return nil
}

// onLinesMatched processes the captured vars into the release
func (a *announceProcessor) onLinesMatched(def *domain.IndexerDefinition, vars map[string]string, rls *domain.Release) error {
	// map variables from regex capture onto release struct
	if err := rls.MapVars(def, vars); err != nil {
		a.log.Error().Err(err).Msg("announce: could not map vars for release")
		return err
	}

	// since OPS uses en-dashes as separators, which causes moistari/rls to not parse the torrentName properly,
	// we replace the en-dashes with hyphens here
	if def.Identifier == "ops" {
		rls.TorrentName = strings.ReplaceAll(rls.TorrentName, "–", "-")
	}

	// parse fields
	// run before ParseMatch to not potentially use a reconstructed TorrentName
	rls.ParseString(rls.TorrentName)

	// set baseUrl to default domain
	baseUrl := def.URLS[0]

	// override baseUrl
	if def.BaseURL != "" {
		baseUrl = def.BaseURL
	}

	// merge vars from regex captures on announce and vars from settings
	mergedVars := mergeVars(vars, def.SettingsMap)

	// parse torrentUrl
	matched, err := def.IRC.Parse.ParseMatch(baseUrl, mergedVars)
	if err != nil {
		a.log.Error().Err(err).Msgf("announce: %v", err)
		return err
	}

	if matched != nil {
		rls.DownloadURL = matched.TorrentURL

		if matched.InfoURL != "" {
			rls.InfoURL = matched.InfoURL
		}

		// only used by a few indexers
		if matched.TorrentName != "" {
			rls.TorrentName = matched.TorrentName
		}
	}

	// handle optional cookies
	if v, ok := def.SettingsMap["cookie"]; ok {
		rls.RawCookie = v
	}

	return nil
}

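// processTorrentUrl renders the torrentUrl template (match) against the merged
// announce/settings vars, URL-encoding the values of any keys listed in encode.
// Illustrative substitution, with hypothetical keys and values:
//
//	match:  "{{ .baseUrl }}torrents.php?action=download&id={{ .torrentId }}"
//	vars:   {"torrentId": "123"}, extraVars: {"baseUrl": "https://example.org/"}
//	result: "https://example.org/torrents.php?action=download&id=123"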
func (a *announceProcessor) processTorrentUrl(match string, vars map[string]string, extraVars map[string]string, encode []string) (string, error) {
	tmpVars := map[string]string{}

	// copy vars to new tmp map
	for k, v := range vars {
		tmpVars[k] = v
	}

	// merge extra vars with vars
	for k, v := range extraVars {
		tmpVars[k] = v
	}

	// handle url encode of values
	for _, e := range encode {
		if v, ok := tmpVars[e]; ok {
			// url encode value
			t := url.QueryEscape(v)
			tmpVars[e] = t
		}
	}

	// setup text template to inject variables into
	tmpl, err := template.New("torrenturl").Parse(match)
	if err != nil {
		a.log.Error().Err(err).Msg("could not create torrent url template")
		return "", err
	}

	var b bytes.Buffer
	if err := tmpl.Execute(&b, &tmpVars); err != nil {
		a.log.Error().Err(err).Msg("could not write torrent url template output")
		return "", err
	}

	a.log.Trace().Msg("torrenturl processed")

	return b.String(), nil
}

// mergeVars merges the given maps into a new map; keys in later maps override earlier ones
func mergeVars(data ...map[string]string) map[string]string {
	tmpVars := map[string]string{}

	for _, vars := range data {
		// copy vars to new tmp map
		for k, v := range vars {
			tmpVars[k] = v
		}
	}
	return tmpVars
}
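
Below is a minimal, hypothetical wiring sketch (not part of announce.go) showing how a caller, such as autobrr's IRC handler, might use this package. The def and releaseSvc values are assumed to come from the indexer and release services elsewhere; the channel name and helper function are illustrative only.

package main // hypothetical example, separate from the file above

import (
	"os"

	"github.com/autobrr/autobrr/internal/announce"
	"github.com/autobrr/autobrr/internal/domain"
	"github.com/autobrr/autobrr/internal/release"

	"github.com/rs/zerolog"
)

// handleAnnounce is a hypothetical helper showing the intended call pattern.
func handleAnnounce(def *domain.IndexerDefinition, releaseSvc release.Service, rawLine string) error {
	log := zerolog.New(os.Stdout).With().Timestamp().Logger()

	// NewAnnounceProcessor sets up one buffered queue plus a consumer
	// goroutine per channel in def.IRC.Channels.
	ap := announce.NewAnnounceProcessor(log, releaseSvc, def)

	// Each raw announce line from IRC is queued here; the consumer parses it
	// against def.IRC.Parse.Lines and, on a full match, hands the resulting
	// release to releaseSvc.Process.
	return ap.AddLineToQueue("#announce", rawLine)
}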