Mirror of https://github.com/idanoo/autobrr
Synced 2025-07-22 16:29:12 +00:00
Feature: Support multiline irc parsing (#39)

* feat: initial multiline support
* refactor: handle multiple indexers per network
* wip: setup indexer
* build: add docker compose for testing
* chore: remove temp mock indexers
* chore: update deps
* refactor: update and store network handler
* build: update test compose
* chore: minor cleanup
Parent: 506cef6f0f
Commit: c4d580eb03
17 changed files with 1100 additions and 1042 deletions
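The core of the change is the new internal/irc/announce.go (diffed below): the old announce service is replaced by an announce processor per indexer, which owns one buffered queue per IRC channel and consumes one queued line per configured parse pattern before assembling a release. What follows is a minimal, self-contained sketch of that queue pattern; the type and variable names (linePattern, processor) are illustrative stand-ins, not the project's actual API.

package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

// linePattern pairs a regex with the variable names its capture groups fill,
// loosely mirroring the per-line patterns of an indexer definition.
type linePattern struct {
	re   *regexp.Regexp
	vars []string
}

type processor struct {
	patterns []linePattern
	queues   map[string]chan string
}

func newProcessor(channels []string, patterns []linePattern) *processor {
	p := &processor{patterns: patterns, queues: map[string]chan string{}}
	for _, ch := range channels {
		ch = strings.ToLower(ch)
		p.queues[ch] = make(chan string, 128) // one buffered queue per channel
		go p.consume(p.queues[ch])            // one consumer per queue
	}
	return p
}

// AddLineToQueue is all the IRC handler needs to call: it forwards a raw
// announce line, and parsing happens asynchronously on the consumer side.
func (p *processor) AddLineToQueue(channel, line string) error {
	q, ok := p.queues[strings.ToLower(channel)]
	if !ok {
		return fmt.Errorf("no queue for channel %q", channel)
	}
	q <- line
	return nil
}

// consume pulls exactly one queued line per configured pattern, so an announce
// that spans several IRC lines is assembled into a single set of variables.
func (p *processor) consume(q chan string) {
	for {
		vars := map[string]string{}
		matched := true
		for _, pat := range p.patterns {
			line, open := <-q
			if !open {
				return
			}
			m := pat.re.FindStringSubmatch(line)
			if m == nil {
				matched = false
				break // line did not match; drop this announce
			}
			for i, name := range pat.vars {
				vars[name] = m[i+1]
			}
		}
		if matched {
			fmt.Printf("announce assembled: %v\n", vars)
		}
	}
}

func main() {
	// two patterns, so the processor expects two lines per announce
	p := newProcessor([]string{"#announce"}, []linePattern{
		{re: regexp.MustCompile(`^NEW (.+)$`), vars: []string{"torrentName"}},
		{re: regexp.MustCompile(`^URL (https?://\S+)$`), vars: []string{"torrentUrl"}},
	})
	_ = p.AddLineToQueue("#announce", "NEW Some.Release.1080p.WEB")
	_ = p.AddLineToQueue("#announce", "URL https://example.test/download/1")

	time.Sleep(100 * time.Millisecond) // give the toy consumer time to print
}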
@@ -14,7 +14,6 @@ import (
	_ "modernc.org/sqlite"

	"github.com/autobrr/autobrr/internal/action"
	"github.com/autobrr/autobrr/internal/announce"
	"github.com/autobrr/autobrr/internal/auth"
	"github.com/autobrr/autobrr/internal/config"
	"github.com/autobrr/autobrr/internal/database"

@@ -80,16 +79,17 @@ func main() {
		userRepo = database.NewUserRepo(db)
	)

	// setup services
	var (
		downloadClientService = download_client.NewService(downloadClientRepo)
		actionService = action.NewService(actionRepo, downloadClientService, bus)
		indexerService = indexer.NewService(indexerRepo)
		filterService = filter.NewService(filterRepo, actionRepo, indexerService)
		releaseService = release.NewService(releaseRepo, actionService)
		announceService = announce.NewService(filterService, indexerService, releaseService)
		ircService = irc.NewService(ircRepo, announceService)
		ircService = irc.NewService(ircRepo, filterService, indexerService, releaseService)
		userService = user.NewService(userRepo)
		authService = auth.NewService(userService)
		//announceService = announce.NewService(filterService, indexerService, releaseService)
	)

	// register event subscribers
go.mod: 1 changed line
@@ -10,6 +10,7 @@ require (
	github.com/gdm85/go-libdeluge v0.5.5
	github.com/go-chi/chi v1.5.4
	github.com/gorilla/sessions v1.2.1
	github.com/gorilla/websocket v1.4.2
	github.com/lib/pq v1.10.4
	github.com/mattn/go-isatty v0.0.14 // indirect
	github.com/pkg/errors v0.9.1
go.sum: 1 changed line
@@ -446,6 +446,7 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uilive v0.0.0-20170323041506-ac356e6e42cd/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8=
github.com/gosuri/uilive v0.0.3/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8=
|
@ -1,218 +0,0 @@
|
|||
package announce
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/autobrr/autobrr/internal/domain"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func (s *service) parseLineSingle(def *domain.IndexerDefinition, release *domain.Release, line string) error {
|
||||
for _, extract := range def.Parse.Lines {
|
||||
tmpVars := map[string]string{}
|
||||
|
||||
var err error
|
||||
match, err := s.parseExtract(extract.Pattern, extract.Vars, tmpVars, line)
|
||||
if err != nil {
|
||||
log.Debug().Msgf("error parsing extract: %v", line)
|
||||
return err
|
||||
}
|
||||
|
||||
if !match {
|
||||
log.Debug().Msgf("line not matching expected regex pattern: %v", line)
|
||||
return errors.New("line not matching expected regex pattern")
|
||||
}
|
||||
|
||||
// on lines matched
|
||||
err = s.onLinesMatched(def, tmpVars, release)
|
||||
if err != nil {
|
||||
log.Debug().Msgf("error match line: %v", line)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) parseMultiLine() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) parseExtract(pattern string, vars []string, tmpVars map[string]string, line string) (bool, error) {
|
||||
|
||||
rxp, err := regExMatch(pattern, line)
|
||||
if err != nil {
|
||||
log.Debug().Msgf("did not match expected line: %v", line)
|
||||
}
|
||||
|
||||
if rxp == nil {
|
||||
//return nil, nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// extract matched
|
||||
for i, v := range vars {
|
||||
value := ""
|
||||
|
||||
if rxp[i] != "" {
|
||||
value = rxp[i]
|
||||
// tmpVars[v] = rxp[i]
|
||||
}
|
||||
|
||||
tmpVars[v] = value
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *service) onLinesMatched(def *domain.IndexerDefinition, vars map[string]string, release *domain.Release) error {
|
||||
var err error
|
||||
|
||||
err = release.MapVars(vars)
|
||||
|
||||
// TODO is this even needed anymore
|
||||
// canonicalize name
|
||||
//canonReleaseName := cleanReleaseName(release.TorrentName)
|
||||
//log.Trace().Msgf("canonicalize release name: %v", canonReleaseName)
|
||||
|
||||
err = release.Parse()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("announce: could not parse release")
|
||||
return err
|
||||
}
|
||||
|
||||
// torrent url
|
||||
torrentUrl, err := s.processTorrentUrl(def.Parse.Match.TorrentURL, vars, def.SettingsMap, def.Parse.Match.Encode)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("announce: could not process torrent url")
|
||||
return err
|
||||
}
|
||||
|
||||
if torrentUrl != "" {
|
||||
release.TorrentURL = torrentUrl
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) processTorrentUrl(match string, vars map[string]string, extraVars map[string]string, encode []string) (string, error) {
|
||||
tmpVars := map[string]string{}
|
||||
|
||||
// copy vars to new tmp map
|
||||
for k, v := range vars {
|
||||
tmpVars[k] = v
|
||||
}
|
||||
|
||||
// merge extra vars with vars
|
||||
if extraVars != nil {
|
||||
for k, v := range extraVars {
|
||||
tmpVars[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// handle url encode of values
|
||||
if encode != nil {
|
||||
for _, e := range encode {
|
||||
if v, ok := tmpVars[e]; ok {
|
||||
// url encode value
|
||||
t := url.QueryEscape(v)
|
||||
tmpVars[e] = t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setup text template to inject variables into
|
||||
tmpl, err := template.New("torrenturl").Parse(match)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not create torrent url template")
|
||||
return "", err
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
err = tmpl.Execute(&b, &tmpVars)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not write torrent url template output")
|
||||
return "", err
|
||||
}
|
||||
|
||||
return b.String(), nil
|
||||
}
|
||||
|
||||
func split(r rune) bool {
|
||||
return r == ' ' || r == '.'
|
||||
}
|
||||
|
||||
func Splitter(s string, splits string) []string {
|
||||
m := make(map[rune]int)
|
||||
for _, r := range splits {
|
||||
m[r] = 1
|
||||
}
|
||||
|
||||
splitter := func(r rune) bool {
|
||||
return m[r] == 1
|
||||
}
|
||||
|
||||
return strings.FieldsFunc(s, splitter)
|
||||
}
|
||||
|
||||
func canonicalizeString(s string) []string {
|
||||
//a := strings.FieldsFunc(s, split)
|
||||
a := Splitter(s, " .")
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func cleanReleaseName(input string) string {
|
||||
// Make a Regex to say we only want letters and numbers
|
||||
reg, err := regexp.Compile("[^a-zA-Z0-9]+")
|
||||
if err != nil {
|
||||
//log.Fatal(err)
|
||||
}
|
||||
processedString := reg.ReplaceAllString(input, " ")
|
||||
|
||||
return processedString
|
||||
}
|
||||
|
||||
func removeElement(s []string, i int) ([]string, error) {
|
||||
// s is [1,2,3,4,5,6], i is 2
|
||||
|
||||
// perform bounds checking first to prevent a panic!
|
||||
if i >= len(s) || i < 0 {
|
||||
return nil, fmt.Errorf("Index is out of range. Index is %d with slice length %d", i, len(s))
|
||||
}
|
||||
|
||||
// This creates a new slice by creating 2 slices from the original:
|
||||
// s[:i] -> [1, 2]
|
||||
// s[i+1:] -> [4, 5, 6]
|
||||
// and joining them together using `append`
|
||||
return append(s[:i], s[i+1:]...), nil
|
||||
}
|
||||
|
||||
func regExMatch(pattern string, value string) ([]string, error) {
|
||||
|
||||
rxp, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
//return errors.Wrapf(err, "invalid regex: %s", value)
|
||||
}
|
||||
|
||||
matches := rxp.FindStringSubmatch(value)
|
||||
if matches == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
res := make([]string, 0)
|
||||
if matches != nil {
|
||||
res, err = removeElement(matches, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
|
@ -1,581 +0,0 @@
|
|||
package announce
|
||||
|
||||
//func Test_service_OnNewLine(t *testing.T) {
|
||||
// tfiles := tracker.NewService()
|
||||
// tfiles.ReadFiles()
|
||||
//
|
||||
// type fields struct {
|
||||
// trackerSvc tracker.Service
|
||||
// }
|
||||
// type args struct {
|
||||
// msg string
|
||||
// }
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// fields fields
|
||||
// args args
|
||||
// wantErr bool
|
||||
// }{
|
||||
// // TODO: Add test cases.
|
||||
// {
|
||||
// name: "parse announce",
|
||||
// fields: fields{
|
||||
// trackerSvc: tfiles,
|
||||
// },
|
||||
// args: args{
|
||||
// msg: "New Torrent Announcement: <PC :: Iso> Name:'debian live 10 6 0 amd64 standard iso' uploaded by 'Anonymous' - http://www.tracker01.test/torrent/263302",
|
||||
// },
|
||||
// // expect struct: category, torrentName uploader freeleech baseurl torrentId
|
||||
// wantErr: false,
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// s := &service{
|
||||
// trackerSvc: tt.fields.trackerSvc,
|
||||
// }
|
||||
// if err := s.OnNewLine(tt.args.msg); (err != nil) != tt.wantErr {
|
||||
// t.Errorf("OnNewLine() error = %v, wantErr %v", err, tt.wantErr)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
//}
|
||||
|
||||
//func Test_service_parse(t *testing.T) {
|
||||
// type fields struct {
|
||||
// trackerSvc tracker.Service
|
||||
// }
|
||||
// type args struct {
|
||||
// serverName string
|
||||
// channelName string
|
||||
// announcer string
|
||||
// line string
|
||||
// }
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// fields fields
|
||||
// args args
|
||||
// wantErr bool
|
||||
// }{
|
||||
// // TODO: Add test cases.
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// s := &service{
|
||||
// trackerSvc: tt.fields.trackerSvc,
|
||||
// }
|
||||
// if err := s.parse(tt.args.serverName, tt.args.channelName, tt.args.announcer, tt.args.line); (err != nil) != tt.wantErr {
|
||||
// t.Errorf("parse() error = %v, wantErr %v", err, tt.wantErr)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
//}
|
||||
|
||||
/*
|
||||
var (
|
||||
tracker01 = domain.TrackerInstance{
|
||||
Name: "T01",
|
||||
Enabled: true,
|
||||
Settings: nil,
|
||||
Auth: map[string]string{"rsskey": "000aaa111bbb222ccc333ddd"},
|
||||
//IRC: nil,
|
||||
Info: &domain.TrackerInfo{
|
||||
Type: "t01",
|
||||
ShortName: "T01",
|
||||
LongName: "Tracker01",
|
||||
SiteName: "www.tracker01.test",
|
||||
IRC: domain.TrackerIRCServer{
|
||||
Network: "Tracker01.test",
|
||||
ServerNames: []string{"irc.tracker01.test"},
|
||||
ChannelNames: []string{"#tracker01", "#t01announces"},
|
||||
AnnouncerNames: []string{"_AnnounceBot_"},
|
||||
},
|
||||
ParseInfo: domain.ParseInfo{
|
||||
LinePatterns: []domain.TrackerExtractPattern{
|
||||
|
||||
{
|
||||
PatternType: "linepattern",
|
||||
Optional: false,
|
||||
Regex: regexp.MustCompile("New Torrent Announcement:\\s*<([^>]*)>\\s*Name:'(.*)' uploaded by '([^']*)'\\s*(freeleech)*\\s*-\\s*https?\\:\\/\\/([^\\/]+\\/)torrent\\/(\\d+)"),
|
||||
Vars: []string{"category", "torrentName", "uploader", "$freeleech", "$baseUrl", "$torrentId"},
|
||||
},
|
||||
},
|
||||
MultiLinePatterns: nil,
|
||||
LineMatched: domain.LineMatched{
|
||||
Vars: []domain.LineMatchVars{
|
||||
{
|
||||
Name: "freeleech",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "false"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "torrentUrl",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "https://"},
|
||||
{Type: "var", Value: "$baseUrl"},
|
||||
{Type: "string", Value: "rss/download/"},
|
||||
{Type: "var", Value: "$torrentId"},
|
||||
{Type: "string", Value: "/"},
|
||||
{Type: "var", Value: "rsskey"},
|
||||
{Type: "string", Value: "/"},
|
||||
{Type: "varenc", Value: "torrentName"},
|
||||
{Type: "string", Value: ".torrent"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Extract: nil,
|
||||
LineMatchIf: nil,
|
||||
VarReplace: nil,
|
||||
SetRegex: &domain.SetRegex{
|
||||
SrcVar: "$freeleech",
|
||||
Regex: regexp.MustCompile("freeleech"),
|
||||
VarName: "freeleech",
|
||||
NewValue: "true",
|
||||
},
|
||||
ExtractOne: domain.ExtractOne{Extract: nil},
|
||||
ExtractTags: domain.ExtractTags{
|
||||
Name: "",
|
||||
SrcVar: "",
|
||||
Split: "",
|
||||
Regex: nil,
|
||||
SetVarIf: nil,
|
||||
},
|
||||
},
|
||||
Ignore: []domain.TrackerIgnore{},
|
||||
},
|
||||
},
|
||||
}
|
||||
tracker05 = domain.TrackerInstance{
|
||||
Name: "T05",
|
||||
Enabled: true,
|
||||
Settings: nil,
|
||||
Auth: map[string]string{"authkey": "000aaa111bbb222ccc333ddd", "torrent_pass": "eee444fff555ggg666hhh777"},
|
||||
//IRC: nil,
|
||||
Info: &domain.TrackerInfo{
|
||||
Type: "t05",
|
||||
ShortName: "T05",
|
||||
LongName: "Tracker05",
|
||||
SiteName: "tracker05.test",
|
||||
IRC: domain.TrackerIRCServer{
|
||||
Network: "Tracker05.test",
|
||||
ServerNames: []string{"irc.tracker05.test"},
|
||||
ChannelNames: []string{"#t05-announce"},
|
||||
AnnouncerNames: []string{"Drone"},
|
||||
},
|
||||
ParseInfo: domain.ParseInfo{
|
||||
LinePatterns: []domain.TrackerExtractPattern{
|
||||
|
||||
{
|
||||
PatternType: "linepattern",
|
||||
Optional: false,
|
||||
Regex: regexp.MustCompile("^(.*)\\s+-\\s+https?:.*[&\\?]id=.*https?\\:\\/\\/([^\\/]+\\/).*[&\\?]id=(\\d+)\\s*-\\s*(.*)"),
|
||||
Vars: []string{"torrentName", "$baseUrl", "$torrentId", "tags"},
|
||||
},
|
||||
},
|
||||
MultiLinePatterns: nil,
|
||||
LineMatched: domain.LineMatched{
|
||||
Vars: []domain.LineMatchVars{
|
||||
{
|
||||
Name: "scene",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "false"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "log",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "false"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cue",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "false"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "freeleech",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "false"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "torrentUrl",
|
||||
Vars: []domain.LineMatchVarElem{
|
||||
{Type: "string", Value: "https://"},
|
||||
{Type: "var", Value: "$baseUrl"},
|
||||
{Type: "string", Value: "torrents.php?action=download&id="},
|
||||
{Type: "var", Value: "$torrentId"},
|
||||
{Type: "string", Value: "&authkey="},
|
||||
{Type: "var", Value: "authkey"},
|
||||
{Type: "string", Value: "&torrent_pass="},
|
||||
{Type: "var", Value: "torrent_pass"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Extract: []domain.Extract{
|
||||
{SrcVar: "torrentName", Optional: true, Regex: regexp.MustCompile("[(\\[]((?:19|20)\\d\\d)[)\\]]"), Vars: []string{"year"}},
|
||||
{SrcVar: "$releaseTags", Optional: true, Regex: regexp.MustCompile("([\\d.]+)%"), Vars: []string{"logScore"}},
|
||||
},
|
||||
LineMatchIf: nil,
|
||||
VarReplace: []domain.ParseVarReplace{
|
||||
{Name: "tags", SrcVar: "tags", Regex: regexp.MustCompile("[._]"), Replace: " "},
|
||||
},
|
||||
SetRegex: nil,
|
||||
ExtractOne: domain.ExtractOne{Extract: []domain.Extract{
|
||||
{SrcVar: "torrentName", Optional: false, Regex: regexp.MustCompile("^(.+?) - ([^\\[]+).*\\[(\\d{4})\\] \\[([^\\[]+)\\] - ([^\\-\\[\\]]+)"), Vars: []string{"name1", "name2", "year", "releaseType", "$releaseTags"}},
|
||||
{SrcVar: "torrentName", Optional: false, Regex: regexp.MustCompile("^([^\\-]+)\\s+-\\s+(.+)"), Vars: []string{"name1", "name2"}},
|
||||
{SrcVar: "torrentName", Optional: false, Regex: regexp.MustCompile("(.*)"), Vars: []string{"name1"}},
|
||||
}},
|
||||
ExtractTags: domain.ExtractTags{
|
||||
Name: "",
|
||||
SrcVar: "$releaseTags",
|
||||
Split: "/",
|
||||
Regex: []*regexp.Regexp{regexp.MustCompile("^(?:5\\.1 Audio|\\.m4a|Various.*|~.*|>.*)$")},
|
||||
SetVarIf: []domain.SetVarIf{
|
||||
{VarName: "format", Value: "", NewValue: "", Regex: regexp.MustCompile("^(?:MP3|FLAC|Ogg Vorbis|AAC|AC3|DTS)$")},
|
||||
{VarName: "bitrate", Value: "", NewValue: "", Regex: regexp.MustCompile("Lossless$")},
|
||||
{VarName: "bitrate", Value: "", NewValue: "", Regex: regexp.MustCompile("^(?:vbr|aps|apx|v\\d|\\d{2,4}|\\d+\\.\\d+|q\\d+\\.[\\dx]+|Other)?(?:\\s*kbps|\\s*kbits?|\\s*k)?(?:\\s*\\(?(?:vbr|cbr)\\)?)?$")},
|
||||
{VarName: "media", Value: "", NewValue: "", Regex: regexp.MustCompile("^(?:CD|DVD|Vinyl|Soundboard|SACD|DAT|Cassette|WEB|Blu-ray|Other)$")},
|
||||
{VarName: "scene", Value: "Scene", NewValue: "true", Regex: nil},
|
||||
{VarName: "log", Value: "Log", NewValue: "true", Regex: nil},
|
||||
{VarName: "cue", Value: "Cue", NewValue: "true", Regex: nil},
|
||||
{VarName: "freeleech", Value: "Freeleech!", NewValue: "true", Regex: nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
Ignore: []domain.TrackerIgnore{},
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
*/
|
||||
|
||||
//func Test_service_parse(t *testing.T) {
|
||||
// type fields struct {
|
||||
// name string
|
||||
// trackerSvc tracker.Service
|
||||
// queues map[string]chan string
|
||||
// }
|
||||
// type args struct {
|
||||
// ti *domain.TrackerInstance
|
||||
// message string
|
||||
// }
|
||||
//
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// fields fields
|
||||
// args args
|
||||
// want *domain.Announce
|
||||
// wantErr bool
|
||||
// }{
|
||||
// {
|
||||
// name: "tracker01_no_freeleech",
|
||||
// fields: fields{
|
||||
// name: "T01",
|
||||
// trackerSvc: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker01,
|
||||
// message: "New Torrent Announcement: <PC :: Iso> Name:'debian live 10 6 0 amd64 standard iso' uploaded by 'Anonymous' - http://www.tracker01.test/torrent/263302",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// Freeleech: false,
|
||||
// Category: "PC :: Iso",
|
||||
// Name: "debian live 10 6 0 amd64 standard iso",
|
||||
// Uploader: "Anonymous",
|
||||
// TorrentUrl: "https://www.tracker01.test/rss/download/263302/000aaa111bbb222ccc333ddd/debian+live+10+6+0+amd64+standard+iso.torrent",
|
||||
// Site: "T01",
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "tracker01_freeleech",
|
||||
// fields: fields{
|
||||
// name: "T01",
|
||||
// trackerSvc: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker01,
|
||||
// message: "New Torrent Announcement: <PC :: Iso> Name:'debian live 10 6 0 amd64 standard iso' uploaded by 'Anonymous' freeleech - http://www.tracker01.test/torrent/263302",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// Freeleech: true,
|
||||
// Category: "PC :: Iso",
|
||||
// Name: "debian live 10 6 0 amd64 standard iso",
|
||||
// Uploader: "Anonymous",
|
||||
// TorrentUrl: "https://www.tracker01.test/rss/download/263302/000aaa111bbb222ccc333ddd/debian+live+10+6+0+amd64+standard+iso.torrent",
|
||||
// Site: "T01",
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "tracker05_01",
|
||||
// fields: fields{
|
||||
// name: "T05",
|
||||
// trackerSvc: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker05,
|
||||
// message: "Roy Buchanan - Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD - http://passtheheadphones.me/torrents.php?id=97614 / http://tracker05.test/torrents.php?action=download&id=1382972 - blues, rock, classic.rock,jazz,blues.rock,electric.blues",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// Name1: "Roy Buchanan - Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Name2: "Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Freeleech: false,
|
||||
// Name: "Roy Buchanan - Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// TorrentUrl: "https://tracker05.test/torrents.php?action=download&id=1382972&authkey=000aaa111bbb222ccc333ddd&torrent_pass=eee444fff555ggg666hhh777",
|
||||
// Site: "T05",
|
||||
// Tags: "blues, rock, classic rock,jazz,blues rock,electric blues",
|
||||
// Log: "true",
|
||||
// Cue: true,
|
||||
// Format: "FLAC",
|
||||
// Bitrate: "Lossless",
|
||||
// Media: "CD",
|
||||
// Scene: false,
|
||||
// Year: 1977,
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "tracker05_02",
|
||||
// fields: fields{
|
||||
// name: "T05",
|
||||
// trackerSvc: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker05,
|
||||
// message: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD - http://tracker05.test/torrents.php?id=72158898 / http://tracker05.test/torrents.php?action=download&id=29910415 - 1990s, folk, world_music, celtic",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// ReleaseType: "Album",
|
||||
// Name1: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Name2: "Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Freeleech: false,
|
||||
// Name: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// TorrentUrl: "https://tracker05.test/torrents.php?action=download&id=29910415&authkey=000aaa111bbb222ccc333ddd&torrent_pass=eee444fff555ggg666hhh777",
|
||||
// Site: "T05",
|
||||
// Tags: "1990s, folk, world music, celtic",
|
||||
// Log: "true",
|
||||
// Cue: true,
|
||||
// Format: "FLAC",
|
||||
// Bitrate: "Lossless",
|
||||
// Media: "CD",
|
||||
// Scene: false,
|
||||
// Year: 1998,
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// s := &service{
|
||||
// name: tt.fields.name,
|
||||
// trackerSvc: tt.fields.trackerSvc,
|
||||
// queues: tt.fields.queues,
|
||||
// }
|
||||
// got, err := s.parse(tt.args.ti, tt.args.message)
|
||||
//
|
||||
// if (err != nil) != tt.wantErr {
|
||||
// t.Errorf("parse() error = %v, wantErr %v", err, tt.wantErr)
|
||||
// return
|
||||
// }
|
||||
// assert.Equal(t, tt.want, got)
|
||||
// })
|
||||
// }
|
||||
//}
|
||||
|
||||
//func Test_service_parseSingleLine(t *testing.T) {
|
||||
// type fields struct {
|
||||
// name string
|
||||
// ts tracker.Service
|
||||
// queues map[string]chan string
|
||||
// }
|
||||
// type args struct {
|
||||
// ti *domain.TrackerInstance
|
||||
// line string
|
||||
// }
|
||||
//
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// fields fields
|
||||
// args args
|
||||
// want *domain.Announce
|
||||
// wantErr bool
|
||||
// }{
|
||||
// {
|
||||
// name: "tracker01_no_freeleech",
|
||||
// fields: fields{
|
||||
// name: "T01",
|
||||
// ts: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker01,
|
||||
// line: "New Torrent Announcement: <PC :: Iso> Name:'debian live 10 6 0 amd64 standard iso' uploaded by 'Anonymous' - http://www.tracker01.test/torrent/263302",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// Freeleech: false,
|
||||
// Category: "PC :: Iso",
|
||||
// Name: "debian live 10 6 0 amd64 standard iso",
|
||||
// Uploader: "Anonymous",
|
||||
// TorrentUrl: "https://www.tracker01.test/rss/download/263302/000aaa111bbb222ccc333ddd/debian+live+10+6+0+amd64+standard+iso.torrent",
|
||||
// Site: "T01",
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "tracker01_freeleech",
|
||||
// fields: fields{
|
||||
// name: "T01",
|
||||
// ts: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker01,
|
||||
// line: "New Torrent Announcement: <PC :: Iso> Name:'debian live 10 6 0 amd64 standard iso' uploaded by 'Anonymous' freeleech - http://www.tracker01.test/torrent/263302",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// Freeleech: true,
|
||||
// Category: "PC :: Iso",
|
||||
// Name: "debian live 10 6 0 amd64 standard iso",
|
||||
// Uploader: "Anonymous",
|
||||
// TorrentUrl: "https://www.tracker01.test/rss/download/263302/000aaa111bbb222ccc333ddd/debian+live+10+6+0+amd64+standard+iso.torrent",
|
||||
// Site: "T01",
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "tracker05_01",
|
||||
// fields: fields{
|
||||
// name: "T05",
|
||||
// ts: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker05,
|
||||
// line: "Roy Buchanan - Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD - http://passtheheadphones.me/torrents.php?id=97614 / http://tracker05.test/torrents.php?action=download&id=1382972 - blues, rock, classic.rock,jazz,blues.rock,electric.blues",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// Name1: "Roy Buchanan - Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Name2: "Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Freeleech: false,
|
||||
// Name: "Roy Buchanan - Loading Zone [1977] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// TorrentUrl: "https://tracker05.test/torrents.php?action=download&id=1382972&authkey=000aaa111bbb222ccc333ddd&torrent_pass=eee444fff555ggg666hhh777",
|
||||
// Site: "T05",
|
||||
// Tags: "blues, rock, classic rock,jazz,blues rock,electric blues",
|
||||
// //Log: "true",
|
||||
// //Cue: true,
|
||||
// //Format: "FLAC",
|
||||
// //Bitrate: "Lossless",
|
||||
// //Media: "CD",
|
||||
// Log: "false",
|
||||
// Cue: false,
|
||||
// Format: "",
|
||||
// Bitrate: "",
|
||||
// Media: "",
|
||||
// Scene: false,
|
||||
// Year: 1977,
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "tracker05_02",
|
||||
// fields: fields{
|
||||
// name: "T05",
|
||||
// ts: nil,
|
||||
// queues: make(map[string]chan string),
|
||||
// }, args: args{
|
||||
// ti: &tracker05,
|
||||
// line: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD - http://tracker05.test/torrents.php?id=72158898 / http://tracker05.test/torrents.php?action=download&id=29910415 - 1990s, folk, world_music, celtic",
|
||||
// },
|
||||
// want: &domain.Announce{
|
||||
// ReleaseType: "Album",
|
||||
// Name1: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Name2: "Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// Freeleech: false,
|
||||
// Name: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// TorrentUrl: "https://tracker05.test/torrents.php?action=download&id=29910415&authkey=000aaa111bbb222ccc333ddd&torrent_pass=eee444fff555ggg666hhh777",
|
||||
// Site: "T05",
|
||||
// Tags: "1990s, folk, world music, celtic",
|
||||
// Log: "true",
|
||||
// Cue: true,
|
||||
// Format: "FLAC",
|
||||
// Bitrate: "Lossless",
|
||||
// Media: "CD",
|
||||
// Scene: false,
|
||||
// Year: 1998,
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// s := &service{
|
||||
// name: tt.fields.name,
|
||||
// trackerSvc: tt.fields.ts,
|
||||
// queues: tt.fields.queues,
|
||||
// }
|
||||
//
|
||||
// announce := domain.Announce{
|
||||
// Site: tt.fields.name,
|
||||
// //Line: msg,
|
||||
// }
|
||||
// got, err := s.parseSingleLine(tt.args.ti, tt.args.line, &announce)
|
||||
// if (err != nil) != tt.wantErr {
|
||||
// t.Errorf("parseSingleLine() error = %v, wantErr %v", err, tt.wantErr)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// assert.Equal(t, tt.want, got)
|
||||
// })
|
||||
// }
|
||||
//}
|
||||
|
||||
//func Test_service_extractReleaseInfo(t *testing.T) {
|
||||
// type fields struct {
|
||||
// name string
|
||||
// queues map[string]chan string
|
||||
// }
|
||||
// type args struct {
|
||||
// varMap map[string]string
|
||||
// releaseName string
|
||||
// }
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// fields fields
|
||||
// args args
|
||||
// wantErr bool
|
||||
// }{
|
||||
// {
|
||||
// name: "test_01",
|
||||
// fields: fields{
|
||||
// name: "", queues: nil,
|
||||
// },
|
||||
// args: args{
|
||||
// varMap: map[string]string{},
|
||||
// releaseName: "Heirloom - Road to the Isles [1998] [Album] - FLAC / Lossless / Log / 100% / Cue / CD",
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// {
|
||||
// name: "test_02",
|
||||
// fields: fields{
|
||||
// name: "", queues: nil,
|
||||
// },
|
||||
// args: args{
|
||||
// varMap: map[string]string{},
|
||||
// releaseName: "Lost S06E07 720p WEB-DL DD 5.1 H.264 - LP",
|
||||
// },
|
||||
// wantErr: false,
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// s := &service{
|
||||
// queues: tt.fields.queues,
|
||||
// }
|
||||
// if err := s.extractReleaseInfo(tt.args.varMap, tt.args.releaseName); (err != nil) != tt.wantErr {
|
||||
// t.Errorf("extractReleaseInfo() error = %v, wantErr %v", err, tt.wantErr)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
//}
|
|
@ -1,110 +0,0 @@
|
|||
package announce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/autobrr/autobrr/internal/domain"
|
||||
"github.com/autobrr/autobrr/internal/filter"
|
||||
"github.com/autobrr/autobrr/internal/indexer"
|
||||
"github.com/autobrr/autobrr/internal/release"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type Service interface {
|
||||
Parse(announceID string, msg string) error
|
||||
}
|
||||
|
||||
type service struct {
|
||||
filterSvc filter.Service
|
||||
indexerSvc indexer.Service
|
||||
releaseSvc release.Service
|
||||
queues map[string]chan string
|
||||
}
|
||||
|
||||
func NewService(filterService filter.Service, indexerSvc indexer.Service, releaseService release.Service) Service {
|
||||
|
||||
//queues := make(map[string]chan string)
|
||||
//for _, channel := range tinfo {
|
||||
//
|
||||
//}
|
||||
|
||||
return &service{
|
||||
filterSvc: filterService,
|
||||
indexerSvc: indexerSvc,
|
||||
releaseSvc: releaseService,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse announce line
|
||||
func (s *service) Parse(announceID string, msg string) error {
|
||||
ctx := context.Background()
|
||||
// make simpler by injecting indexer, or indexerdefinitions
|
||||
|
||||
// announceID (server:channel:announcer)
|
||||
definition := s.indexerSvc.GetIndexerByAnnounce(announceID)
|
||||
if definition == nil {
|
||||
log.Debug().Msgf("could not find indexer definition: %v", announceID)
|
||||
return nil
|
||||
}
|
||||
|
||||
newRelease, err := domain.NewRelease(definition.Identifier, msg)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not create new release")
|
||||
return err
|
||||
}
|
||||
|
||||
// parse lines
|
||||
if definition.Parse.Type == "single" {
|
||||
err = s.parseLineSingle(definition, newRelease, msg)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("could not parse single line: %v", msg)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// TODO implement multiline parsing
|
||||
|
||||
filterOK, foundFilter, err := s.filterSvc.FindAndCheckFilters(newRelease)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not find filter")
|
||||
return err
|
||||
}
|
||||
|
||||
// no foundFilter found, lets return
|
||||
if !filterOK || foundFilter == nil {
|
||||
log.Trace().Msg("no matching filter found")
|
||||
|
||||
// TODO check in config for "Save all releases"
|
||||
// Save as rejected
|
||||
//newRelease.FilterStatus = domain.ReleaseStatusFilterRejected
|
||||
//err = s.releaseSvc.Store(ctx, newRelease)
|
||||
//if err != nil {
|
||||
// log.Error().Err(err).Msgf("error writing release to database: %+v", newRelease)
|
||||
// return nil
|
||||
//}
|
||||
return nil
|
||||
}
|
||||
|
||||
// save release
|
||||
newRelease.Filter = foundFilter
|
||||
newRelease.FilterName = foundFilter.Name
|
||||
newRelease.FilterID = foundFilter.ID
|
||||
|
||||
newRelease.FilterStatus = domain.ReleaseStatusFilterApproved
|
||||
err = s.releaseSvc.Store(ctx, newRelease)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("error writing release to database: %+v", newRelease)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info().Msgf("Matched '%v' (%v) for %v", newRelease.TorrentName, newRelease.Filter.Name, newRelease.Indexer)
|
||||
|
||||
// process release
|
||||
go func() {
|
||||
err = s.releaseSvc.Process(*newRelease)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("could not process release: %+v", newRelease)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -1,9 +1,9 @@
package database

import (
	"context"
	"database/sql"
	"encoding/json"

	"github.com/autobrr/autobrr/internal/domain"
	"github.com/rs/zerolog/log"
)

@@ -139,17 +139,16 @@ func (r *IndexerRepo) FindByFilterID(id int) ([]domain.Indexer, error) {

}

func (r *IndexerRepo) Delete(id int) error {
func (r *IndexerRepo) Delete(ctx context.Context, id int) error {
	query := `DELETE FROM indexer WHERE id = ?`

	res, err := r.db.Exec(`DELETE FROM indexer WHERE id = ?`, id)
	_, err := r.db.ExecContext(ctx, query, id)
	if err != nil {
		log.Error().Stack().Err(err).Msg("error executing query")
		log.Error().Stack().Err(err).Msgf("indexer.delete: error executing query: '%v'", query)
		return err
	}

	rows, _ := res.RowsAffected()

	log.Info().Msgf("rows affected %v", rows)
	log.Debug().Msgf("indexer.delete: id %v", id)

	return nil
}
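An aside on the hunk above: the repositories are being moved from Exec to ExecContext so that the caller's context, and its cancellation, reaches the database driver. A standalone sketch of that pattern using the modernc.org/sqlite driver this project already imports; the table layout and the timeout are invented for the example.

package main

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	_ "modernc.org/sqlite" // same driver the project imports in cmd/autobrr
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// example-only schema and seed row
	if _, err := db.Exec(`CREATE TABLE indexer (id INTEGER PRIMARY KEY, identifier TEXT)`); err != nil {
		panic(err)
	}
	if _, err := db.Exec(`INSERT INTO indexer (identifier) VALUES ('mock')`); err != nil {
		panic(err)
	}

	// the caller's context flows all the way down to the query, so an
	// HTTP request cancellation or timeout also cancels the delete
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	query := `DELETE FROM indexer WHERE id = ?`
	if _, err := db.ExecContext(ctx, query, 1); err != nil {
		fmt.Printf("indexer.delete: error executing query: '%v'\n", query)
		panic(err)
	}
	fmt.Println("indexer.delete: id 1")
}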
@ -4,6 +4,8 @@ import (
|
|||
"context"
|
||||
"database/sql"
|
||||
|
||||
sq "github.com/Masterminds/squirrel"
|
||||
|
||||
"github.com/autobrr/autobrr/internal/domain"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
@ -74,6 +76,39 @@ func (ir *IrcRepo) DeleteNetwork(ctx context.Context, id int64) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ir *IrcRepo) FindActiveNetworks(ctx context.Context) ([]domain.IrcNetwork, error) {
|
||||
|
||||
rows, err := ir.db.QueryContext(ctx, "SELECT id, enabled, name, server, port, tls, pass, invite_command, nickserv_account, nickserv_password FROM irc_network WHERE enabled = true")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
var networks []domain.IrcNetwork
|
||||
for rows.Next() {
|
||||
var net domain.IrcNetwork
|
||||
|
||||
var pass, inviteCmd sql.NullString
|
||||
var tls sql.NullBool
|
||||
|
||||
if err := rows.Scan(&net.ID, &net.Enabled, &net.Name, &net.Server, &net.Port, &tls, &pass, &inviteCmd, &net.NickServ.Account, &net.NickServ.Password); err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
|
||||
net.TLS = tls.Bool
|
||||
net.Pass = pass.String
|
||||
net.InviteCommand = inviteCmd.String
|
||||
|
||||
networks = append(networks, net)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return networks, nil
|
||||
}
|
||||
|
||||
func (ir *IrcRepo) ListNetworks(ctx context.Context) ([]domain.IrcNetwork, error) {
|
||||
|
||||
rows, err := ir.db.QueryContext(ctx, "SELECT id, enabled, name, server, port, tls, pass, invite_command, nickserv_account, nickserv_password FROM irc_network")
|
||||
|
@ -132,6 +167,45 @@ func (ir *IrcRepo) ListChannels(networkID int64) ([]domain.IrcChannel, error) {
|
|||
return channels, nil
|
||||
}
|
||||
|
||||
func (ir *IrcRepo) CheckExistingNetwork(ctx context.Context, network *domain.IrcNetwork) (*domain.IrcNetwork, error) {
|
||||
|
||||
queryBuilder := sq.
|
||||
Select("id", "enabled", "name", "server", "port", "tls", "pass", "invite_command", "nickserv_account", "nickserv_password").
|
||||
From("irc_network").
|
||||
Where("server = ?", network.Server).
|
||||
Where("nickserv_account = ?", network.NickServ.Account)
|
||||
|
||||
query, args, err := queryBuilder.ToSql()
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msg("irc.check_existing_network: error fetching data")
|
||||
return nil, err
|
||||
}
|
||||
log.Trace().Str("database", "irc.check_existing_network").Msgf("query: '%v', args: '%v'", query, args)
|
||||
|
||||
row := ir.db.QueryRowContext(ctx, query, args...)
|
||||
|
||||
var net domain.IrcNetwork
|
||||
|
||||
var pass, inviteCmd, nickPass sql.NullString
|
||||
var tls sql.NullBool
|
||||
|
||||
err = row.Scan(&net.ID, &net.Enabled, &net.Name, &net.Server, &net.Port, &tls, &pass, &inviteCmd, &net.NickServ.Account, &nickPass)
|
||||
if err == sql.ErrNoRows {
|
||||
// no result is not an error in our case
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
log.Error().Stack().Err(err).Msg("irc.check_existing_network: error scanning data to struct")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
net.TLS = tls.Bool
|
||||
net.Pass = pass.String
|
||||
net.InviteCommand = inviteCmd.String
|
||||
net.NickServ.Password = nickPass.String
|
||||
|
||||
return &net, nil
|
||||
}
|
||||
|
||||
func (ir *IrcRepo) StoreNetwork(network *domain.IrcNetwork) error {
|
||||
|
||||
netName := toNullString(network.Name)
|
||||
|
@ -167,6 +241,10 @@ func (ir *IrcRepo) StoreNetwork(network *domain.IrcNetwork) error {
|
|||
nsPassword,
|
||||
network.ID,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msg("irc.store_network: error executing query")
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
var res sql.Result
|
||||
|
||||
|
@ -180,7 +258,7 @@ func (ir *IrcRepo) StoreNetwork(network *domain.IrcNetwork) error {
|
|||
invite_command,
|
||||
nickserv_account,
|
||||
nickserv_password
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT DO NOTHING`,
|
||||
network.Enabled,
|
||||
netName,
|
||||
network.Server,
|
||||
|
@ -192,7 +270,7 @@ func (ir *IrcRepo) StoreNetwork(network *domain.IrcNetwork) error {
|
|||
nsPassword,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msg("error executing query")
|
||||
log.Error().Stack().Err(err).Msg("irc.store_network: error executing query")
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -202,6 +280,100 @@ func (ir *IrcRepo) StoreNetwork(network *domain.IrcNetwork) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (ir *IrcRepo) UpdateNetwork(ctx context.Context, network *domain.IrcNetwork) error {
|
||||
|
||||
netName := toNullString(network.Name)
|
||||
pass := toNullString(network.Pass)
|
||||
inviteCmd := toNullString(network.InviteCommand)
|
||||
|
||||
nsAccount := toNullString(network.NickServ.Account)
|
||||
nsPassword := toNullString(network.NickServ.Password)
|
||||
|
||||
var err error
|
||||
// update record
|
||||
_, err = ir.db.ExecContext(ctx, `UPDATE irc_network
|
||||
SET enabled = ?,
|
||||
name = ?,
|
||||
server = ?,
|
||||
port = ?,
|
||||
tls = ?,
|
||||
pass = ?,
|
||||
invite_command = ?,
|
||||
nickserv_account = ?,
|
||||
nickserv_password = ?,
|
||||
updated_at = CURRENT_TIMESTAMP
|
||||
WHERE id = ?`,
|
||||
network.Enabled,
|
||||
netName,
|
||||
network.Server,
|
||||
network.Port,
|
||||
network.TLS,
|
||||
pass,
|
||||
inviteCmd,
|
||||
nsAccount,
|
||||
nsPassword,
|
||||
network.ID,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msg("irc.store_network: error executing query")
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO create new channel handler to only add, not delete
|
||||
|
||||
func (ir *IrcRepo) StoreNetworkChannels(ctx context.Context, networkID int64, channels []domain.IrcChannel) error {
|
||||
|
||||
tx, err := ir.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
|
||||
_, err = tx.ExecContext(ctx, `DELETE FROM irc_channel WHERE network_id = ?`, networkID)
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msgf("error deleting channels for network: %v", networkID)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, channel := range channels {
|
||||
var res sql.Result
|
||||
pass := toNullString(channel.Password)
|
||||
|
||||
res, err = tx.ExecContext(ctx, `INSERT INTO irc_channel (
|
||||
enabled,
|
||||
detached,
|
||||
name,
|
||||
password,
|
||||
network_id
|
||||
) VALUES (?, ?, ?, ?, ?)`,
|
||||
channel.Enabled,
|
||||
true,
|
||||
channel.Name,
|
||||
pass,
|
||||
networkID,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msg("error executing query")
|
||||
return err
|
||||
}
|
||||
|
||||
channel.ID, err = res.LastInsertId()
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
log.Error().Stack().Err(err).Msgf("error deleting network: %v", networkID)
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ir *IrcRepo) StoreChannel(networkID int64, channel *domain.IrcChannel) error {
|
||||
pass := toNullString(channel.Password)
|
||||
|
||||
|
@ -231,7 +403,7 @@ func (ir *IrcRepo) StoreChannel(networkID int64, channel *domain.IrcChannel) err
|
|||
name,
|
||||
password,
|
||||
network_id
|
||||
) VALUES (?, ?, ?, ?, ?)`,
|
||||
) VALUES (?, ?, ?, ?, ?) ON CONFLICT DO NOTHING`,
|
||||
channel.Enabled,
|
||||
true,
|
||||
channel.Name,
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
package domain
|
||||
|
||||
import "context"
|
||||
|
||||
type IndexerRepo interface {
|
||||
Store(indexer Indexer) (*Indexer, error)
|
||||
Update(indexer Indexer) (*Indexer, error)
|
||||
List() ([]Indexer, error)
|
||||
Delete(id int) error
|
||||
Delete(ctx context.Context, id int) error
|
||||
FindByFilterID(id int) ([]Indexer, error)
|
||||
}
|
||||
|
||||
|
|
|
@ -36,7 +36,11 @@ type IrcNetwork struct {
|
|||
|
||||
type IrcRepo interface {
|
||||
StoreNetwork(network *IrcNetwork) error
|
||||
UpdateNetwork(ctx context.Context, network *IrcNetwork) error
|
||||
StoreChannel(networkID int64, channel *IrcChannel) error
|
||||
StoreNetworkChannels(ctx context.Context, networkID int64, channels []IrcChannel) error
|
||||
CheckExistingNetwork(ctx context.Context, network *IrcNetwork) (*IrcNetwork, error)
|
||||
FindActiveNetworks(ctx context.Context) ([]IrcNetwork, error)
|
||||
ListNetworks(ctx context.Context) ([]IrcNetwork, error)
|
||||
ListChannels(networkID int64) ([]IrcChannel, error)
|
||||
GetNetworkByID(id int64) (*IrcNetwork, error)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
@ -16,7 +17,7 @@ type indexerService interface {
|
|||
List() ([]domain.Indexer, error)
|
||||
GetAll() ([]*domain.IndexerDefinition, error)
|
||||
GetTemplates() ([]domain.IndexerDefinition, error)
|
||||
Delete(id int) error
|
||||
Delete(ctx context.Context, id int) error
|
||||
}
|
||||
|
||||
type indexerHandler struct {
|
||||
|
@ -96,7 +97,7 @@ func (h indexerHandler) delete(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
id, _ := strconv.Atoi(idParam)
|
||||
|
||||
if err := h.service.Delete(id); err != nil {
|
||||
if err := h.service.Delete(ctx, id); err != nil {
|
||||
// return err
|
||||
}
|
||||
|
||||
|
|
|
@ -15,7 +15,8 @@ type ircService interface {
|
|||
ListNetworks(ctx context.Context) ([]domain.IrcNetwork, error)
|
||||
DeleteNetwork(ctx context.Context, id int64) error
|
||||
GetNetworkByID(id int64) (*domain.IrcNetwork, error)
|
||||
StoreNetwork(network *domain.IrcNetwork) error
|
||||
StoreNetwork(ctx context.Context, network *domain.IrcNetwork) error
|
||||
UpdateNetwork(ctx context.Context, network *domain.IrcNetwork) error
|
||||
StoreChannel(networkID int64, channel *domain.IrcChannel) error
|
||||
StopNetwork(name string) error
|
||||
}
|
||||
|
@ -35,7 +36,7 @@ func newIrcHandler(encoder encoder, service ircService) *ircHandler {
|
|||
func (h ircHandler) Routes(r chi.Router) {
|
||||
r.Get("/", h.listNetworks)
|
||||
r.Post("/", h.storeNetwork)
|
||||
r.Put("/network/{networkID}", h.storeNetwork)
|
||||
r.Put("/network/{networkID}", h.updateNetwork)
|
||||
r.Post("/network/{networkID}/channel", h.storeChannel)
|
||||
r.Get("/network/{networkID}/stop", h.stopNetwork)
|
||||
r.Get("/network/{networkID}", h.getNetworkByID)
|
||||
|
@ -79,7 +80,27 @@ func (h ircHandler) storeNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
err := h.service.StoreNetwork(&data)
|
||||
err := h.service.StoreNetwork(ctx, &data)
|
||||
if err != nil {
|
||||
//
|
||||
h.encoder.StatusResponse(ctx, w, nil, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
h.encoder.StatusResponse(ctx, w, nil, http.StatusCreated)
|
||||
}
|
||||
|
||||
func (h ircHandler) updateNetwork(w http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
data domain.IrcNetwork
|
||||
)
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err := h.service.UpdateNetwork(ctx, &data)
|
||||
if err != nil {
|
||||
//
|
||||
h.encoder.StatusResponse(ctx, w, nil, http.StatusBadRequest)
|
||||
|
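The new PUT route above is what lets an existing network be updated rather than re-created. A hedged, standalone sketch of exercising a route of that shape with chi (already in this repo's go.mod) and net/http/httptest; the handler below is a stub standing in for ircHandler.updateNetwork, and the JSON fields are example values rather than the full domain.IrcNetwork.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/go-chi/chi"
)

// example-only payload; the real handler decodes a domain.IrcNetwork
type ircNetwork struct {
	Name   string `json:"name"`
	Server string `json:"server"`
}

func main() {
	r := chi.NewRouter()
	r.Put("/network/{networkID}", func(w http.ResponseWriter, req *http.Request) {
		var data ircNetwork
		if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		id := chi.URLParam(req, "networkID") // path parameter, as in the new route
		fmt.Printf("update network %v: %+v\n", id, data)
		w.WriteHeader(http.StatusNoContent)
	})

	srv := httptest.NewServer(r)
	defer srv.Close()

	// server/name values borrowed from the test fixtures elsewhere in this commit
	body := strings.NewReader(`{"name":"Tracker01.test","server":"irc.tracker01.test"}`)
	req, err := http.NewRequest(http.MethodPut, srv.URL+"/network/1", body)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("status:", resp.StatusCode)
}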
|
|
@ -1,6 +1,7 @@
|
|||
package indexer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"strings"
|
||||
|
@ -14,13 +15,13 @@ import (
|
|||
type Service interface {
|
||||
Store(indexer domain.Indexer) (*domain.Indexer, error)
|
||||
Update(indexer domain.Indexer) (*domain.Indexer, error)
|
||||
Delete(id int) error
|
||||
Delete(ctx context.Context, id int) error
|
||||
FindByFilterID(id int) ([]domain.Indexer, error)
|
||||
List() ([]domain.Indexer, error)
|
||||
GetAll() ([]*domain.IndexerDefinition, error)
|
||||
GetTemplates() ([]domain.IndexerDefinition, error)
|
||||
LoadIndexerDefinitions() error
|
||||
GetIndexerByAnnounce(name string) *domain.IndexerDefinition
|
||||
GetIndexersByIRCNetwork(server string) []domain.IndexerDefinition
|
||||
Start() error
|
||||
}
|
||||
|
||||
|
@ -30,19 +31,18 @@ type service struct {
|
|||
// contains all raw indexer definitions
|
||||
indexerDefinitions map[string]domain.IndexerDefinition
|
||||
|
||||
// contains indexers with data set
|
||||
indexerInstances map[string]domain.IndexerDefinition
|
||||
|
||||
// map server:channel:announce to indexer.Identifier
|
||||
mapIndexerIRCToName map[string]string
|
||||
|
||||
lookupIRCServerDefinition map[string]map[string]domain.IndexerDefinition
|
||||
}
|
||||
|
||||
func NewService(repo domain.IndexerRepo) Service {
|
||||
return &service{
|
||||
repo: repo,
|
||||
indexerDefinitions: make(map[string]domain.IndexerDefinition),
|
||||
indexerInstances: make(map[string]domain.IndexerDefinition),
|
||||
mapIndexerIRCToName: make(map[string]string),
|
||||
repo: repo,
|
||||
indexerDefinitions: make(map[string]domain.IndexerDefinition),
|
||||
mapIndexerIRCToName: make(map[string]string),
|
||||
lookupIRCServerDefinition: make(map[string]map[string]domain.IndexerDefinition),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -79,11 +79,14 @@ func (s *service) Update(indexer domain.Indexer) (*domain.Indexer, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (s *service) Delete(id int) error {
|
||||
if err := s.repo.Delete(id); err != nil {
|
||||
func (s *service) Delete(ctx context.Context, id int) error {
|
||||
if err := s.repo.Delete(ctx, id); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO remove handler if needed
|
||||
// remove from lookup tables
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -192,9 +195,10 @@ func (s *service) Start() error {
|
|||
}
|
||||
|
||||
for _, indexerDefinition := range indexerDefinitions {
|
||||
s.indexerInstances[indexerDefinition.Identifier] = *indexerDefinition
|
||||
|
||||
s.mapIRCIndexerLookup(indexerDefinition.Identifier, *indexerDefinition)
|
||||
|
||||
// add to irc server lookup table
|
||||
s.mapIRCServerDefinitionLookup(indexerDefinition.IRC.Server, *indexerDefinition)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -223,8 +227,6 @@ func (s *service) addIndexer(indexer domain.Indexer) error {
|
|||
// continue
|
||||
//}
|
||||
|
||||
s.indexerInstances[indexerDefinition.Identifier] = *indexerDefinition
|
||||
|
||||
s.mapIRCIndexerLookup(indexer.Identifier, *indexerDefinition)
|
||||
|
||||
return nil
|
||||
|
@ -251,6 +253,21 @@ func (s *service) mapIRCIndexerLookup(indexerIdentifier string, indexerDefinitio
|
|||
}
|
||||
}
|
||||
|
||||
// mapIRCServerDefinitionLookup map irc stuff to indexer.name
|
||||
// map[irc.network.test][indexer1] = indexer1
|
||||
// map[irc.network.test][indexer2] = indexer2
|
||||
func (s *service) mapIRCServerDefinitionLookup(ircServer string, indexerDefinition domain.IndexerDefinition) {
|
||||
if indexerDefinition.IRC != nil {
|
||||
// check if already exists, if ok add it to existing, otherwise create new
|
||||
_, exists := s.lookupIRCServerDefinition[ircServer]
|
||||
if !exists {
|
||||
s.lookupIRCServerDefinition[ircServer] = map[string]domain.IndexerDefinition{}
|
||||
}
|
||||
|
||||
s.lookupIRCServerDefinition[ircServer][indexerDefinition.Identifier] = indexerDefinition
|
||||
}
|
||||
}
|
||||
|
||||
// LoadIndexerDefinitions load definitions from golang embed fs
|
||||
func (s *service) LoadIndexerDefinitions() error {
|
||||
|
||||
|
@ -293,16 +310,19 @@ func (s *service) LoadIndexerDefinitions() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *service) GetIndexerByAnnounce(name string) *domain.IndexerDefinition {
|
||||
name = strings.ToLower(name)
|
||||
func (s *service) GetIndexersByIRCNetwork(server string) []domain.IndexerDefinition {
|
||||
server = strings.ToLower(server)
|
||||
|
||||
if identifier, idOk := s.mapIndexerIRCToName[name]; idOk {
|
||||
if indexer, ok := s.indexerInstances[identifier]; ok {
|
||||
return &indexer
|
||||
indexerDefinitions := make([]domain.IndexerDefinition, 0)
|
||||
|
||||
// get indexer definitions matching irc network from lookup table
|
||||
if srv, idOk := s.lookupIRCServerDefinition[server]; idOk {
|
||||
for _, definition := range srv {
|
||||
indexerDefinitions = append(indexerDefinitions, definition)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return indexerDefinitions
|
||||
}
|
||||
|
||||
func (s *service) getDefinitionByName(name string) *domain.IndexerDefinition {
|
||||
|
|
359
internal/irc/announce.go
Normal file
359
internal/irc/announce.go
Normal file
|
@ -0,0 +1,359 @@
|
|||
package irc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/autobrr/autobrr/internal/domain"
|
||||
"github.com/autobrr/autobrr/internal/filter"
|
||||
"github.com/autobrr/autobrr/internal/release"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type Processor interface {
|
||||
AddLineToQueue(channel string, line string) error
|
||||
}
|
||||
|
||||
type announceProcessor struct {
|
||||
indexer domain.IndexerDefinition
|
||||
|
||||
filterSvc filter.Service
|
||||
releaseSvc release.Service
|
||||
|
||||
queues map[string]chan string
|
||||
}
|
||||
|
||||
func NewAnnounceProcessor(indexer domain.IndexerDefinition, filterSvc filter.Service, releaseSvc release.Service) Processor {
|
||||
ap := &announceProcessor{
|
||||
indexer: indexer,
|
||||
filterSvc: filterSvc,
|
||||
releaseSvc: releaseSvc,
|
||||
}
|
||||
|
||||
// setup queues and consumers
|
||||
ap.setupQueues()
|
||||
ap.setupQueueConsumers()
|
||||
|
||||
return ap
|
||||
}
|
||||
|
||||
func (a *announceProcessor) setupQueues() {
|
||||
queues := make(map[string]chan string)
|
||||
for _, channel := range a.indexer.IRC.Channels {
|
||||
channel = strings.ToLower(channel)
|
||||
|
||||
queues[channel] = make(chan string, 128)
|
||||
}
|
||||
|
||||
a.queues = queues
|
||||
}
|
||||
|
||||
func (a *announceProcessor) setupQueueConsumers() {
|
||||
for queueName, queue := range a.queues {
|
||||
go func(name string, q chan string) {
|
||||
a.processQueue(q)
|
||||
}(queueName, queue)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *announceProcessor) processQueue(queue chan string) {
|
||||
for {
|
||||
tmpVars := map[string]string{}
|
||||
parseFailed := false
|
||||
//patternParsed := false
|
||||
|
||||
		for _, pattern := range a.indexer.Parse.Lines {
			line, err := a.getNextLine(queue)
			if err != nil {
				log.Error().Stack().Err(err).Msg("could not get line from queue")
				return
			}
			log.Trace().Msgf("announce: process line: %v", line)

			// check should ignore

			match, err := a.parseExtract(pattern.Pattern, pattern.Vars, tmpVars, line)
			if err != nil {
				log.Debug().Msgf("error parsing extract: %v", line)

				parseFailed = true
				break
			}

			if !match {
				log.Debug().Msgf("line not matching expected regex pattern: %v", line)
				parseFailed = true
				break
			}
		}

		if parseFailed {
			continue
		}

		newRelease, err := domain.NewRelease(a.indexer.Identifier, "")
		if err != nil {
			log.Error().Err(err).Msg("could not create new release")
			break
		}

		// on lines matched
		err = a.onLinesMatched(a.indexer, tmpVars, newRelease)
		if err != nil {
			log.Debug().Msgf("error match line: %v", "")
			break
		}

		// send to filter service to take care of the rest

		// find and check filter
		filterOK, foundFilter, err := a.filterSvc.FindAndCheckFilters(newRelease)
		if err != nil {
			log.Error().Err(err).Msg("could not find filter")
			break
		}

		// no foundFilter found, lets return
		if !filterOK || foundFilter == nil {
			log.Trace().Msg("no matching filter found")
			break

			// TODO check in config for "Save all releases"
			// Save as rejected
			//newRelease.FilterStatus = domain.ReleaseStatusFilterRejected
			//err = s.releaseSvc.Store(ctx, newRelease)
			//if err != nil {
			//	log.Error().Err(err).Msgf("error writing release to database: %+v", newRelease)
			//	return nil
			//}
			//return nil
		}

		// save release
		newRelease.Filter = foundFilter
		newRelease.FilterName = foundFilter.Name
		newRelease.FilterID = foundFilter.ID

		newRelease.FilterStatus = domain.ReleaseStatusFilterApproved
		err = a.releaseSvc.Store(context.Background(), newRelease)
		if err != nil {
			log.Error().Err(err).Msgf("error writing release to database: %+v", newRelease)
			break
		}

		log.Info().Msgf("Matched '%v' (%v) for %v", newRelease.TorrentName, newRelease.Filter.Name, newRelease.Indexer)

		// process release
		go func() {
			err = a.releaseSvc.Process(*newRelease)
			if err != nil {
				log.Error().Err(err).Msgf("could not process release: %+v", newRelease)
			}
		}()
	}
}

func (a *announceProcessor) getNextLine(queue chan string) (string, error) {
	for {
		line, ok := <-queue
		if !ok {
			return "", errors.New("could not queue line")
		}

		return line, nil
	}
}

func (a *announceProcessor) AddLineToQueue(channel string, line string) error {
	channel = strings.ToLower(channel)
	queue, ok := a.queues[channel]
	if !ok {
		return fmt.Errorf("no queue for channel (%v) found", channel)
	}

	queue <- line
	log.Trace().Msgf("announce: queued line: %v", line)

	return nil
}

func (a *announceProcessor) parseExtract(pattern string, vars []string, tmpVars map[string]string, line string) (bool, error) {

	rxp, err := regExMatch(pattern, line)
	if err != nil {
		log.Debug().Msgf("did not match expected line: %v", line)
	}

	if rxp == nil {
		return false, nil
	}

	// extract matched
	for i, v := range vars {
		value := ""

		if rxp[i] != "" {
			value = rxp[i]
			// tmpVars[v] = rxp[i]
		}

		tmpVars[v] = value
	}
	return true, nil
}
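
For reference, a minimal, self-contained sketch of the extraction step above. The announce line, pattern, and var names are made up for illustration; in practice they come from the indexer definition (pattern.Pattern and pattern.Vars):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// hypothetical pattern and vars, similar to what an indexer definition might declare
	pattern := `New Torrent: (.*) - Category: (.*) - Size: (.*)`
	vars := []string{"torrentName", "category", "torrentSize"}
	line := "New Torrent: Some.Release.1080p-GROUP - Category: Movies - Size: 4.2 GB"

	rxp := regexp.MustCompile(pattern)
	matches := rxp.FindStringSubmatch(line)
	if matches == nil {
		fmt.Println("line did not match")
		return
	}

	// matches[0] is the full match; the capture groups map onto vars in order,
	// mirroring how parseExtract fills tmpVars
	tmpVars := map[string]string{}
	for i, v := range vars {
		tmpVars[v] = matches[i+1]
	}

	fmt.Println(tmpVars["torrentName"], tmpVars["category"], tmpVars["torrentSize"])
}
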
// onLinesMatched process vars into release
func (a *announceProcessor) onLinesMatched(def domain.IndexerDefinition, vars map[string]string, release *domain.Release) error {
	var err error

	err = release.MapVars(vars)

	// FIXME is this even needed anymore?
	// canonicalize name
	//canonReleaseName := cleanReleaseName(release.TorrentName)
	//log.Trace().Msgf("canonicalize release name: %v", canonReleaseName)

	// parse fields
	err = release.Parse()
	if err != nil {
		log.Error().Err(err).Msg("announce: could not parse release")
		return err
	}

	// generate torrent url
	torrentUrl, err := a.processTorrentUrl(def.Parse.Match.TorrentURL, vars, def.SettingsMap, def.Parse.Match.Encode)
	if err != nil {
		log.Error().Err(err).Msg("announce: could not process torrent url")
		return err
	}

	if torrentUrl != "" {
		release.TorrentURL = torrentUrl
	}

	return nil
}

func (a *announceProcessor) processTorrentUrl(match string, vars map[string]string, extraVars map[string]string, encode []string) (string, error) {
	tmpVars := map[string]string{}

	// copy vars to new tmp map
	for k, v := range vars {
		tmpVars[k] = v
	}

	// merge extra vars with vars
	if extraVars != nil {
		for k, v := range extraVars {
			tmpVars[k] = v
		}
	}

	// handle url encode of values
	if encode != nil {
		for _, e := range encode {
			if v, ok := tmpVars[e]; ok {
				// url encode value
				t := url.QueryEscape(v)
				tmpVars[e] = t
			}
		}
	}

	// setup text template to inject variables into
	tmpl, err := template.New("torrenturl").Parse(match)
	if err != nil {
		log.Error().Err(err).Msg("could not create torrent url template")
		return "", err
	}

	var b bytes.Buffer
	err = tmpl.Execute(&b, &tmpVars)
	if err != nil {
		log.Error().Err(err).Msg("could not write torrent url template output")
		return "", err
	}

	return b.String(), nil
}
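
To make the template step concrete, a standalone sketch of how a torrentUrl template plus merged vars produce the final URL. The URL shape, variable names, and values here are hypothetical, not taken from any real indexer definition:

package main

import (
	"bytes"
	"fmt"
	"net/url"
	"text/template"
)

func main() {
	// hypothetical torrentUrl template and vars, as an indexer definition might supply them
	match := "https://tracker.example.org/download/{{ .torrentId }}/{{ .rsskey }}/{{ .torrentName }}.torrent"
	vars := map[string]string{
		"torrentId":   "123456",
		"rsskey":      "abcdef",
		"torrentName": "Some Release 1080p",
	}

	// url-encode selected values, like the encode list handled in processTorrentUrl
	vars["torrentName"] = url.QueryEscape(vars["torrentName"])

	tmpl, err := template.New("torrenturl").Parse(match)
	if err != nil {
		panic(err)
	}

	var b bytes.Buffer
	if err := tmpl.Execute(&b, vars); err != nil {
		panic(err)
	}

	// prints: https://tracker.example.org/download/123456/abcdef/Some+Release+1080p.torrent
	fmt.Println(b.String())
}
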
func split(r rune) bool {
	return r == ' ' || r == '.'
}

func Splitter(s string, splits string) []string {
	m := make(map[rune]int)
	for _, r := range splits {
		m[r] = 1
	}

	splitter := func(r rune) bool {
		return m[r] == 1
	}

	return strings.FieldsFunc(s, splitter)
}

func canonicalizeString(s string) []string {
	//a := strings.FieldsFunc(s, split)
	a := Splitter(s, " .")

	return a
}

func cleanReleaseName(input string) string {
	// Make a Regex to say we only want letters and numbers
	reg, err := regexp.Compile("[^a-zA-Z0-9]+")
	if err != nil {
		//log.Fatal(err)
	}
	processedString := reg.ReplaceAllString(input, " ")

	return processedString
}

func removeElement(s []string, i int) ([]string, error) {
	// s is [1,2,3,4,5,6], i is 2

	// perform bounds checking first to prevent a panic!
	if i >= len(s) || i < 0 {
		return nil, fmt.Errorf("Index is out of range. Index is %d with slice length %d", i, len(s))
	}

	// This creates a new slice by creating 2 slices from the original:
	// s[:i] -> [1, 2]
	// s[i+1:] -> [4, 5, 6]
	// and joining them together using `append`
	return append(s[:i], s[i+1:]...), nil
}

func regExMatch(pattern string, value string) ([]string, error) {

	rxp, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
		//return errors.Wrapf(err, "invalid regex: %s", value)
	}

	matches := rxp.FindStringSubmatch(value)
	if matches == nil {
		return nil, nil
	}

	res := make([]string, 0)
	if matches != nil {
		res, err = removeElement(matches, 0)
		if err != nil {
			return nil, err
		}
	}

	return res, nil
}
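
The string helpers above are easiest to read by example; a short sketch of their expected output (the inputs are made up):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Splitter splits on any rune in the given set, via strings.FieldsFunc
	fields := strings.FieldsFunc("Some.Release 1080p", func(r rune) bool {
		return strings.ContainsRune(" .", r)
	})
	fmt.Println(fields) // [Some Release 1080p]

	// cleanReleaseName replaces every non-alphanumeric run with a single space
	reg := regexp.MustCompile("[^a-zA-Z0-9]+")
	fmt.Println(reg.ReplaceAllString("Some.Release-1080p_GROUP", " ")) // Some Release 1080p GROUP
}
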
@ -10,8 +10,9 @@ import (
	"strings"
	"time"

	"github.com/autobrr/autobrr/internal/announce"
	"github.com/autobrr/autobrr/internal/domain"
	"github.com/autobrr/autobrr/internal/filter"
	"github.com/autobrr/autobrr/internal/release"

	"github.com/rs/zerolog/log"
	"gopkg.in/irc.v3"

@ -21,9 +22,20 @@ var (
	connectTimeout = 15 * time.Second
)

type channelHealth struct {
	name            string
	monitoring      bool
	monitoringSince time.Time
	lastPing        time.Time
	lastAnnounce    time.Time
}

type Handler struct {
	network         *domain.IrcNetwork
	announceService announce.Service
	network            *domain.IrcNetwork
	filterService      filter.Service
	releaseService     release.Service
	announceProcessors map[string]Processor
	definitions        []domain.IndexerDefinition

	client *irc.Client
	conn   net.Conn

@ -33,17 +45,42 @@ type Handler struct {

	lastPing     time.Time
	lastAnnounce time.Time

	validAnnouncers map[string]struct{}
	channels        map[string]channelHealth
}

func NewHandler(network domain.IrcNetwork, announceService announce.Service) *Handler {
	return &Handler{
		client:          nil,
		conn:            nil,
		ctx:             nil,
		stopped:         make(chan struct{}),
		network:         &network,
		announceService: announceService,
func NewHandler(network domain.IrcNetwork, filterService filter.Service, releaseService release.Service, definitions []domain.IndexerDefinition) *Handler {
	h := &Handler{
		client:             nil,
		conn:               nil,
		ctx:                nil,
		stopped:            make(chan struct{}),
		network:            &network,
		filterService:      filterService,
		releaseService:     releaseService,
		definitions:        definitions,
		announceProcessors: map[string]Processor{},
		validAnnouncers:    map[string]struct{}{},
	}

	// Networks can be shared by multiple indexers but channels are unique
	// so let's add a new AnnounceProcessor per channel
	for _, definition := range definitions {
		// indexers can use multiple channels, but it's not common, but let's handle that anyway.
		for _, channel := range definition.IRC.Channels {
			channel = strings.ToLower(channel)

			h.announceProcessors[channel] = NewAnnounceProcessor(definition, filterService, releaseService)
		}

		// create map of valid announcers
		for _, announcer := range definition.IRC.Announcers {
			h.validAnnouncers[announcer] = struct{}{}
		}
	}

	return h
}
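
The important change in the constructor is that announce processors are keyed by lower-cased channel name, so one handler per network can serve several indexers and still route each channel's lines to the right parser. A simplified sketch of that routing idea (the processor type here is a stand-in, not the real Processor interface):

package main

import (
	"fmt"
	"strings"
)

// simplified stand-in for a per-channel announce processor
type lineProcessor func(line string)

func main() {
	processors := map[string]lineProcessor{}

	// hypothetical definitions: two indexers sharing one network, each with its own channel
	for _, d := range []struct{ indexer, channel string }{
		{"indexer-a", "#Announce-A"},
		{"indexer-b", "#announce-b"},
	} {
		d := d
		processors[strings.ToLower(d.channel)] = func(line string) {
			fmt.Printf("%s parses: %s\n", d.indexer, line)
		}
	}

	// routing mirrors sendToAnnounceProcessor: look up by lower-cased channel name
	if p, ok := processors[strings.ToLower("#Announce-B")]; ok {
		p("New Torrent: ...")
	}
}
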
func (s *Handler) Run() error {

@ -101,11 +138,14 @@ func (s *Handler) Run() error {
		switch m.Command {
		case "001":
			// 001 is a welcome event, so we join channels there
			err := s.onConnect(c, s.network.Channels)
			err := s.onConnect(s.network.Channels)
			if err != nil {
				log.Error().Msgf("error joining channels %v", err)
			}

		case "372", "375", "376":
			// Handle MOTD

		// 322 TOPIC
		// 333 UP
		// 353 @

@ -189,6 +229,10 @@ func (s *Handler) GetNetwork() *domain.IrcNetwork {
	return s.network
}

func (s *Handler) UpdateNetwork(network *domain.IrcNetwork) {
	s.network = network
}

func (s *Handler) Stop() {
	s.cancel()

@ -210,7 +254,23 @@ func (s *Handler) isStopped() bool {
	}
}

func (s *Handler) onConnect(client *irc.Client, channels []domain.IrcChannel) error {
func (s *Handler) Restart() error {
	s.cancel()

	if !s.isStopped() {
		close(s.stopped)
	}

	if s.conn != nil {
		s.conn.Close()
	}

	time.Sleep(2 * time.Second)

	return s.Run()
}

func (s *Handler) onConnect(channels []domain.IrcChannel) error {
	identified := false

	time.Sleep(2 * time.Second)

@ -239,7 +299,7 @@ func (s *Handler) onConnect(client *irc.Client, channels []domain.IrcChannel) er

	if !identified {
		for _, channel := range channels {
			err := s.handleJoinChannel(channel.Name)
			err := s.HandleJoinChannel(channel.Name, channel.Password)
			if err != nil {
				log.Error().Err(err)
				return err

@ -255,36 +315,47 @@ func (s *Handler) OnJoin(msg string) (interface{}, error) {
}

func (s *Handler) onMessage(msg *irc.Message) error {
	//log.Debug().Msgf("raw msg: %v", msg)

	// check if message is from announce bot and correct channel, if not return
	//if msg.Name != s.network. {
	//
	//}

	// parse announce
	channel := &msg.Params[0]
	announcer := &msg.Name
	message := msg.Trailing()
	// TODO add network

	// add correlationID and tracing

	announceID := fmt.Sprintf("%v:%v:%v", s.network.Server, *channel, *announcer)
	announceID = strings.ToLower(announceID)
	// check if message is from announce bot, if not return
	validAnnouncer := s.isValidAnnouncer(*announcer)
	if !validAnnouncer {
		return nil
	}

	// clean message
	cleanedMsg := cleanMessage(message)
	log.Debug().Msgf("%v: %v %v: %v", s.network.Server, *channel, *announcer, cleanedMsg)

	s.lastAnnounce = time.Now()
	s.setLastAnnounce()

	go func() {
		err := s.announceService.Parse(announceID, cleanedMsg)
		if err != nil {
			log.Error().Err(err).Msgf("could not parse line: %v", cleanedMsg)
		}
	}()
	if err := s.sendToAnnounceProcessor(*channel, cleanedMsg); err != nil {
		log.Error().Stack().Err(err).Msgf("could not queue line: %v", cleanedMsg)
		return err
	}

	return nil
}

func (s *Handler) sendToAnnounceProcessor(channel string, msg string) error {
	channel = strings.ToLower(channel)

	// check if queue exists
	queue, ok := s.announceProcessors[channel]
	if !ok {
		return fmt.Errorf("queue '%v' not found", channel)
	}

	// if it exists, add msg
	err := queue.AddLineToQueue(channel, msg)
	if err != nil {
		log.Error().Stack().Err(err).Msgf("could not queue line: %v", msg)
		return err
	}

	return nil
}

@ -302,13 +373,19 @@ func (s *Handler) sendPrivMessage(msg string) error {
	return nil
}

func (s *Handler) handleJoinChannel(channel string) error {
	m := irc.Message{
		Command: "JOIN",
		Params:  []string{channel},
func (s *Handler) HandleJoinChannel(channel string, password string) error {
	// support channel password
	params := []string{channel}
	if password != "" {
		params = append(params, password)
	}

	log.Debug().Msgf("%v: %v", s.network.Server, m.String())
	m := irc.Message{
		Command: "JOIN",
		Params:  params,
	}

	log.Trace().Msgf("%v: sending %v", s.network.Server, m.String())

	time.Sleep(1 * time.Second)

@ -323,6 +400,27 @@ func (s *Handler) handleJoinChannel(channel string) error {
	return nil
}

func (s *Handler) HandlePartChannel(channel string) error {
	m := irc.Message{
		Command: "PART",
		Params:  []string{channel},
	}

	log.Debug().Msgf("%v: %v", s.network.Server, m.String())

	time.Sleep(1 * time.Second)

	err := s.client.Write(m.String())
	if err != nil {
		log.Error().Err(err).Msgf("error handling part: %v", m.String())
		return err
	}

	log.Info().Msgf("Left channel '%v' on network '%s'", channel, s.network.Server)

	return nil
}
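
For clarity, the raw line the JOIN above puts on the wire when a channel password is set looks roughly like this. This is only a sketch reusing the gopkg.in/irc.v3 Message type already imported above; the channel name and password are made up:

package main

import (
	"fmt"

	"gopkg.in/irc.v3"
)

func main() {
	params := []string{"#announce"}
	if password := "s3cret"; password != "" {
		params = append(params, password)
	}

	m := irc.Message{
		Command: "JOIN",
		Params:  params,
	}

	// prints something like: JOIN #announce s3cret
	fmt.Println(m.String())
}
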
func (s *Handler) handleJoined(msg *irc.Message) {
	log.Debug().Msgf("%v: JOINED: %v", s.network.Server, msg.Trailing())

@ -384,6 +482,40 @@ func (s *Handler) handleNickServPRIVMSG(nick, password string) error {
	return nil
}

func (s *Handler) HandleNickServIdentify(nick, password string) error {
	m := irc.Message{
		Command: "PRIVMSG",
		Params:  []string{"NickServ", "IDENTIFY", nick, password},
	}

	log.Debug().Msgf("%v: NickServ: %v", s.network.Server, m.String())

	err := s.client.Write(m.String())
	if err != nil {
		log.Error().Stack().Err(err).Msgf("error identifying with nickserv: %v", m.String())
		return err
	}

	return nil
}

func (s *Handler) HandleNickChange(nick string) error {
	m := irc.Message{
		Command: "NICK",
		Params:  []string{nick},
	}

	log.Debug().Msgf("%v: Nick change: %v", s.network.Server, m.String())

	err := s.client.Write(m.String())
	if err != nil {
		log.Error().Stack().Err(err).Msgf("error changing nick: %v", m.String())
		return err
	}

	return nil
}

func (s *Handler) handleMode(msg *irc.Message) error {
	log.Debug().Msgf("%v: MODE: %v %v", s.network.Server, msg.User, msg.Trailing())

@ -395,7 +527,7 @@ func (s *Handler) handleMode(msg *irc.Message) error {
	}

	for _, ch := range s.network.Channels {
		err := s.handleJoinChannel(ch.Name)
		err := s.HandleJoinChannel(ch.Name, ch.Password)
		if err != nil {
			log.Error().Err(err).Msgf("error joining channel: %v", ch.Name)
			continue

@ -423,11 +555,36 @@ func (s *Handler) handlePing(msg *irc.Message) error {
		return err
	}

	s.lastPing = time.Now()
	s.setLastPing()

	return nil
}

func (s *Handler) isValidAnnouncer(nick string) bool {
	_, ok := s.validAnnouncers[nick]
	if !ok {
		return false
	}

	return true
}

func (s *Handler) setLastAnnounce() {
	s.lastAnnounce = time.Now()
}

func (s *Handler) GetLastAnnounce() time.Time {
	return s.lastAnnounce
}

func (s *Handler) setLastPing() {
	s.lastPing = time.Now()
}

func (s *Handler) GetLastPing() time.Time {
	return s.lastPing
}

// irc line can contain lots of extra stuff like color so lets clean that
func cleanMessage(message string) string {
	var regexMessageClean = `\x0f|\x1f|\x02|\x03(?:[\d]{1,2}(?:,[\d]{1,2})?)?`

@ -5,8 +5,10 @@ import (
	"fmt"
	"sync"

	"github.com/autobrr/autobrr/internal/announce"
	"github.com/autobrr/autobrr/internal/domain"
	"github.com/autobrr/autobrr/internal/filter"
	"github.com/autobrr/autobrr/internal/indexer"
	"github.com/autobrr/autobrr/internal/release"

	"github.com/rs/zerolog/log"
)

@ -18,30 +20,35 @@ type Service interface {
	ListNetworks(ctx context.Context) ([]domain.IrcNetwork, error)
	GetNetworkByID(id int64) (*domain.IrcNetwork, error)
	DeleteNetwork(ctx context.Context, id int64) error
	StoreNetwork(network *domain.IrcNetwork) error
	StoreNetwork(ctx context.Context, network *domain.IrcNetwork) error
	UpdateNetwork(ctx context.Context, network *domain.IrcNetwork) error
	StoreChannel(networkID int64, channel *domain.IrcChannel) error
}

type service struct {
	repo            domain.IrcRepo
	announceService announce.Service
	indexerMap      map[string]string
	handlers        map[string]*Handler
	repo           domain.IrcRepo
	filterService  filter.Service
	indexerService indexer.Service
	releaseService release.Service
	indexerMap     map[string]string
	handlers       map[string]*Handler

	stopWG sync.WaitGroup
	lock   sync.Mutex
}

func NewService(repo domain.IrcRepo, announceService announce.Service) Service {
func NewService(repo domain.IrcRepo, filterService filter.Service, indexerSvc indexer.Service, releaseSvc release.Service) Service {
	return &service{
		repo:            repo,
		announceService: announceService,
		handlers:        make(map[string]*Handler),
		repo:           repo,
		filterService:  filterService,
		indexerService: indexerSvc,
		releaseService: releaseSvc,
		handlers:       make(map[string]*Handler),
	}
}

func (s *service) StartHandlers() {
	networks, err := s.repo.ListNetworks(context.Background())
	networks, err := s.repo.FindActiveNetworks(context.Background())
	if err != nil {
		log.Error().Msgf("failed to list networks: %v", err)
	}

@ -61,9 +68,14 @@ func (s *service) StartHandlers() {
		}
		network.Channels = channels

		handler := NewHandler(network, s.announceService)
		// find indexer definitions for network and add
		definitions := s.indexerService.GetIndexersByIRCNetwork(network.Server)

		s.handlers[network.Name] = handler
		// init new irc handler
		handler := NewHandler(network, s.filterService, s.releaseService, definitions)

		// TODO use network.Server? + nick? Need a way to use multiple indexers for one network if same nick
		s.handlers[network.Server] = handler
		s.lock.Unlock()

		log.Debug().Msgf("starting network: %+v", network.Name)

@ -91,23 +103,33 @@ func (s *service) StopHandlers() {

func (s *service) startNetwork(network domain.IrcNetwork) error {
	// look if we have the network in handlers already, if so start it
	if handler, found := s.handlers[network.Name]; found {
	if existingHandler, found := s.handlers[network.Server]; found {
		log.Debug().Msgf("starting network: %+v", network.Name)

		if handler.conn != nil {
		if existingHandler.conn != nil {
			go func() {
				if err := handler.Run(); err != nil {
					log.Error().Err(err).Msgf("failed to start handler for network %q", handler.network.Name)
				if err := existingHandler.Run(); err != nil {
					log.Error().Err(err).Msgf("failed to start existingHandler for network %q", existingHandler.network.Name)
				}
			}()
		}
	} else {
		// if not found in handlers, lets add it and run it

		handler := NewHandler(network, s.announceService)

		s.lock.Lock()
		s.handlers[network.Name] = handler
		channels, err := s.repo.ListChannels(network.ID)
		if err != nil {
			log.Error().Err(err).Msgf("failed to list channels for network %q", network.Server)
		}
		network.Channels = channels

		// find indexer definitions for network and add
		definitions := s.indexerService.GetIndexersByIRCNetwork(network.Server)

		// init new irc handler
		handler := NewHandler(network, s.filterService, s.releaseService, definitions)

		s.handlers[network.Server] = handler
		s.lock.Unlock()

		log.Debug().Msgf("starting network: %+v", network.Name)

@ -126,6 +148,143 @@ func (s *service) startNetwork(network domain.IrcNetwork) error {
	return nil
}

func (s *service) checkIfNetworkRestartNeeded(network *domain.IrcNetwork) error {
	// look if we have the network in handlers, if so restart it
	// TODO check if we need to add indexerDefinitions etc
	if existingHandler, found := s.handlers[network.Server]; found {
		log.Debug().Msgf("decide if irc network handler needs restart or updating: %+v", network.Name)

		// if server, tls, invite command, port : changed - restart
		// if nickserv account, nickserv password : changed - stay connected, and change those
		// if channels len : changes - join or leave
		if existingHandler.conn != nil {
			handler := existingHandler.GetNetwork()
			restartNeeded := false

			if handler.Server != network.Server {
				restartNeeded = true
			} else if handler.Port != network.Port {
				restartNeeded = true
			} else if handler.TLS != network.TLS {
				restartNeeded = true
			} else if handler.InviteCommand != network.InviteCommand {
				restartNeeded = true
			}
			if restartNeeded {
				log.Info().Msgf("irc: restarting network: %+v", network.Name)

				// we need to reinitialize with new network config
				existingHandler.UpdateNetwork(network)

				go func() {
					if err := existingHandler.Restart(); err != nil {
						log.Error().Stack().Err(err).Msgf("failed to restart network %q", existingHandler.network.Name)
					}
				}()

				// return now since the restart will read the network again OR FIXME
				return nil
			}

			if handler.NickServ.Account != network.NickServ.Account {
				log.Debug().Msg("changing nick")

				err := existingHandler.HandleNickChange(network.NickServ.Account)
				if err != nil {
					log.Error().Stack().Err(err).Msgf("failed to change nick %q", network.NickServ.Account)
				}
			} else if handler.NickServ.Password != network.NickServ.Password {
				log.Debug().Msg("nickserv: changing password")

				err := existingHandler.HandleNickServIdentify(network.NickServ.Account, network.NickServ.Password)
				if err != nil {
					log.Error().Stack().Err(err).Msgf("failed to identify with nickserv %q", network.NickServ.Account)
				}
			}

			// join or leave channels
			// loop over handler channels,
			var expectedChannels = make(map[string]struct{}, 0)
			var handlerChannels = make(map[string]struct{}, 0)
			var channelsToLeave = make([]string, 0)
			var channelsToJoin = make([]domain.IrcChannel, 0)

			// create map of expected channels
			for _, channel := range network.Channels {
				expectedChannels[channel.Name] = struct{}{}
			}

			// check current channels of handler against expected
			for _, handlerChan := range handler.Channels {
				handlerChannels[handlerChan.Name] = struct{}{}

				_, ok := expectedChannels[handlerChan.Name]
				if ok {
					// if handler channel matches network channel next
					continue
				}

				// if not expected, leave
				channelsToLeave = append(channelsToLeave, handlerChan.Name)
			}

			for _, channel := range network.Channels {
				_, ok := handlerChannels[channel.Name]
				if ok {
					continue
				}

				// if expected channel not in handler channels, add to join
				// use channel struct for extra info
				channelsToJoin = append(channelsToJoin, channel)
			}

			// leave channels
			for _, leaveChannel := range channelsToLeave {
				err := existingHandler.HandlePartChannel(leaveChannel)
				if err != nil {
					log.Error().Stack().Err(err).Msgf("failed to leave channel: %q", leaveChannel)
				}
			}

			// join channels
			for _, joinChannel := range channelsToJoin {
				// TODO handle invite commands before?
				err := existingHandler.HandleJoinChannel(joinChannel.Name, joinChannel.Password)
				if err != nil {
					log.Error().Stack().Err(err).Msgf("failed to join channel: %q", joinChannel.Name)
				}
			}
		}
	} else {
		err := s.startNetwork(*network)
		if err != nil {
			log.Error().Stack().Err(err).Msgf("failed to start network: %q", network.Name)
		}
	}

	return nil
}
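
The channel reconciliation in checkIfNetworkRestartNeeded is a plain set difference between the handler's current channels and the updated network config. A stripped-down sketch of the same computation with hypothetical channel names:

package main

import "fmt"

func main() {
	current := []string{"#announce-old", "#announce-keep"}
	expected := []string{"#announce-keep", "#announce-new"}

	expectedSet := map[string]struct{}{}
	for _, c := range expected {
		expectedSet[c] = struct{}{}
	}
	currentSet := map[string]struct{}{}
	for _, c := range current {
		currentSet[c] = struct{}{}
	}

	var toLeave, toJoin []string
	for _, c := range current {
		if _, ok := expectedSet[c]; !ok {
			toLeave = append(toLeave, c) // in handler, not in config -> PART
		}
	}
	for _, c := range expected {
		if _, ok := currentSet[c]; !ok {
			toJoin = append(toJoin, c) // in config, not in handler -> JOIN
		}
	}

	fmt.Println("leave:", toLeave) // leave: [#announce-old]
	fmt.Println("join:", toJoin)   // join: [#announce-new]
}
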
func (s *service) restartNetwork(network domain.IrcNetwork) error {
	// look if we have the network in handlers, if so restart it
	if existingHandler, found := s.handlers[network.Server]; found {
		log.Info().Msgf("restarting network: %+v", network.Name)

		if existingHandler.conn != nil {
			go func() {
				if err := existingHandler.Restart(); err != nil {
					log.Error().Err(err).Msgf("failed to restart network %q", existingHandler.network.Name)
				}
			}()
		}
	}

	// TODO handle full restart

	return nil
}

func (s *service) StopNetwork(name string) error {
	if handler, found := s.handlers[name]; found {
		handler.Stop()

@ -135,6 +294,27 @@ func (s *service) StopNetwork(name string) error {
	return nil
}

func (s *service) StopAndRemoveNetwork(name string) error {
	if handler, found := s.handlers[name]; found {
		handler.Stop()

		// remove from handlers
		delete(s.handlers, name)
		log.Debug().Msgf("stopped network: %+v", name)
	}

	return nil
}

func (s *service) StopNetworkIfRunning(name string) error {
	if handler, found := s.handlers[name]; found {
		handler.Stop()
		log.Debug().Msgf("stopped network: %+v", name)
	}

	return nil
}

func (s *service) GetNetworkByID(id int64) (*domain.IrcNetwork, error) {
	network, err := s.repo.GetNetworkByID(id)
	if err != nil {

@ -184,7 +364,8 @@ func (s *service) DeleteNetwork(ctx context.Context, id int64) error {
	log.Debug().Msgf("delete network: %v", id)

	// Remove network and handler
	if err = s.StopNetwork(network.Name); err != nil {
	//if err = s.StopNetwork(network.Server); err != nil {
	if err = s.StopAndRemoveNetwork(network.Server); err != nil {
		return err
	}

@ -195,55 +376,91 @@ func (s *service) DeleteNetwork(ctx context.Context, id int64) error {
	return nil
}

func (s *service) StoreNetwork(network *domain.IrcNetwork) error {
	if err := s.repo.StoreNetwork(network); err != nil {
		return err
	}

	log.Debug().Msgf("store network: %+v", network)
func (s *service) UpdateNetwork(ctx context.Context, network *domain.IrcNetwork) error {

	if network.Channels != nil {
		for _, channel := range network.Channels {
			if err := s.repo.StoreChannel(network.ID, &channel); err != nil {
				return err
			}
		if err := s.repo.StoreNetworkChannels(ctx, network.ID, network.Channels); err != nil {
			return err
		}
	}

	if err := s.repo.UpdateNetwork(ctx, network); err != nil {
		return err
	}
	log.Debug().Msgf("irc.service: update network: %+v", network)

	// stop or start network
	// TODO get current state to see if enabled or not?
	if network.Enabled {
		err := s.startNetwork(*network)
		// if server, tls, invite command, port : changed - restart
		// if nickserv account, nickserv password : changed - stay connected, and change those
		// if channels len : changes - join or leave
		err := s.checkIfNetworkRestartNeeded(network)
		if err != nil {
			log.Error().Err(err).Msgf("could not start network: %+v", network.Name)
			return fmt.Errorf("could not start network: %v", network.Name)
			log.Error().Stack().Err(err).Msgf("could not restart network: %+v", network.Name)
			return fmt.Errorf("could not restart network: %v", network.Name)
		}

	} else {
		err := s.StopNetwork(network.Name)
		// TODO take into account multiple channels per network
		//err := s.StopNetwork(network.Server)
		err := s.StopAndRemoveNetwork(network.Server)
		if err != nil {
			log.Error().Err(err).Msgf("could not stop network: %+v", network.Name)
			log.Error().Stack().Err(err).Msgf("could not stop network: %+v", network.Name)
			return fmt.Errorf("could not stop network: %v", network.Name)
		}
	}

	// stop or start network
	//if !network.Enabled {
	//	log.Debug().Msgf("stopping network: %+v", network.Name)
	//
	//	err := s.StopNetwork(network.Name)
	//	if err != nil {
	//		log.Error().Err(err).Msgf("could not stop network: %+v", network.Name)
	//		return fmt.Errorf("could not stop network: %v", network.Name)
	//	}
	//} else {
	//	log.Debug().Msgf("starting network: %+v", network.Name)
	//
	//	err := s.startNetwork(*network)
	//	if err != nil {
	//		log.Error().Err(err).Msgf("could not start network: %+v", network.Name)
	//		return fmt.Errorf("could not start network: %v", network.Name)
	//	}
	//}
	return nil
}

func (s *service) StoreNetwork(ctx context.Context, network *domain.IrcNetwork) error {
	existingNetwork, err := s.repo.CheckExistingNetwork(ctx, network)
	if err != nil {
		log.Error().Err(err).Msg("could not check for existing network")
		return err
	}

	if existingNetwork == nil {
		if err := s.repo.StoreNetwork(network); err != nil {
			return err
		}
		log.Debug().Msgf("store network: %+v", network)

		if network.Channels != nil {
			for _, channel := range network.Channels {
				if err := s.repo.StoreChannel(network.ID, &channel); err != nil {
					return err
				}
			}
		}

		return nil
	}

	if network.Channels != nil {
		for _, channel := range network.Channels {
			// TODO store or add. Make sure it doesn't delete before
			if err := s.repo.StoreChannel(existingNetwork.ID, &channel); err != nil {
				return err
			}
		}

		// append channels to existing network
		network.Channels = append(network.Channels, existingNetwork.Channels...)
	}

	if existingNetwork.Enabled {
		// if server, tls, invite command, port : changed - restart
		// if nickserv account, nickserv password : changed - stay connected, and change those
		// if channels len : changes - join or leave

		err := s.checkIfNetworkRestartNeeded(network)
		if err != nil {
			log.Error().Err(err).Msgf("could not restart network: %+v", network.Name)
			return fmt.Errorf("could not restart network: %v", network.Name)
		}
	}

	return nil
}
13 test/integration/docker-compose.yml Normal file

@ -0,0 +1,13 @@
version: "3.8"

services:
  ergo:
    image: ghcr.io/ergochat/ergo:stable
    ports:
      - "6667:6667/tcp"
      - "6697:6697/tcp"
    volumes:
      - data:/ircd

volumes:
  data: