Feature: Get size by API for PTP, BTN and GGN (#66)

* chore: add package

* feat: get size by api for ptp and btn

* feat: download and parse torrent if not api

* feat: bypass tls check and load meta from file

* fix: no invite command needed for btn

* feat: add ggn api

* feat: improve logging

* feat: build request url

* feat: improve err logging
Ludvig Lundgren 2022-01-05 23:52:29 +01:00 committed by GitHub
parent d2aa7c1e7e
commit 2ea2293745
32 changed files with 2181 additions and 99 deletions
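At a high level, the change works like this: when a filter sets a min/max size but the announce did not carry one, the filter service first asks the indexer API (PTP, BTN, GGN) for the torrent size, and only falls back to downloading and parsing the .torrent file when the indexer has no API support. A rough Go sketch of that decision, not part of the commit; fetchSizeFromAPI and sizeFromTorrentFile are stand-ins for the apiService and release methods shown in the diffs below.

package main

import "fmt"

func fetchSizeFromAPI(indexer, torrentID string) (uint64, error) {
	// stand-in for apiService.GetTorrentByID(indexer, torrentID) + ReleaseSizeBytes()
	return 4_500_000_000, nil
}

func sizeFromTorrentFile(torrentURL string) (uint64, error) {
	// stand-in for release.DownloadTorrentFile + MetaInfo.UnmarshalInfo().TotalLength()
	return 4_500_000_000, nil
}

func releaseSize(indexer, torrentID, torrentURL string) (uint64, error) {
	switch indexer {
	case "ptp", "btn", "ggn":
		// these trackers have an API client, so one API call replaces a torrent download
		return fetchSizeFromAPI(indexer, torrentID)
	default:
		// no API support: download the .torrent once and read the size from its metainfo
		return sizeFromTorrentFile(torrentURL)
	}
}

func main() {
	size, _ := releaseSize("ptp", "123456", "")
	fmt.Printf("%d bytes\n", size)
}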


@ -5,10 +5,8 @@ import (
"os"
"path"
"github.com/anacrolix/torrent/metainfo"
"github.com/rs/zerolog/log"
"github.com/autobrr/autobrr/internal/client"
"github.com/autobrr/autobrr/internal/domain"
)
@ -32,25 +30,30 @@ func (s *service) RunActions(actions []domain.Action, release domain.Release) er
s.bus.Publish("release:update-push-status", release.ID, domain.ReleasePushStatusApproved)
case domain.ActionTypeExec:
if tmpFile == "" {
tmpFile, hash, err = downloadFile(release.TorrentURL)
if release.TorrentTmpFile == "" {
t, err := release.DownloadTorrentFile(nil)
if err != nil {
log.Error().Stack().Err(err)
return err
}
tmpFile = t.TmpFileName
}
go func(release domain.Release, action domain.Action, tmpFile string) {
s.execCmd(release, action, tmpFile)
s.bus.Publish("release:update-push-status", release.ID, domain.ReleasePushStatusApproved)
}(release, action, tmpFile)
case domain.ActionTypeWatchFolder:
if tmpFile == "" {
tmpFile, hash, err = downloadFile(release.TorrentURL)
if release.TorrentTmpFile == "" {
t, err := release.DownloadTorrentFile(nil)
if err != nil {
log.Error().Stack().Err(err)
return err
}
tmpFile = t.TmpFileName
}
s.watchFolder(action.WatchFolder, tmpFile)
s.bus.Publish("release:update-push-status", release.ID, domain.ReleasePushStatusApproved)
@ -65,12 +68,14 @@ func (s *service) RunActions(actions []domain.Action, release domain.Release) er
s.bus.Publish("release:update-push-status-rejected", release.ID, "deluge busy")
continue
}
if tmpFile == "" {
tmpFile, hash, err = downloadFile(release.TorrentURL)
if release.TorrentTmpFile == "" {
t, err := release.DownloadTorrentFile(nil)
if err != nil {
log.Error().Stack().Err(err)
return err
}
tmpFile = t.TmpFileName
}
go func(action domain.Action, tmpFile string) {
@ -92,12 +97,15 @@ func (s *service) RunActions(actions []domain.Action, release domain.Release) er
continue
}
if tmpFile == "" {
tmpFile, hash, err = downloadFile(release.TorrentURL)
if release.TorrentTmpFile == "" {
t, err := release.DownloadTorrentFile(nil)
if err != nil {
log.Error().Stack().Err(err)
return err
}
tmpFile = t.TmpFileName
hash = t.MetaInfo.HashInfoBytes().String()
}
go func(action domain.Action, hash string, tmpFile string) {
@ -145,33 +153,43 @@ func (s *service) RunActions(actions []domain.Action, release domain.Release) er
return nil
}
// downloadFile returns tmpFile, hash, error
func downloadFile(url string) (string, string, error) {
// create http client
c := client.NewHttpClient()
func (s *service) CheckCanDownload(actions []domain.Action) bool {
for _, action := range actions {
if !action.Enabled {
// only run active actions
continue
}
// download torrent file
// TODO check extra headers, cookie
res, err := c.DownloadFile(url, nil)
if err != nil {
log.Error().Stack().Err(err).Msgf("could not download file: %v", url)
return "", "", err
log.Debug().Msgf("action-service: check can download action: %v", action.Name)
switch action.Type {
case domain.ActionTypeDelugeV1, domain.ActionTypeDelugeV2:
canDownload, err := s.delugeCheckRulesCanDownload(action)
if err != nil {
log.Error().Stack().Err(err).Msgf("error checking client rules: %v", action.Name)
continue
}
if !canDownload {
continue
}
return true
case domain.ActionTypeQbittorrent:
canDownload, err := s.qbittorrentCheckRulesCanDownload(action)
if err != nil {
log.Error().Stack().Err(err).Msgf("error checking client rules: %v", action.Name)
continue
}
if !canDownload {
continue
}
return true
}
}
// match more filters like torrent size
// Get meta info from file to find out the hash for later use
meta, err := metainfo.LoadFromFile(res.FileName)
//meta, err := metainfo.Load(res.Body)
if err != nil {
log.Error().Stack().Err(err).Msgf("metainfo could not open file: %v", res.FileName)
return "", "", err
}
// torrent info hash used for re-announce
hash := meta.HashInfoBytes().String()
return res.FileName, hash, nil
return false
}
func (s *service) test(name string) {
@ -190,10 +208,10 @@ func (s *service) watchFolder(dir string, torrentFile string) {
defer original.Close()
_, tmpFileName := path.Split(torrentFile)
fullFileName := path.Join(dir, tmpFileName)
fullFileName := path.Join(dir, tmpFileName+".torrent")
// Create new file
newFile, err := os.Create(fullFileName + ".torrent")
newFile, err := os.Create(fullFileName)
if err != nil {
log.Error().Stack().Err(err).Msgf("could not create new temp file '%v'", fullFileName)
return
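Each action branch above now guards the download with release.TorrentTmpFile == "", so the torrent file is fetched at most once per release no matter how many actions run. One caveat worth noting: zerolog only writes an event once Msg or Send is called, so a bare log.Error().Stack().Err(err) emits nothing. A minimal sketch of the guard with a hypothetical helper name (ensureTorrentFile is not in the commit):

package action

import (
	"github.com/rs/zerolog/log"

	"github.com/autobrr/autobrr/internal/domain"
)

// ensureTorrentFile downloads the torrent only if no tmp file exists yet and
// reuses release.TorrentTmpFile for every later action on the same release.
func ensureTorrentFile(release *domain.Release) (string, error) {
	if release.TorrentTmpFile != "" {
		// already downloaded by an earlier action in this run
		return release.TorrentTmpFile, nil
	}

	t, err := release.DownloadTorrentFile(nil)
	if err != nil {
		// the trailing Msg call is what actually writes the zerolog event
		log.Error().Stack().Err(err).Msg("could not download torrent file")
		return "", err
	}

	return t.TmpFileName, nil
}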


@ -17,6 +17,7 @@ type Service interface {
ToggleEnabled(actionID int) error
RunActions(actions []domain.Action, release domain.Release) error
CheckCanDownload(actions []domain.Action) bool
}
type service struct {


@ -230,17 +230,13 @@ func (a *announceProcessor) onLinesMatched(def domain.IndexerDefinition, vars ma
return err
}
// generate torrent url
torrentUrl, err := a.processTorrentUrl(def.Parse.Match.TorrentURL, vars, def.SettingsMap, def.Parse.Match.Encode)
// parse torrentUrl
err = release.ParseTorrentUrl(def.Parse.Match.TorrentURL, vars, def.SettingsMap, def.Parse.Match.Encode)
if err != nil {
log.Error().Stack().Err(err).Msg("announce: could not process torrent url")
log.Error().Stack().Err(err).Msg("announce: could not parse torrent url")
return err
}
if torrentUrl != "" {
release.TorrentURL = torrentUrl
}
return nil
}


@ -1,15 +1,14 @@
package client
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"time"
"github.com/anacrolix/torrent/metainfo"
"github.com/rs/zerolog/log"
)
@ -18,6 +17,11 @@ type DownloadFileResponse struct {
FileName string
}
type DownloadTorrentFileResponse struct {
MetaInfo *metainfo.MetaInfo
TmpFileName string
}
type HttpClient struct {
http *http.Client
}
@ -33,42 +37,36 @@ func NewHttpClient() *HttpClient {
func (c *HttpClient) DownloadFile(url string, opts map[string]string) (*DownloadFileResponse, error) {
if url == "" {
return nil, nil
return nil, errors.New("download_file: url can't be empty")
}
// create md5 hash of url for tmp file
hash := md5.Sum([]byte(url))
hashString := hex.EncodeToString(hash[:])
tmpFileName := fmt.Sprintf("/tmp/%v", hashString)
// Create the file
out, err := os.Create(tmpFileName)
// Create tmp file
tmpFile, err := os.CreateTemp("", "autobrr-")
if err != nil {
log.Error().Stack().Err(err).Msgf("error creating temp file: %v", tmpFileName)
log.Error().Stack().Err(err).Msg("error creating temp file")
return nil, err
}
defer out.Close()
defer tmpFile.Close()
// Get the data
resp, err := http.Get(url)
if err != nil {
log.Error().Stack().Err(err).Msgf("error downloading file %v from %v", tmpFileName, url)
log.Error().Stack().Err(err).Msgf("error downloading file from %v", url)
return nil, err
}
defer resp.Body.Close()
// retry logic
if resp.StatusCode != 200 {
log.Error().Stack().Err(err).Msgf("error downloading file: %v - bad status: %d", tmpFileName, resp.StatusCode)
if resp.StatusCode != http.StatusOK {
log.Error().Stack().Err(err).Msgf("error downloading file from: %v - bad status: %d", url, resp.StatusCode)
return nil, err
}
// Write the body to file
_, err = io.Copy(out, resp.Body)
_, err = io.Copy(tmpFile, resp.Body)
if err != nil {
log.Error().Stack().Err(err).Msgf("error writing downloaded file: %v", tmpFileName)
log.Error().Stack().Err(err).Msgf("error writing downloaded file: %v", tmpFile.Name())
return nil, err
}
@ -76,7 +74,7 @@ func (c *HttpClient) DownloadFile(url string, opts map[string]string) (*Download
res := DownloadFileResponse{
Body: &resp.Body,
FileName: tmpFileName,
FileName: tmpFile.Name(),
}
if res.FileName == "" || res.Body == nil {
@ -84,7 +82,65 @@ func (c *HttpClient) DownloadFile(url string, opts map[string]string) (*Download
return nil, errors.New("error downloading file, no tmp file")
}
log.Debug().Msgf("successfully downloaded file: %v", tmpFileName)
log.Debug().Msgf("successfully downloaded file: %v", tmpFile.Name())
return &res, nil
}
func (c *HttpClient) DownloadTorrentFile(url string, opts map[string]string) (*DownloadTorrentFileResponse, error) {
if url == "" {
return nil, errors.New("download_file: url can't be empty")
}
// Create tmp file
tmpFile, err := os.CreateTemp("", "autobrr-")
if err != nil {
log.Error().Stack().Err(err).Msg("error creating temp file")
return nil, err
}
defer tmpFile.Close()
// Get the data
resp, err := http.Get(url)
if err != nil {
log.Error().Stack().Err(err).Msgf("error downloading file from %v", url)
return nil, err
}
defer resp.Body.Close()
// retry logic
if resp.StatusCode != http.StatusOK {
log.Error().Stack().Err(err).Msgf("error downloading file from: %v - bad status: %d", url, resp.StatusCode)
return nil, err
}
// Write the body to file
_, err = io.Copy(tmpFile, resp.Body)
if err != nil {
log.Error().Stack().Err(err).Msgf("error writing downloaded file: %v", tmpFile.Name())
return nil, err
}
meta, err := metainfo.Load(resp.Body)
if err != nil {
log.Error().Stack().Err(err).Msgf("metainfo could not load file contents: %v", tmpFile.Name())
return nil, err
}
// remove file if fail
res := DownloadTorrentFileResponse{
MetaInfo: meta,
TmpFileName: tmpFile.Name(),
}
if res.TmpFileName == "" || res.MetaInfo == nil {
log.Error().Stack().Err(err).Msgf("tmp file error - empty body: %v", url)
return nil, errors.New("error downloading file, no tmp file")
}
log.Debug().Msgf("successfully downloaded file: %v", tmpFile.Name())
return &res, nil
}
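Worth flagging in this client-level DownloadTorrentFile: metainfo.Load(resp.Body) runs after io.Copy has already drained the response body, so it will most likely see an empty reader. The domain-level Release.DownloadTorrentFile further down avoids this by re-reading the tmp file it just wrote; a sketch of that pattern (the helper name is illustrative):

package client

import "github.com/anacrolix/torrent/metainfo"

// loadMetaFromTmpFile reads the metainfo back from the tmp file that was just
// written, rather than from the already-consumed HTTP response body.
func loadMetaFromTmpFile(tmpFileName string) (*metainfo.MetaInfo, string, error) {
	meta, err := metainfo.LoadFromFile(tmpFileName)
	if err != nil {
		return nil, "", err
	}

	// the info hash is what gets used later for re-announce lookups
	hash := meta.HashInfoBytes().String()

	return meta, hash, nil
}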


@ -1,6 +1,10 @@
package domain
import "context"
import (
"context"
"github.com/dustin/go-humanize"
)
type IndexerRepo interface {
Store(indexer Indexer) (*Indexer, error)
@ -29,12 +33,22 @@ type IndexerDefinition struct {
Privacy string `json:"privacy"`
Protocol string `json:"protocol"`
URLS []string `json:"urls"`
Supports []string `json:"supports"`
Settings []IndexerSetting `json:"settings"`
SettingsMap map[string]string `json:"-"`
IRC *IndexerIRC `json:"irc"`
Parse IndexerParse `json:"parse"`
}
func (i IndexerDefinition) HasApi() bool {
for _, a := range i.Supports {
if a == "api" {
return true
}
}
return false
}
type IndexerSetting struct {
Name string `json:"name"`
Required bool `json:"required,omitempty"`
@ -74,3 +88,22 @@ type IndexerParseMatch struct {
TorrentURL string `json:"torrenturl"`
Encode []string `json:"encode"`
}
type TorrentBasic struct {
Id string `json:"Id"`
TorrentId string `json:"TorrentId,omitempty"`
InfoHash string `json:"InfoHash"`
Size string `json:"Size"`
}
func (t TorrentBasic) ReleaseSizeBytes() uint64 {
if t.Size == "" {
return 0
}
releaseSizeBytes, err := humanize.ParseBytes(t.Size)
if err != nil {
// log could not parse into bytes
}
return releaseSizeBytes
}
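Tracker APIs return the size as a string, so ReleaseSizeBytes leans on go-humanize to convert it to bytes; if parsing fails, the value silently falls back to 0 since the error branch is left empty. Roughly how the conversion behaves (values are illustrative; ParseBytes treats GiB as binary and GB as decimal units):

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// decimal vs binary units, as go-humanize parses them
	gb, _ := humanize.ParseBytes("4 GB")   // 4_000_000_000
	gib, _ := humanize.ParseBytes("4 GiB") // 4_294_967_296
	fmt.Println(gb, gib)
}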


@ -1,16 +1,24 @@
package domain
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"html"
"io"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"text/template"
"time"
"github.com/autobrr/autobrr/pkg/wildcard"
"github.com/anacrolix/torrent/metainfo"
"github.com/dustin/go-humanize"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
@ -37,6 +45,7 @@ type Release struct {
GroupID string `json:"group_id"`
TorrentID string `json:"torrent_id"`
TorrentURL string `json:"-"`
TorrentTmpFile string `json:"-"`
TorrentName string `json:"torrent_name"` // full release name
Size uint64 `json:"size"`
Raw string `json:"raw"` // Raw release
@ -478,6 +487,120 @@ func (r *Release) extractReleaseTags() error {
return nil
}
func (r *Release) ParseTorrentUrl(match string, vars map[string]string, extraVars map[string]string, encode []string) error {
tmpVars := map[string]string{}
// copy vars to new tmp map
for k, v := range vars {
tmpVars[k] = v
}
// merge extra vars with vars
if extraVars != nil {
for k, v := range extraVars {
tmpVars[k] = v
}
}
// handle url encode of values
if encode != nil {
for _, e := range encode {
if v, ok := tmpVars[e]; ok {
// url encode value
t := url.QueryEscape(v)
tmpVars[e] = t
}
}
}
// setup text template to inject variables into
tmpl, err := template.New("torrenturl").Parse(match)
if err != nil {
log.Error().Err(err).Msg("could not create torrent url template")
return err
}
var urlBytes bytes.Buffer
err = tmpl.Execute(&urlBytes, &tmpVars)
if err != nil {
log.Error().Err(err).Msg("could not write torrent url template output")
return err
}
r.TorrentURL = urlBytes.String()
// TODO handle cookies
return nil
}
func (r *Release) DownloadTorrentFile(opts map[string]string) (*DownloadTorrentFileResponse, error) {
if r.TorrentURL == "" {
return nil, errors.New("download_file: url can't be empty")
} else if r.TorrentTmpFile != "" {
// already downloaded
return nil, nil
}
customTransport := http.DefaultTransport.(*http.Transport).Clone()
customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
client := &http.Client{Transport: customTransport}
// Get the data
resp, err := client.Get(r.TorrentURL)
if err != nil {
log.Error().Stack().Err(err).Msg("error downloading file")
return nil, err
}
defer resp.Body.Close()
// retry logic
if resp.StatusCode != http.StatusOK {
log.Error().Stack().Err(err).Msgf("error downloading file from: %v - bad status: %d", r.TorrentURL, resp.StatusCode)
return nil, err
}
// Create tmp file
tmpFile, err := os.CreateTemp("", "autobrr-")
if err != nil {
log.Error().Stack().Err(err).Msg("error creating temp file")
return nil, err
}
defer tmpFile.Close()
r.TorrentTmpFile = tmpFile.Name()
// Write the body to file
_, err = io.Copy(tmpFile, resp.Body)
if err != nil {
log.Error().Stack().Err(err).Msgf("error writing downloaded file: %v", tmpFile.Name())
return nil, err
}
meta, err := metainfo.LoadFromFile(tmpFile.Name())
if err != nil {
log.Error().Stack().Err(err).Msgf("metainfo could not load file contents: %v", tmpFile.Name())
return nil, err
}
// remove file if fail
res := DownloadTorrentFileResponse{
MetaInfo: meta,
TmpFileName: tmpFile.Name(),
}
if res.TmpFileName == "" || res.MetaInfo == nil {
log.Error().Stack().Err(err).Msgf("tmp file error - empty body: %v", r.TorrentURL)
return nil, errors.New("error downloading file, no tmp file")
}
log.Debug().Msgf("successfully downloaded file: %v", tmpFile.Name())
return &res, nil
}
func (r *Release) addRejection(reason string) {
r.Rejections = append(r.Rejections, reason)
}
@ -612,9 +735,9 @@ func (r *Release) CheckFilter(filter Filter) bool {
}
// CheckSizeFilter additional size check
// for indexers that doesn't announce size, like some cabals
// for indexers that doesn't announce size, like some gazelle based
// set flag r.AdditionalSizeCheckRequired if there's a size in the filter, otherwise go ahead
// implement API for ptp,btn,bhd,ggn to check for size if needed
// implement API for ptp,btn,ggn to check for size if needed
// for others pull down torrent and do check
func (r *Release) CheckSizeFilter(minSize string, maxSize string) bool {
@ -667,6 +790,10 @@ func (r *Release) MapVars(varMap map[string]string) error {
r.TorrentName = html.UnescapeString(torrentName)
}
if torrentID, err := getStringMapValue(varMap, "torrentId"); err == nil {
r.TorrentID = torrentID
}
if category, err := getStringMapValue(varMap, "category"); err == nil {
r.Category = category
}
@ -1153,6 +1280,11 @@ func cleanReleaseName(input string) string {
return processedString
}
type DownloadTorrentFileResponse struct {
MetaInfo *metainfo.MetaInfo
TmpFileName string
}
type ReleaseStats struct {
TotalCount int64 `json:"total_count"`
FilteredCount int64 `json:"filtered_count"`
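ParseTorrentUrl renders the indexer definition's torrenturl template against the announce variables merged with the user's settings, URL-escaping any variables listed under encode before the template is executed. A small illustration of the same text/template mechanics, with a made-up tracker URL and passkey:

package main

import (
	"bytes"
	"fmt"
	"net/url"
	"text/template"
)

func main() {
	// made-up template in the same shape as a definition's torrenturl field
	match := "https://tracker.example/download/{{ .torrentId }}/{{ .passkey }}"

	vars := map[string]string{
		"torrentId": "123456",
		"passkey":   url.QueryEscape("abc def"), // variables listed under "encode" are escaped first
	}

	tmpl, err := template.New("torrenturl").Parse(match)
	if err != nil {
		panic(err)
	}

	var out bytes.Buffer
	if err := tmpl.Execute(&out, vars); err != nil {
		panic(err)
	}

	fmt.Println(out.String()) // https://tracker.example/download/123456/abc+def
}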


@ -4,6 +4,8 @@ import (
"context"
"errors"
"github.com/anacrolix/torrent/metainfo"
"github.com/dustin/go-humanize"
"github.com/rs/zerolog/log"
"github.com/autobrr/autobrr/internal/domain"
@ -25,12 +27,14 @@ type service struct {
repo domain.FilterRepo
actionRepo domain.ActionRepo
indexerSvc indexer.Service
apiService indexer.APIService
}
func NewService(repo domain.FilterRepo, actionRepo domain.ActionRepo, indexerSvc indexer.Service) Service {
func NewService(repo domain.FilterRepo, actionRepo domain.ActionRepo, apiService indexer.APIService, indexerSvc indexer.Service) Service {
return &service{
repo: repo,
actionRepo: actionRepo,
apiService: apiService,
indexerSvc: indexerSvc,
}
}
@ -175,44 +179,113 @@ func (s *service) Delete(ctx context.Context, filterID int) error {
}
func (s *service) FindAndCheckFilters(release *domain.Release) (bool, *domain.Filter, error) {
// find all enabled filters for indexer
filters, err := s.repo.FindByIndexerIdentifier(release.Indexer)
if err != nil {
log.Error().Err(err).Msgf("could not find filters for indexer: %v", release.Indexer)
log.Error().Err(err).Msgf("filter-service.find_and_check_filters: could not find filters for indexer: %v", release.Indexer)
return false, nil, err
}
log.Trace().Msgf("filter-service.find_and_check_filters: found (%d) active filters to check for indexer '%v'", len(filters), release.Indexer)
// save outside of loop to check multiple filters with only one fetch
var torrentInfo *domain.TorrentBasic
var torrentFileRes *domain.DownloadTorrentFileResponse
var torrentMetaInfo metainfo.Info
// loop and check release to filter until match
for _, f := range filters {
log.Trace().Msgf("checking filter: %+v", f.Name)
log.Trace().Msgf("filter-service.find_and_check_filters: checking filter: %+v", f.Name)
matchedFilter := release.CheckFilter(f)
// if matched, attach actions and return the f
if matchedFilter {
//release.Filter = &f
//release.FilterID = f.ID
//release.FilterName = f.Name
// if matched, do additional size check if needed, attach actions and return the filter
log.Debug().Msgf("found and matched filter: %+v", f.Name)
log.Debug().Msgf("filter-service.find_and_check_filters: found and matched filter: %+v", f.Name)
// TODO do additional size check against indexer api or torrent for size
// Some indexers do not announce the size and if size (min,max) is set in a filter then it will need
// additional size check. Some indexers have api implemented to fetch this data and for the others
// it will download the torrent file to parse and make the size check. This is all to minimize the amount of downloads.
// do additional size check against indexer api or torrent for size
if release.AdditionalSizeCheckRequired {
log.Debug().Msgf("additional size check required for: %+v", f.Name)
// check if indexer = btn,ptp,ggn,red
// fetch api for data
// else download torrent and add to tmpPath
// if size != response.size
// r.RecheckSizeFilter(f)
//continue
log.Debug().Msgf("filter-service.find_and_check_filters: (%v) additional size check required", f.Name)
// check if indexer = btn,ptp (ggn,red later)
if release.Indexer == "ptp" || release.Indexer == "btn" || release.Indexer == "ggn" {
// fetch torrent info from api
// save outside of loop to check multiple filters with only one fetch
if torrentInfo == nil {
torrentInfo, err = s.apiService.GetTorrentByID(release.Indexer, release.TorrentID)
if err != nil || torrentInfo == nil {
log.Error().Stack().Err(err).Msgf("filter-service.find_and_check_filters: (%v) could not get torrent: '%v' from: %v", f.Name, release.TorrentID, release.Indexer)
continue
}
log.Debug().Msgf("filter-service.find_and_check_filters: (%v) got torrent info: %+v", f.Name, torrentInfo)
}
// compare size against filters
match, err := checkSizeFilter(f.MinSize, f.MaxSize, torrentInfo.ReleaseSizeBytes())
if err != nil {
log.Error().Stack().Err(err).Msgf("filter-service.find_and_check_filters: (%v) could not check size filter", f.Name)
continue
}
// no match, lets continue to next filter
if !match {
log.Debug().Msgf("filter-service.find_and_check_filters: (%v) filter did not match after additional size check, trying next", f.Name)
continue
}
// store size on the release
release.Size = torrentInfo.ReleaseSizeBytes()
} else {
log.Trace().Msgf("filter-service.find_and_check_filters: (%v) additional size check required: preparing to download metafile", f.Name)
// if indexer doesn't have api, download torrent and add to tmpPath
torrentFileRes, err = release.DownloadTorrentFile(nil)
if err != nil {
log.Error().Stack().Err(err).Msgf("filter-service.find_and_check_filters: (%v) could not download torrent file with id: '%v' from: %v", f.Name, release.TorrentID, release.Indexer)
continue
}
// parse torrent metainfo
torrentMetaInfo, err = torrentFileRes.MetaInfo.UnmarshalInfo()
if err != nil {
log.Error().Stack().Err(err).Msgf("filter-service.find_and_check_filters: could not download torrent file: '%v' from: %v", release.TorrentID, release.Indexer)
continue
}
// compare size against filter
match, err := checkSizeFilter(f.MinSize, f.MaxSize, uint64(torrentMetaInfo.TotalLength()))
if err != nil {
log.Error().Stack().Err(err).Msgf("filter-service.find_and_check_filters: (%v) could not check size filter", f.Name)
continue
}
// no match, lets continue to next filter
if !match {
log.Debug().Msgf("filter-service.find_and_check_filters: (%v) filter did not match after additional size check, trying next", f.Name)
continue
}
// store size on the release
release.Size = uint64(torrentMetaInfo.TotalLength())
}
}
// find actions and attach
// found matching filter, lets find the filter actions and attach
actions, err := s.actionRepo.FindByFilterID(f.ID)
if err != nil {
log.Error().Err(err).Msgf("could not find actions for filter: %+v", f.Name)
}
// if no actions, continue to next filter
if len(actions) == 0 {
log.Trace().Msgf("filter-service.find_and_check_filters: no actions found for filter '%v', trying next one..", f.Name)
continue
}
f.Actions = actions
return true, &f, nil
@ -222,3 +295,35 @@ func (s *service) FindAndCheckFilters(release *domain.Release) (bool, *domain.Fi
// if no match, return nil
return false, nil, nil
}
func checkSizeFilter(minSize string, maxSize string, releaseSize uint64) (bool, error) {
// handle both min and max
if minSize != "" {
// string to bytes
minSizeBytes, err := humanize.ParseBytes(minSize)
if err != nil {
// log could not parse into bytes
}
if releaseSize <= minSizeBytes {
//r.addRejection("size: smaller than min size")
return false, nil
}
}
if maxSize != "" {
// string to bytes
maxSizeBytes, err := humanize.ParseBytes(maxSize)
if err != nil {
// log could not parse into bytes
}
if releaseSize >= maxSizeBytes {
//r.addRejection("size: larger than max size")
return false, nil
}
}
return true, nil
}
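checkSizeFilter treats both bounds as optional, and a release that lands exactly on the min or max boundary is rejected because the comparisons use <= and >=. With concrete, illustrative numbers:

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	releaseSize := uint64(4_500_000_000) // 4.5 GB

	minBytes, _ := humanize.ParseBytes("1 GB")  // 1_000_000_000
	maxBytes, _ := humanize.ParseBytes("10 GB") // 10_000_000_000

	// same comparisons as checkSizeFilter: strictly inside the bounds passes
	match := releaseSize > minBytes && releaseSize < maxBytes
	fmt.Println(match) // true
}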

internal/indexer/api.go (new file, 121 lines)

@ -0,0 +1,121 @@
package indexer
import (
"fmt"
"github.com/rs/zerolog/log"
"github.com/autobrr/autobrr/internal/domain"
"github.com/autobrr/autobrr/pkg/btn"
"github.com/autobrr/autobrr/pkg/ggn"
"github.com/autobrr/autobrr/pkg/ptp"
)
type APIService interface {
TestConnection(indexer string) (bool, error)
GetTorrentByID(indexer string, torrentID string) (*domain.TorrentBasic, error)
AddClient(indexer string, settings map[string]string) error
RemoveClient(indexer string) error
}
type apiClient interface {
GetTorrentByID(torrentID string) (*domain.TorrentBasic, error)
TestAPI() (bool, error)
}
type apiService struct {
apiClients map[string]apiClient
}
func NewAPIService() APIService {
return &apiService{
apiClients: make(map[string]apiClient),
}
}
func (s *apiService) GetTorrentByID(indexer string, torrentID string) (*domain.TorrentBasic, error) {
v, ok := s.apiClients[indexer]
if !ok {
return nil, nil
}
log.Trace().Str("service", "api").Str("method", "GetTorrentByID").Msgf("'%v' trying to fetch torrent from api", indexer)
t, err := v.GetTorrentByID(torrentID)
if err != nil {
log.Error().Stack().Err(err).Msgf("could not get torrent: '%v' from: %v", torrentID, indexer)
return nil, err
}
log.Trace().Str("service", "api").Str("method", "GetTorrentByID").Msgf("'%v' successfully fetched torrent from api: %+v", indexer, t)
return t, nil
}
func (s *apiService) TestConnection(indexer string) (bool, error) {
v, ok := s.apiClients[indexer]
if !ok {
return false, nil
}
t, err := v.TestAPI()
if err != nil {
return false, err
}
return t, nil
}
func (s *apiService) AddClient(indexer string, settings map[string]string) error {
// basic validation
if indexer == "" {
return fmt.Errorf("api_service.add_client: validation falied: indexer can't be empty")
} else if len(settings) == 0 {
return fmt.Errorf("api_service.add_client: validation falied: settings can't be empty")
}
log.Trace().Msgf("api-service.add_client: init api client for '%v'", indexer)
// init client
switch indexer {
case "btn":
key, ok := settings["api_key"]
if !ok || key == "" {
return fmt.Errorf("api_service: could not initialize btn client: missing var 'api_key'")
}
s.apiClients[indexer] = btn.NewClient("", key)
case "ptp":
user, ok := settings["api_user"]
if !ok || user == "" {
return fmt.Errorf("api_service: could not initialize ptp client: missing var 'api_user'")
}
key, ok := settings["api_key"]
if !ok || key == "" {
return fmt.Errorf("api_service: could not initialize ptp client: missing var 'api_key'")
}
s.apiClients[indexer] = ptp.NewClient("", user, key)
case "ggn":
key, ok := settings["api_key"]
if !ok || key == "" {
return fmt.Errorf("api_service: could not initialize ggn client: missing var 'api_key'")
}
s.apiClients[indexer] = ggn.NewClient("", key)
default:
return fmt.Errorf("api_service: could not initialize client: unsupported indexer '%v'", indexer)
}
return nil
}
func (s *apiService) RemoveClient(indexer string) error {
_, ok := s.apiClients[indexer]
if ok {
delete(s.apiClients, indexer)
}
return nil
}
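A rough sketch of how the API service is meant to be used, mirroring what the indexer service does on Start(): register a client per enabled indexer, then fetch a torrent by ID when a filter needs the size. The API key and torrent ID below are placeholders, and the call would hit the real BTN API:

package main

import (
	"fmt"

	"github.com/autobrr/autobrr/internal/indexer"
)

func main() {
	apiService := indexer.NewAPIService()

	// settings mirror the indexer's SettingsMap; the value is a placeholder
	if err := apiService.AddClient("btn", map[string]string{"api_key": "YOUR-API-KEY"}); err != nil {
		panic(err)
	}

	// TorrentBasic carries the size as a string; ReleaseSizeBytes converts it
	t, err := apiService.GetTorrentByID("btn", "123456")
	if err != nil {
		panic(err)
	}

	fmt.Println(t.ReleaseSizeBytes())
}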


@ -11,6 +11,7 @@ protocol: torrent
supports:
- irc
- rss
- api
source: gazelle
settings:
- name: authkey
@ -21,6 +22,22 @@ settings:
type: secret
label: Torrent pass
help: Right click DL on a torrent and get the torrent_pass.
- name: api_key
type: secret
label: API Key
help: Username -> Edit Profile -> API
api:
url: https://api.broadcasthe.net
type: jsonrpc
limits:
max: 150
per: hour
settings:
- name: api_key
type: secret
label: API Key
help: Username -> Edit Profile -> API
irc:
network: BroadcasTheNet
@ -42,12 +59,6 @@ irc:
required: true
label: NickServ Password
help: NickServ password
- name: invite_command
type: secret
default: "CableGuy IDENTIFY USERNAME IRCKey"
required: true
label: Invite command
help: Invite auth with CableGuy.
parse:
type: multi


@ -11,6 +11,7 @@ protocol: torrent
supports:
- irc
- rss
- api
source: gazelle
settings:
- name: authkey
@ -21,6 +22,22 @@ settings:
type: secret
label: Torrent pass
help: Right click DL on a torrent and get the torrent_pass.
- name: api_key
type: secret
label: API Key
help: Username -> Edit / Settings -> API Keys
api:
url: https://gazellegames.net/api.php
type: json
limits:
max: 5
per: 10 seconds
settings:
- name: api_key
type: secret
label: API Key
help: Username -> Edit / Settings -> API Keys
irc:
network: GGn


@ -11,6 +11,7 @@ protocol: torrent
supports:
- irc
- rss
- api
source: gazelle
settings:
- name: authkey
@ -21,6 +22,30 @@ settings:
type: secret
label: Torrent pass
help: Right click DL on a torrent and get the torrent_pass.
- name: api_user
type: secret
label: API User
help: Edit profile -> Security -> Generate new api keys
- name: api_key
type: secret
label: API Key
help: Edit profile -> Security -> Generate new api keys
api:
url: https://passthepopcorn.me/
type: json
limits:
max: 60
per: minute
settings:
- name: api_user
type: secret
label: API User
help: Edit profile -> Security -> Generate new api keys
- name: api_key
type: secret
label: API Key
help: Edit profile -> Security -> Generate new api keys
irc:
network: PassThePopcorn


@ -26,7 +26,8 @@ type Service interface {
}
type service struct {
repo domain.IndexerRepo
repo domain.IndexerRepo
apiService APIService
// contains all raw indexer definitions
indexerDefinitions map[string]domain.IndexerDefinition
@ -37,9 +38,10 @@ type service struct {
lookupIRCServerDefinition map[string]map[string]domain.IndexerDefinition
}
func NewService(repo domain.IndexerRepo) Service {
func NewService(repo domain.IndexerRepo, apiService APIService) Service {
return &service{
repo: repo,
apiService: apiService,
indexerDefinitions: make(map[string]domain.IndexerDefinition),
mapIndexerIRCToName: make(map[string]string),
lookupIRCServerDefinition: make(map[string]map[string]domain.IndexerDefinition),
@ -150,6 +152,7 @@ func (s *service) mapIndexer(indexer domain.Indexer) (*domain.IndexerDefinition,
Privacy: in.Privacy,
Protocol: in.Protocol,
URLS: in.URLS,
Supports: in.Supports,
Settings: nil,
SettingsMap: make(map[string]string),
IRC: in.IRC,
@ -184,23 +187,34 @@ func (s *service) GetTemplates() ([]domain.IndexerDefinition, error) {
}
func (s *service) Start() error {
// load all indexer definitions
err := s.LoadIndexerDefinitions()
if err != nil {
return err
}
// load the indexers' setup by the user
indexerDefinitions, err := s.GetAll()
if err != nil {
return err
}
for _, indexerDefinition := range indexerDefinitions {
s.mapIRCIndexerLookup(indexerDefinition.Identifier, *indexerDefinition)
for _, indexer := range indexerDefinitions {
s.mapIRCIndexerLookup(indexer.Identifier, *indexer)
// add to irc server lookup table
s.mapIRCServerDefinitionLookup(indexerDefinition.IRC.Server, *indexerDefinition)
s.mapIRCServerDefinitionLookup(indexer.IRC.Server, *indexer)
// check if it has api and add to api service
if indexer.Enabled && indexer.HasApi() {
if err := s.apiService.AddClient(indexer.Identifier, indexer.SettingsMap); err != nil {
log.Error().Stack().Err(err).Msgf("indexer.start: could not init api client for: '%v'", indexer.Identifier)
}
}
}
log.Info().Msgf("Loaded %d indexers", len(indexerDefinitions))
return nil
}
@ -305,7 +319,7 @@ func (s *service) LoadIndexerDefinitions() error {
}
}
log.Info().Msgf("Loaded %d indexer definitions", len(s.indexerDefinitions))
log.Debug().Msgf("Loaded %d indexer definitions", len(s.indexerDefinitions))
return nil
}
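Start() is also where the api entry under supports in the definitions above comes into play: every enabled indexer whose definition reports HasApi() gets an API client registered with its SettingsMap. A condensed illustration (the definition literal is abbreviated and the values are placeholders):

package main

import (
	"github.com/autobrr/autobrr/internal/domain"
	"github.com/autobrr/autobrr/internal/indexer"
)

func main() {
	apiService := indexer.NewAPIService()

	// abbreviated definition; in the service this comes from the loaded yaml plus user settings
	def := domain.IndexerDefinition{
		Identifier: "ptp",
		Enabled:    true,
		Supports:   []string{"irc", "rss", "api"},
		SettingsMap: map[string]string{
			"api_user": "...",
			"api_key":  "...",
		},
	}

	// same gate as Start(): only enabled indexers that declare "api" under supports
	if def.Enabled && def.HasApi() {
		_ = apiService.AddClient(def.Identifier, def.SettingsMap)
	}
}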


@ -10,11 +10,13 @@ import (
"github.com/r3labs/sse/v2"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/rs/zerolog/pkgerrors"
"gopkg.in/natefinch/lumberjack.v2"
)
func Setup(cfg domain.Config, sse *sse.Server) {
zerolog.TimeFieldFormat = time.RFC3339
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
switch cfg.LogLevel {
case "INFO":