Mirror of https://github.com/idanoo/autobrr (synced 2025-07-22 16:29:12 +00:00)
feat(filters): RED and OPS fetch record label from API (#1881)
* feat(filters): RED and OPS fetch record label from API
* test: add record label to RED and OPS test data
* refactor: record label check

Co-authored-by: ze0s <ze0s@riseup.net>
Parent: 221bc35371
Commit: d153ac44b8
16 changed files with 380 additions and 154 deletions
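In short: filters gain match_record_labels / except_record_labels fields, and when a RED or OPS announce arrives without a record label, the check is deferred until the torrent details have been fetched from the indexer API. A minimal standalone sketch of that deferral idea, using simplified stand-in types rather than the real domain package:

package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins for the domain types touched by this commit.
type Release struct {
	Indexer                            string
	RecordLabel                        string
	AdditionalRecordLabelCheckRequired bool
}

type Filter struct {
	MatchRecordLabels string
}

// containsAny is a naive comma-separated list matcher; the real filter code
// is more capable, this is only enough for the sketch.
func containsAny(value, list string) bool {
	for _, item := range strings.Split(list, ",") {
		if strings.EqualFold(strings.TrimSpace(item), value) {
			return true
		}
	}
	return false
}

// checkRecordLabel mirrors the idea in the diff: when a RED or OPS announce
// carries no record label, flag the release for a later API-backed check
// instead of rejecting it on the spot.
func (f *Filter) checkRecordLabel(r *Release) bool {
	if r.RecordLabel == "" && (r.Indexer == "redacted" || r.Indexer == "ops") {
		r.AdditionalRecordLabelCheckRequired = true
		return true // decided later, once torrent info has been fetched
	}
	return containsAny(r.RecordLabel, f.MatchRecordLabels)
}

func main() {
	f := Filter{MatchRecordLabels: "Anjunabeats, Armada"}
	r := Release{Indexer: "redacted"} // announce without a record label
	fmt.Println(f.checkRecordLabel(&r), r.AdditionalRecordLabelCheckRequired) // true true
}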
@@ -226,6 +226,8 @@ func (r *FilterRepo) FindByID(ctx context.Context, filterID int) (*domain.Filter
  "f.except_categories",
  "f.match_uploaders",
  "f.except_uploaders",
+ "f.match_record_labels",
+ "f.except_record_labels",
  "f.match_language",
  "f.except_language",
  "f.tags",

@@ -261,7 +263,7 @@ func (r *FilterRepo) FindByID(ctx context.Context, filterID int) (*domain.Filter
  var f domain.Filter

  // filter
- var minSize, maxSize, maxDownloadsUnit, matchReleases, exceptReleases, matchReleaseGroups, exceptReleaseGroups, matchReleaseTags, exceptReleaseTags, matchDescription, exceptDescription, freeleechPercent, shows, seasons, episodes, years, months, days, artists, albums, matchCategories, exceptCategories, matchUploaders, exceptUploaders, tags, exceptTags, tagsMatchLogic, exceptTagsMatchLogic sql.NullString
+ var minSize, maxSize, maxDownloadsUnit, matchReleases, exceptReleases, matchReleaseGroups, exceptReleaseGroups, matchReleaseTags, exceptReleaseTags, matchDescription, exceptDescription, freeleechPercent, shows, seasons, episodes, years, months, days, artists, albums, matchCategories, exceptCategories, matchUploaders, exceptUploaders, matchRecordLabels, exceptRecordLabels, tags, exceptTags, tagsMatchLogic, exceptTagsMatchLogic sql.NullString
  var useRegex, scene, freeleech, hasLog, hasCue, perfectFlac sql.NullBool
  var delay, maxDownloads, logScore sql.NullInt32

@@ -319,6 +321,8 @@ func (r *FilterRepo) FindByID(ctx context.Context, filterID int) (*domain.Filter
  &exceptCategories,
  &matchUploaders,
  &exceptUploaders,
+ &matchRecordLabels,
+ &exceptRecordLabels,
  pq.Array(&f.MatchLanguage),
  pq.Array(&f.ExceptLanguage),
  &tags,

@@ -372,6 +376,8 @@ func (r *FilterRepo) FindByID(ctx context.Context, filterID int) (*domain.Filter
  f.ExceptCategories = exceptCategories.String
  f.MatchUploaders = matchUploaders.String
  f.ExceptUploaders = exceptUploaders.String
+ f.MatchRecordLabels = matchRecordLabels.String
+ f.ExceptRecordLabels = exceptRecordLabels.String
  f.Tags = tags.String
  f.ExceptTags = exceptTags.String
  f.TagsMatchLogic = tagsMatchLogic.String

@@ -444,6 +450,8 @@ func (r *FilterRepo) findByIndexerIdentifier(ctx context.Context, indexer string
  "f.except_categories",
  "f.match_uploaders",
  "f.except_uploaders",
+ "f.match_record_labels",
+ "f.except_record_labels",
  "f.match_language",
  "f.except_language",
  "f.tags",

@@ -484,7 +492,7 @@ func (r *FilterRepo) findByIndexerIdentifier(ctx context.Context, indexer string
  for rows.Next() {
  var f domain.Filter

- var minSize, maxSize, maxDownloadsUnit, matchReleases, exceptReleases, matchReleaseGroups, exceptReleaseGroups, matchReleaseTags, exceptReleaseTags, matchDescription, exceptDescription, freeleechPercent, shows, seasons, episodes, years, months, days, artists, albums, matchCategories, exceptCategories, matchUploaders, exceptUploaders, tags, exceptTags, tagsMatchLogic, exceptTagsMatchLogic sql.NullString
+ var minSize, maxSize, maxDownloadsUnit, matchReleases, exceptReleases, matchReleaseGroups, exceptReleaseGroups, matchReleaseTags, exceptReleaseTags, matchDescription, exceptDescription, freeleechPercent, shows, seasons, episodes, years, months, days, artists, albums, matchCategories, exceptCategories, matchUploaders, exceptUploaders, matchRecordLabels, exceptRecordLabels, tags, exceptTags, tagsMatchLogic, exceptTagsMatchLogic sql.NullString
  var useRegex, scene, freeleech, hasLog, hasCue, perfectFlac sql.NullBool
  var delay, maxDownloads, logScore sql.NullInt32

@@ -542,6 +550,8 @@ func (r *FilterRepo) findByIndexerIdentifier(ctx context.Context, indexer string
  &exceptCategories,
  &matchUploaders,
  &exceptUploaders,
+ &matchRecordLabels,
+ &exceptRecordLabels,
  pq.Array(&f.MatchLanguage),
  pq.Array(&f.ExceptLanguage),
  &tags,

@@ -591,6 +601,8 @@ func (r *FilterRepo) findByIndexerIdentifier(ctx context.Context, indexer string
  f.ExceptCategories = exceptCategories.String
  f.MatchUploaders = matchUploaders.String
  f.ExceptUploaders = exceptUploaders.String
+ f.MatchRecordLabels = matchRecordLabels.String
+ f.ExceptRecordLabels = exceptRecordLabels.String
  f.Tags = tags.String
  f.ExceptTags = exceptTags.String
  f.TagsMatchLogic = tagsMatchLogic.String

@@ -738,6 +750,8 @@ func (r *FilterRepo) Store(ctx context.Context, filter *domain.Filter) error {
  "except_categories",
  "match_uploaders",
  "except_uploaders",
+ "match_record_labels",
+ "except_record_labels",
  "match_language",
  "except_language",
  "tags",

@@ -804,6 +818,8 @@ func (r *FilterRepo) Store(ctx context.Context, filter *domain.Filter) error {
  filter.ExceptCategories,
  filter.MatchUploaders,
  filter.ExceptUploaders,
+ filter.MatchRecordLabels,
+ filter.ExceptRecordLabels,
  pq.Array(filter.MatchLanguage),
  pq.Array(filter.ExceptLanguage),
  filter.Tags,

@@ -888,6 +904,8 @@ func (r *FilterRepo) Update(ctx context.Context, filter *domain.Filter) error {
  Set("except_categories", filter.ExceptCategories).
  Set("match_uploaders", filter.MatchUploaders).
  Set("except_uploaders", filter.ExceptUploaders).
+ Set("match_record_labels", filter.MatchRecordLabels).
+ Set("except_record_labels", filter.ExceptRecordLabels).
  Set("match_language", pq.Array(filter.MatchLanguage)).
  Set("except_language", pq.Array(filter.ExceptLanguage)).
  Set("tags", filter.Tags).

@@ -1063,6 +1081,12 @@ func (r *FilterRepo) UpdatePartial(ctx context.Context, filter domain.FilterUpda
  if filter.ExceptUploaders != nil {
  	q = q.Set("except_uploaders", filter.ExceptUploaders)
  }
+ if filter.MatchRecordLabels != nil {
+ 	q = q.Set("match_record_labels", filter.MatchRecordLabels)
+ }
+ if filter.ExceptRecordLabels != nil {
+ 	q = q.Set("except_record_labels", filter.ExceptRecordLabels)
+ }
  if filter.MatchLanguage != nil {
  	q = q.Set("match_language", pq.Array(filter.MatchLanguage))
  }

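Note the pattern above: the new columns are nullable in the base schema, so the repository scans them into sql.NullString and then copies .String onto the filter, turning a NULL from an older row into an empty string instead of a scan error. A tiny self-contained illustration of that behavior:

package main

import (
	"database/sql"
	"fmt"
)

func main() {
	var matchRecordLabels sql.NullString

	// Simulate what database/sql does when the column is NULL.
	if err := matchRecordLabels.Scan(nil); err != nil {
		panic(err)
	}
	fmt.Printf("valid=%v value=%q\n", matchRecordLabels.Valid, matchRecordLabels.String) // valid=false value=""

	// And when the column holds text.
	if err := matchRecordLabels.Scan("Anjunabeats"); err != nil {
		panic(err)
	}
	// f.MatchRecordLabels = matchRecordLabels.String, as in the repository code above.
	fmt.Printf("valid=%v value=%q\n", matchRecordLabels.Valid, matchRecordLabels.String)
}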
@@ -144,6 +144,8 @@ CREATE TABLE filter
  except_categories TEXT,
  match_uploaders TEXT,
  except_uploaders TEXT,
+ match_record_labels TEXT,
+ except_record_labels TEXT,
  match_language TEXT [] DEFAULT '{}',
  except_language TEXT [] DEFAULT '{}',
  tags TEXT,

@@ -1066,5 +1068,11 @@ CREATE TABLE list_filter
  FOREIGN KEY (filter_id) REFERENCES filter(id) ON DELETE CASCADE,
  PRIMARY KEY (list_id, filter_id)
  );
  `,
+ `ALTER TABLE filter
+ ADD COLUMN match_record_labels TEXT DEFAULT '';
+
+ ALTER TABLE filter
+ ADD COLUMN except_record_labels TEXT DEFAULT '';
+ `,
  }

@@ -144,6 +144,8 @@ CREATE TABLE filter
  except_categories TEXT,
  match_uploaders TEXT,
  except_uploaders TEXT,
+ match_record_labels TEXT,
+ except_record_labels TEXT,
  match_language TEXT [] DEFAULT '{}',
  except_language TEXT [] DEFAULT '{}',
  tags TEXT,

@@ -1708,5 +1710,11 @@ CREATE TABLE list_filter
  FOREIGN KEY (filter_id) REFERENCES filter(id) ON DELETE CASCADE,
  PRIMARY KEY (list_id, filter_id)
  );
  `,
+ `ALTER TABLE filter
+ ADD COLUMN match_record_labels TEXT DEFAULT '';
+
+ ALTER TABLE filter
+ ADD COLUMN except_record_labels TEXT DEFAULT '';
+ `,
  }

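Both migration variants add the columns with DEFAULT '', so filters created before the upgrade read back an empty string and keep matching as before. A small standalone check of that behavior; the in-memory SQLite database and the modernc.org/sqlite driver here are assumptions for the sketch, any database/sql driver would do:

package main

import (
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // assumed driver for the sketch; any database/sql driver works
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A pre-existing row, created before the new column existed.
	if _, err := db.Exec(`CREATE TABLE filter (id INTEGER PRIMARY KEY, name TEXT)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO filter (id, name) VALUES (1, 'old filter')`); err != nil {
		log.Fatal(err)
	}

	// The migration from this commit, reduced to one column.
	if _, err := db.Exec(`ALTER TABLE filter ADD COLUMN match_record_labels TEXT DEFAULT ''`); err != nil {
		log.Fatal(err)
	}

	var matchRecordLabels string
	if err := db.QueryRow(`SELECT match_record_labels FROM filter WHERE id = 1`).Scan(&matchRecordLabels); err != nil {
		log.Fatal(err)
	}
	log.Printf("existing row reads back %q", matchRecordLabels) // "" rather than NULL
}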
@@ -144,6 +144,8 @@ type Filter struct {
  ExceptCategories string `json:"except_categories,omitempty"`
  MatchUploaders string `json:"match_uploaders,omitempty"`
  ExceptUploaders string `json:"except_uploaders,omitempty"`
+ MatchRecordLabels string `json:"match_record_labels,omitempty"`
+ ExceptRecordLabels string `json:"except_record_labels,omitempty"`
  MatchLanguage []string `json:"match_language,omitempty"`
  ExceptLanguage []string `json:"except_language,omitempty"`
  Tags string `json:"tags,omitempty"`

@@ -274,6 +276,8 @@ type FilterUpdate struct {
  ExceptCategories *string `json:"except_categories,omitempty"`
  MatchUploaders *string `json:"match_uploaders,omitempty"`
  ExceptUploaders *string `json:"except_uploaders,omitempty"`
+ MatchRecordLabels *string `json:"match_record_labels,omitempty"`
+ ExceptRecordLabels *string `json:"except_record_labels,omitempty"`
  MatchLanguage *[]string `json:"match_language,omitempty"`
  ExceptLanguage *[]string `json:"except_language,omitempty"`
  Tags *string `json:"tags,omitempty"`

@@ -364,6 +368,9 @@ func (f *Filter) Sanitize() error {
  f.Artists = sanitize.FilterString(f.Artists)
  f.Albums = sanitize.FilterString(f.Albums)

+ f.MatchRecordLabels = sanitize.FilterString(f.MatchRecordLabels)
+ f.ExceptRecordLabels = sanitize.FilterString(f.ExceptRecordLabels)
+
  return nil
  }

@@ -465,6 +472,10 @@ func (f *Filter) CheckFilter(r *Release) (*RejectionReasons, bool) {
  	// f.checkUploader sets the rejections
  }

+ if (f.MatchRecordLabels != "" || f.ExceptRecordLabels != "") && !f.checkRecordLabel(r) {
+ 	// f.checkRecordLabel sets the rejections
+ }
+
  if len(f.MatchLanguage) > 0 && !sliceContainsSlice(r.Language, f.MatchLanguage) {
  	f.RejectReasons.Add("match language", r.Language, f.MatchLanguage)
  }

@@ -749,6 +760,26 @@ func (f *Filter) checkUploader(r *Release) bool {
  return true
  }

+ // checkRecordLabel checks if the record label is within the given list.
+ // if the haystack is not empty but the record label is, then a further
+ // investigation is needed
+ func (f *Filter) checkRecordLabel(r *Release) bool {
+ 	if r.RecordLabel == "" && (r.Indexer.Identifier == "redacted" || r.Indexer.Identifier == "ops") {
+ 		r.AdditionalRecordLabelCheckRequired = true
+ 		return true
+ 	}
+
+ 	if f.MatchRecordLabels != "" && !contains(r.RecordLabel, f.MatchRecordLabels) {
+ 		f.RejectReasons.Add("match record labels", r.RecordLabel, f.MatchRecordLabels)
+ 	}
+
+ 	if f.ExceptRecordLabels != "" && contains(r.RecordLabel, f.ExceptRecordLabels) {
+ 		f.RejectReasons.Add("except record labels", r.RecordLabel, f.ExceptRecordLabels)
+ 	}
+
+ 	return true
+ }
+
  // IsPerfectFLAC Perfect is "CD FLAC Cue Log 100% Lossless or 24bit Lossless"
  func (f *Filter) IsPerfectFLAC(r *Release) ([]string, bool) {
  	rejections := []string{}

@@ -1200,6 +1231,20 @@ func (f *Filter) CheckUploader(uploader string) (bool, error) {
  return true, nil
  }

+ func (f *Filter) CheckRecordLabel(recordLabel string) (bool, error) {
+ 	if f.MatchRecordLabels != "" && !contains(recordLabel, f.MatchRecordLabels) {
+ 		f.RejectReasons.Add("match record label", recordLabel, f.MatchRecordLabels)
+ 		return false, nil
+ 	}
+
+ 	if f.ExceptRecordLabels != "" && contains(recordLabel, f.ExceptRecordLabels) {
+ 		f.RejectReasons.Add("except record label", recordLabel, f.ExceptRecordLabels)
+ 		return false, nil
+ 	}
+
+ 	return true, nil
+ }
+
  // parsedSizeLimits parses filter bytes limits (expressed as a string) into a
  // uint64 number of bytes. The bounds are returned as *uint64 number of bytes,
  // with "nil" representing "no limit". We break out filter size limit parsing

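Both new checks feed f.RejectReasons rather than returning a verdict directly, which is why the CheckFilter branch above has an empty body: reasons accumulate and the final match decision is taken elsewhere. A reduced sketch of that accumulate-then-decide pattern (RejectionReasons here is a simplified stand-in, not the real type):

package main

import "fmt"

// Simplified stand-in for domain.RejectionReasons: checks append reasons,
// and the caller decides at the end based on whether any were collected.
type RejectionReasons struct {
	reasons []string
}

func (r *RejectionReasons) Add(field string, got, want any) {
	r.reasons = append(r.reasons, fmt.Sprintf("%s: got %v, want %v", field, got, want))
}

func (r *RejectionReasons) Len() int { return len(r.reasons) }

func main() {
	var rr RejectionReasons

	recordLabel := "Some Label"
	matchRecordLabels := "Anjunabeats"

	// Like checkRecordLabel above: a miss records a reason but does not
	// short-circuit, so every check can report its own reason.
	if matchRecordLabels != "" && recordLabel != matchRecordLabels {
		rr.Add("match record labels", recordLabel, matchRecordLabels)
	}

	matched := rr.Len() == 0
	fmt.Println("matched:", matched, "reasons:", rr.reasons)
}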
@@ -434,6 +434,7 @@ type TorrentBasic struct {
  InfoHash string `json:"InfoHash"`
  Size string `json:"Size"`
  Uploader string `json:"Uploader"`
+ RecordLabel string `json:"RecordLabel"`
  }

  func (t TorrentBasic) ReleaseSizeBytes() uint64 {

@@ -78,6 +78,7 @@ type Macro struct {
  TorrentTmpFile string
  Type string
  Uploader string
+ RecordLabel string
  Website string
  Year int
  Month int

@@ -150,6 +151,7 @@ func NewMacro(release Release) Macro {
  TorrentTmpFile: release.TorrentTmpFile,
  Type: release.Type,
  Uploader: release.Uploader,
+ RecordLabel: release.RecordLabel,
  Website: release.Website,
  Year: release.Year,
  Month: release.Month,

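With RecordLabel copied onto the Macro, action templates can reference the label like any other field. A reduced sketch using text/template; the exact template syntax accepted by autobrr's macro parser is an assumption here:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// Minimal stand-in for the Macro struct: only the fields used below.
type Macro struct {
	TorrentName string
	RecordLabel string
}

func main() {
	m := Macro{TorrentName: "Artist - Album [2024] [FLAC]", RecordLabel: "FAJo Music"}

	// With RecordLabel on the macro, a template can render it next to any
	// other release data.
	tmpl := template.Must(template.New("tags").Parse("{{ .TorrentName }} | label: {{ .RecordLabel }}\n"))
	if err := tmpl.Execute(os.Stdout, m); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}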
@@ -101,6 +101,7 @@ type Release struct {
  FreeleechPercent int `json:"-"`
  Bonus []string `json:"-"`
  Uploader string `json:"uploader"`
+ RecordLabel string `json:"record_label"`
  PreTime string `json:"pre_time"`
  Other []string `json:"-"`
  RawCookie string `json:"-"`

@@ -108,6 +109,7 @@ type Release struct {
  Leechers int `json:"-"`
  AdditionalSizeCheckRequired bool `json:"-"`
  AdditionalUploaderCheckRequired bool `json:"-"`
+ AdditionalRecordLabelCheckRequired bool `json:"-"`
  FilterID int `json:"-"`
  Filter *Filter `json:"-"`
  ActionStatus []ReleaseActionStatus `json:"action_status"`

@@ -846,6 +848,10 @@ func (r *Release) MapVars(def *IndexerDefinition, varMap map[string]string) erro
  	r.Uploader = uploader
  }

+ if recordLabel, err := getStringMapValue(varMap, "recordLabel"); err == nil {
+ 	r.RecordLabel = recordLabel
+ }
+
  if torrentSize, err := getStringMapValue(varMap, "torrentSize"); err == nil {
  	// Some indexers like BTFiles announces size with comma. Humanize does not handle that well and strips it.
  	torrentSize = strings.Replace(torrentSize, ",", ".", 1)

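MapVars treats recordLabel as an optional announce variable: when the indexer definition captures it, the value is copied onto the release, otherwise the field stays empty and the API-backed check kicks in later. A reduced sketch of that lookup; getStringMapValue here is a simplified stand-in for the real helper:

package main

import (
	"errors"
	"fmt"
)

// getStringMapValue is a simplified stand-in for the helper used in MapVars:
// it returns an error when the key is absent, so optional variables can be
// skipped with a plain `if ...; err == nil` block.
func getStringMapValue(vars map[string]string, key string) (string, error) {
	if v, ok := vars[key]; ok {
		return v, nil
	}
	return "", errors.New("key not found: " + key)
}

type Release struct {
	Uploader    string
	RecordLabel string
}

func main() {
	varMap := map[string]string{"uploader": "someone"} // no recordLabel in this announce

	var r Release
	if uploader, err := getStringMapValue(varMap, "uploader"); err == nil {
		r.Uploader = uploader
	}
	if recordLabel, err := getStringMapValue(varMap, "recordLabel"); err == nil {
		r.RecordLabel = recordLabel // stays empty here, triggering the API-backed check later
	}
	fmt.Printf("%+v\n", r)
}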
@@ -43,6 +43,7 @@ type Service interface {
  Delete(ctx context.Context, filterID int) error
  AdditionalSizeCheck(ctx context.Context, f *domain.Filter, release *domain.Release) (bool, error)
  AdditionalUploaderCheck(ctx context.Context, f *domain.Filter, release *domain.Release) (bool, error)
+ AdditionalRecordLabelCheck(ctx context.Context, f *domain.Filter, release *domain.Release) (bool, error)
  CheckSmartEpisodeCanDownload(ctx context.Context, params *domain.SmartEpisodeParams) (bool, error)
  GetDownloadsByFilterId(ctx context.Context, filterID int) (*domain.FilterDownloads, error)
  }

@@ -462,6 +463,21 @@ func (s *service) CheckFilter(ctx context.Context, f *domain.Filter, release *do
  	}
  }

+ if release.AdditionalRecordLabelCheckRequired {
+ 	l.Debug().Msgf("(%s) additional record label check required", f.Name)
+
+ 	ok, err := s.AdditionalRecordLabelCheck(ctx, f, release)
+ 	if err != nil {
+ 		l.Error().Err(err).Msgf("(%s) additional record label check error", f.Name)
+ 		return false, err
+ 	}
+
+ 	if !ok {
+ 		l.Trace().Msgf("(%s) additional record label check not matching what filter wanted", f.Name)
+ 		return false, nil
+ 	}
+ }
+
  // run external filters
  if f.External != nil {
  	externalOk, err := s.RunExternalFilters(ctx, f, f.External, release)

@@ -503,7 +519,7 @@ func (s *service) AdditionalSizeCheck(ctx context.Context, f *domain.Filter, rel
  switch release.Indexer.Identifier {
  case "btn", "ggn", "redacted", "ops", "mock":
- 	if (release.Size == 0 && release.AdditionalSizeCheckRequired) || (release.Uploader == "" && release.AdditionalUploaderCheckRequired) {
+ 	if (release.Size == 0 && release.AdditionalSizeCheckRequired) || (release.Uploader == "" && release.AdditionalUploaderCheckRequired) || (release.RecordLabel == "" && release.AdditionalRecordLabelCheckRequired) {
  		l.Trace().Msgf("(%s) preparing to check size via api", f.Name)

  		torrentInfo, err := s.apiService.GetTorrentByID(ctx, release.Indexer.Identifier, release.TorrentID)

@@ -522,6 +538,10 @@ func (s *service) AdditionalSizeCheck(ctx context.Context, f *domain.Filter, rel
  if release.Uploader == "" {
  	release.Uploader = torrentInfo.Uploader
  }
+
+ if release.RecordLabel == "" {
+ 	release.RecordLabel = torrentInfo.RecordLabel
+ }
  }

  default:

@@ -547,7 +567,7 @@ func (s *service) AdditionalSizeCheck(ctx context.Context, f *domain.Filter, rel
  if !sizeOk {
  	l.Debug().Msgf("(%s) filter did not match after additional size check, trying next", f.Name)
- 	return false, err
+ 	return false, nil
  }

  return true, nil

@@ -575,13 +595,13 @@ func (s *service) AdditionalUploaderCheck(ctx context.Context, f *domain.Filter,
  if !uploaderOk {
  	l.Debug().Msgf("(%s) filter did not match after additional uploaders check, trying next", f.Name)
- 	return false, err
+ 	return false, nil
  }

  return true, nil
  }

- l.Debug().Msgf("(%s) additional api size check required", f.Name)
+ l.Debug().Msgf("(%s) additional api uploader check required", f.Name)

  switch release.Indexer.Identifier {
  case "redacted", "ops", "mock":

@@ -598,9 +618,10 @@ func (s *service) AdditionalUploaderCheck(ctx context.Context, f *domain.Filter,
  torrentSize := torrentInfo.ReleaseSizeBytes()
  if release.Size == 0 && torrentSize > 0 {
  	release.Size = torrentSize
  }

- // reset AdditionalSizeCheckRequired to not re-trigger check
- release.AdditionalSizeCheckRequired = false
+ if release.RecordLabel == "" {
+ 	release.RecordLabel = torrentInfo.RecordLabel
+ }

  if release.Uploader == "" {

@@ -622,9 +643,88 @@ func (s *service) AdditionalUploaderCheck(ctx context.Context, f *domain.Filter,
  if !uploaderOk {
  	l.Debug().Msgf("(%s) filter did not match after additional uploaders check, trying next", f.Name)
  	return false, nil
  }

  return true, nil
  }

+ func (s *service) AdditionalRecordLabelCheck(ctx context.Context, f *domain.Filter, release *domain.Release) (ok bool, err error) {
+ 	defer func() {
+ 		// try recover panic if anything went wrong with API or size checks
+ 		errors.RecoverPanic(recover(), &err)
+ 		if err != nil {
+ 			ok = false
+ 		}
+ 	}()
+
+ 	// do additional check against indexer api
+ 	l := s.log.With().Str("method", "AdditionalRecordLabelCheck").Logger()
+
+ 	// if record label was fetched before during size check or uploader check we check it and return early
+ 	if release.RecordLabel != "" {
+ 		recordLabelOk, err := f.CheckRecordLabel(release.RecordLabel)
+ 		if err != nil {
+ 			l.Error().Err(err).Msgf("(%s) error comparing release and record label", f.Name)
+ 			return false, err
+ 		}
+
+ 		// reset AdditionalRecordLabelCheckRequired to not re-trigger check
+ 		release.AdditionalRecordLabelCheckRequired = false
+
+ 		if !recordLabelOk {
+ 			l.Debug().Msgf("(%s) filter did not match after additional record label check, trying next", f.Name)
+ 			return false, nil
+ 		}
+
+ 		return true, nil
+ 	}
+
+ 	l.Debug().Msgf("(%s) additional api record label check required", f.Name)
+
+ 	switch release.Indexer.Identifier {
+ 	case "redacted", "ops", "mock":
+ 		l.Trace().Msgf("(%s) preparing to check via api", f.Name)
+
+ 		torrentInfo, err := s.apiService.GetTorrentByID(ctx, release.Indexer.Identifier, release.TorrentID)
+ 		if err != nil || torrentInfo == nil {
+ 			l.Error().Err(err).Msgf("(%s) could not get torrent info from api: '%s' from: %s", f.Name, release.TorrentID, release.Indexer.Identifier)
+ 			return false, err
+ 		}
+
+ 		l.Debug().Msgf("(%s) got torrent info from api: %+v", f.Name, torrentInfo)
+
+ 		torrentSize := torrentInfo.ReleaseSizeBytes()
+ 		if release.Size == 0 && torrentSize > 0 {
+ 			release.Size = torrentSize
+ 		}
+
+ 		if release.Uploader == "" {
+ 			release.Uploader = torrentInfo.Uploader
+ 		}
+
+ 		if release.RecordLabel == "" {
+ 			release.RecordLabel = torrentInfo.RecordLabel
+ 		}
+
+ 	default:
+ 		return false, errors.New("additional record label check not supported for this indexer: %s", release.Indexer.Identifier)
+ 	}
+
+ 	recordLabelOk, err := f.CheckRecordLabel(release.RecordLabel)
+ 	if err != nil {
+ 		l.Error().Err(err).Msgf("(%s) error comparing release and record label", f.Name)
+ 		return false, err
+ 	}
+
+ 	// reset AdditionalRecordLabelCheckRequired to not re-trigger check
+ 	release.AdditionalRecordLabelCheckRequired = false
+
+ 	if !recordLabelOk {
+ 		l.Debug().Msgf("(%s) filter did not match after additional record label check, trying next", f.Name)
+ 		return false, nil
+ 	}
+
+ 	return true, nil
+ }

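Like the existing size and uploader checks, AdditionalRecordLabelCheck guards the API call with a defer that funnels any panic into the named error return. A standalone sketch of that idiom; recoverPanic stands in for the project's errors.RecoverPanic helper:

package main

import "fmt"

// recoverPanic is a simplified stand-in for errors.RecoverPanic: it turns a
// recovered panic value into an error assigned through the named return.
func recoverPanic(rec any, err *error) {
	if rec != nil {
		*err = fmt.Errorf("recovered panic: %v", rec)
	}
}

func additionalCheck() (ok bool, err error) {
	defer func() {
		recoverPanic(recover(), &err)
		if err != nil {
			ok = false
		}
	}()

	// Anything that panics below (a nil API response, for example) is
	// reported as an error instead of crashing the caller.
	panic("api client blew up")
}

func main() {
	ok, err := additionalCheck()
	fmt.Println(ok, err) // false recovered panic: api client blew up
}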
@@ -113,8 +113,8 @@ type Torrent struct {
  Remastered bool `json:"remastered"`
  RemasterYear int `json:"remasterYear"`
  RemasterTitle string `json:"remasterTitle"`
- RemasterRecordLabel string `json:"remasterRecordLabel"`
- RemasterCatalogueNumber string `json:"remasterCatalogueNumber"`
+ RecordLabel string `json:"remasterRecordLabel"` // remasterRecordLabel is the record label of the release, which should be used instead of the record label of the group
+ CatalogueNumber string `json:"remasterCatalogueNumber"` // remasterCatalogueNumber is the catalogue number of the release, which should be used instead of the catalogue number of the group
  Scene bool `json:"scene"`
  HasLog bool `json:"hasLog"`
  HasCue bool `json:"hasCue"`

@@ -247,6 +247,7 @@ func (c *Client) GetTorrentByID(ctx context.Context, torrentID string) (*domain.
  InfoHash: response.Response.Torrent.InfoHash,
  Size: strconv.Itoa(response.Response.Torrent.Size),
  Uploader: response.Response.Torrent.Username,
+ RecordLabel: response.Response.Torrent.RecordLabel,
  }

  return res, nil

@@ -76,6 +76,7 @@ func TestOrpheusClient_GetTorrentByID(t *testing.T) {
  InfoHash: "",
  Size: "255299244",
  Uploader: "uploader",
+ RecordLabel: "FAJo Music",
  },
  wantErr: "",
  },

@@ -113,8 +113,8 @@ type Torrent struct {
  Remastered bool `json:"remastered"`
  RemasterYear int `json:"remasterYear"`
  RemasterTitle string `json:"remasterTitle"`
- RemasterRecordLabel string `json:"remasterRecordLabel"`
- RemasterCatalogueNumber string `json:"remasterCatalogueNumber"`
+ RecordLabel string `json:"remasterRecordLabel"` // remasterRecordLabel is the record label of the release, which should be used instead of the record label of the group
+ CatalogueNumber string `json:"remasterCatalogueNumber"` // remasterCatalogueNumber is the catalogue number of the release, which should be used instead of the catalogue number of the group
  Scene bool `json:"scene"`
  HasLog bool `json:"hasLog"`
  HasCue bool `json:"hasCue"`

@@ -235,6 +235,7 @@ func (c *Client) GetTorrentByID(ctx context.Context, torrentID string) (*domain.
  InfoHash: response.Response.Torrent.InfoHash,
  Size: strconv.Itoa(response.Response.Torrent.Size),
  Uploader: response.Response.Torrent.Username,
+ RecordLabel: response.Response.Torrent.RecordLabel,
  }, nil

  }

@@ -76,6 +76,7 @@ func TestREDClient_GetTorrentByID(t *testing.T) {
  InfoHash: "B2BABD3A361EAFC6C4E9142C422DF7DDF5D7E163",
  Size: "527749302",
  Uploader: "Uploader",
+ RecordLabel: "FAJo Music",
  },
  wantErr: "",
  },

pkg/red/testdata/get_torrent_by_id.json (vendored)

@@ -52,7 +52,7 @@
  "remastered": false,
  "remasterYear": 0,
  "remasterTitle": "",
- "remasterRecordLabel": "",
+ "remasterRecordLabel": "FAJo Music",
  "remasterCatalogueNumber": "",
  "scene": true,
  "hasLog": false,

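Fixture values like this are what the client tests assert on: the torrent-level remasterRecordLabel is now decoded into the renamed RecordLabel field and surfaced to the filter. A trimmed-down sketch of that mapping (field set reduced for brevity):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Trimmed-down version of the client Torrent struct: the release-level
// remasterRecordLabel is decoded straight into RecordLabel, which is what
// the filter check compares against.
type Torrent struct {
	RemasterTitle   string `json:"remasterTitle"`
	RecordLabel     string `json:"remasterRecordLabel"`
	CatalogueNumber string `json:"remasterCatalogueNumber"`
}

func main() {
	data := []byte(`{"remasterTitle": "", "remasterRecordLabel": "FAJo Music", "remasterCatalogueNumber": ""}`)

	var t Torrent
	if err := json.Unmarshal(data, &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.RecordLabel) // FAJo Music
}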
@@ -431,6 +431,8 @@ export const FilterDetails = () => {
  except_tags_match_logic: filter.except_tags_match_logic,
  match_uploaders: filter.match_uploaders,
  except_uploaders: filter.except_uploaders,
+ match_record_labels: filter.match_record_labels,
+ except_record_labels: filter.except_record_labels,
  match_language: filter.match_language || [],
  except_language: filter.except_language || [],
  freeleech: filter.freeleech,

@@ -43,6 +43,30 @@ export const Music = () => {
      </div>
    }
  />
+ <TextAreaAutoResize
+   name="match_record_labels"
+   label="Match record labels"
+   columns={6}
+   placeholder="eg. Anjunabeats, Armada"
+   tooltip={
+     <div>
+       <p>Comma separated list of record labels to match. Only Orpheus and Redacted support this.</p>
+       <DocsLink href="https://autobrr.com/filters#music" />
+     </div>
+   }
+ />
+ <TextAreaAutoResize
+   name="except_record_labels"
+   label="Except record labels"
+   columns={6}
+   placeholder="eg. Anjunadeep, Armind"
+   tooltip={
+     <div>
+       <p>Comma separated list of record labels to ignore (takes priority over Match record labels). Only Orpheus and Redacted support this.</p>
+       <DocsLink href="https://autobrr.com/filters#music" />
+     </div>
+   }
+ />
  </FilterLayout>
  </FilterSection>

web/src/types/Filter.d.ts (vendored)

@@ -62,6 +62,8 @@ interface Filter {
  except_categories: string;
  match_uploaders: string;
  except_uploaders: string;
+ match_record_labels: string;
+ except_record_labels: string;
  match_language: string[];
  except_language: string[];
  tags: string;