mirror of https://github.com/idanoo/autobrr (synced 2025-07-23 16:59:12 +00:00)
feat: add backend
This commit is contained in:
parent bc418ff248
commit a838d994a6
68 changed files with 9561 additions and 0 deletions
197
internal/database/action.go
Normal file
@@ -0,0 +1,197 @@
package database

import (
    "database/sql"

    "github.com/autobrr/autobrr/internal/domain"

    "github.com/rs/zerolog/log"
)

type ActionRepo struct {
    db *sql.DB
}

func NewActionRepo(db *sql.DB) domain.ActionRepo {
    return &ActionRepo{db: db}
}

func (r *ActionRepo) FindByFilterID(filterID int) ([]domain.Action, error) {

    rows, err := r.db.Query("SELECT id, name, type, enabled, exec_cmd, exec_args, watch_folder, category, tags, label, save_path, paused, ignore_rules, limit_download_speed, limit_upload_speed, client_id FROM action WHERE action.filter_id = ?", filterID)
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var actions []domain.Action
    for rows.Next() {
        var a domain.Action

        var execCmd, execArgs, watchFolder, category, tags, label, savePath sql.NullString
        var limitUl, limitDl sql.NullInt64
        var clientID sql.NullInt32
        // filterID
        var paused, ignoreRules sql.NullBool

        if err := rows.Scan(&a.ID, &a.Name, &a.Type, &a.Enabled, &execCmd, &execArgs, &watchFolder, &category, &tags, &label, &savePath, &paused, &ignoreRules, &limitDl, &limitUl, &clientID); err != nil {
            log.Fatal().Err(err)
        }
        if err != nil {
            return nil, err
        }

        a.ExecCmd = execCmd.String
        a.ExecArgs = execArgs.String
        a.WatchFolder = watchFolder.String
        a.Category = category.String
        a.Tags = tags.String
        a.Label = label.String
        a.SavePath = savePath.String
        a.Paused = paused.Bool
        a.IgnoreRules = ignoreRules.Bool
        a.LimitUploadSpeed = limitUl.Int64
        a.LimitDownloadSpeed = limitDl.Int64
        a.ClientID = clientID.Int32

        actions = append(actions, a)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return actions, nil
}

func (r *ActionRepo) List() ([]domain.Action, error) {

    rows, err := r.db.Query("SELECT id, name, type, enabled, exec_cmd, exec_args, watch_folder, category, tags, label, save_path, paused, ignore_rules, limit_download_speed, limit_upload_speed, client_id FROM action")
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var actions []domain.Action
    for rows.Next() {
        var a domain.Action

        var execCmd, execArgs, watchFolder, category, tags, label, savePath sql.NullString
        var limitUl, limitDl sql.NullInt64
        var clientID sql.NullInt32
        var paused, ignoreRules sql.NullBool

        if err := rows.Scan(&a.ID, &a.Name, &a.Type, &a.Enabled, &execCmd, &execArgs, &watchFolder, &category, &tags, &label, &savePath, &paused, &ignoreRules, &limitDl, &limitUl, &clientID); err != nil {
            log.Fatal().Err(err)
        }
        if err != nil {
            return nil, err
        }

        a.Category = category.String
        a.Tags = tags.String
        a.Label = label.String
        a.SavePath = savePath.String
        a.Paused = paused.Bool
        a.IgnoreRules = ignoreRules.Bool
        a.LimitUploadSpeed = limitUl.Int64
        a.LimitDownloadSpeed = limitDl.Int64
        a.ClientID = clientID.Int32

        actions = append(actions, a)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return actions, nil
}

func (r *ActionRepo) Delete(actionID int) error {
    res, err := r.db.Exec(`DELETE FROM action WHERE action.id = ?`, actionID)
    if err != nil {
        return err
    }

    rows, _ := res.RowsAffected()

    log.Info().Msgf("rows affected %v", rows)

    return nil
}

func (r *ActionRepo) Store(action domain.Action) (*domain.Action, error) {

    execCmd := toNullString(action.ExecCmd)
    execArgs := toNullString(action.ExecArgs)
    watchFolder := toNullString(action.WatchFolder)
    category := toNullString(action.Category)
    tags := toNullString(action.Tags)
    label := toNullString(action.Label)
    savePath := toNullString(action.SavePath)

    limitDL := toNullInt64(action.LimitDownloadSpeed)
    limitUL := toNullInt64(action.LimitUploadSpeed)
    clientID := toNullInt32(action.ClientID)
    filterID := toNullInt32(int32(action.FilterID))

    var err error
    if action.ID != 0 {
        log.Info().Msg("UPDATE existing record")
        _, err = r.db.Exec(`UPDATE action SET name = ?, type = ?, enabled = ?, exec_cmd = ?, exec_args = ?, watch_folder = ? , category =? , tags = ?, label = ?, save_path = ?, paused = ?, ignore_rules = ?, limit_upload_speed = ?, limit_download_speed = ?, client_id = ?
            WHERE id = ?`, action.Name, action.Type, action.Enabled, execCmd, execArgs, watchFolder, category, tags, label, savePath, action.Paused, action.IgnoreRules, limitUL, limitDL, clientID, action.ID)
    } else {
        var res sql.Result

        res, err = r.db.Exec(`INSERT INTO action(name, type, enabled, exec_cmd, exec_args, watch_folder, category, tags, label, save_path, paused, ignore_rules, limit_upload_speed, limit_download_speed, client_id, filter_id)
            VALUES (?, ?, ?, ?, ?,? ,?, ?,?,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING`, action.Name, action.Type, action.Enabled, execCmd, execArgs, watchFolder, category, tags, label, savePath, action.Paused, action.IgnoreRules, limitUL, limitDL, clientID, filterID)
        if err != nil {
            log.Error().Err(err)
            return nil, err
        }

        resId, _ := res.LastInsertId()
        log.Info().Msgf("LAST INSERT ID %v", resId)
        action.ID = int(resId)
    }

    return &action, nil
}

func (r *ActionRepo) ToggleEnabled(actionID int) error {

    var err error
    var res sql.Result

    res, err = r.db.Exec(`UPDATE action SET enabled = NOT enabled WHERE id = ?`, actionID)
    if err != nil {
        log.Error().Err(err)
        return err
    }

    resId, _ := res.LastInsertId()
    log.Info().Msgf("LAST INSERT ID %v", resId)

    return nil
}

func toNullString(s string) sql.NullString {
    return sql.NullString{
        String: s,
        Valid:  s != "",
    }
}

func toNullInt32(s int32) sql.NullInt32 {
    return sql.NullInt32{
        Int32: s,
        Valid: s != 0,
    }
}
func toNullInt64(s int64) sql.NullInt64 {
    return sql.NullInt64{
        Int64: s,
        Valid: s != 0,
    }
}
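A minimal wiring sketch, not part of this commit: how these repositories are likely constructed from a single *sql.DB handle, combining the helpers added below in migrate.go and utils.go. It assumes the domain.ActionRepo interface defined elsewhere in this commit exposes FindByFilterID, and it stands in mattn/go-sqlite3 as the SQLite driver, since this excerpt does not show which driver the commit actually wires up.

package main

import (
    "database/sql"
    "log"

    "github.com/autobrr/autobrr/internal/database"

    _ "github.com/mattn/go-sqlite3" // assumption: any driver that registers "sqlite3" would do
)

func main() {
    // Open (or create) the SQLite file and apply the schema from migrate.go.
    db, err := sql.Open("sqlite3", database.DataSourceName("/config", "autobrr.db"))
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if err := database.Migrate(db); err != nil {
        log.Fatal(err)
    }

    // Each repo only needs the shared *sql.DB handle.
    actionRepo := database.NewActionRepo(db)

    // Fetch every action attached to filter 1 (assumes the interface declares this method).
    actions, err := actionRepo.FindByFilterID(1)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("found %d actions", len(actions))
}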
19
internal/database/announce.go
Normal file
@@ -0,0 +1,19 @@
package database

import (
    "database/sql"

    "github.com/autobrr/autobrr/internal/domain"
)

type AnnounceRepo struct {
    db *sql.DB
}

func NewAnnounceRepo(db *sql.DB) domain.AnnounceRepo {
    return &AnnounceRepo{db: db}
}

func (a *AnnounceRepo) Store(announce domain.Announce) error {
    return nil
}
134
internal/database/download_client.go
Normal file
@@ -0,0 +1,134 @@
package database

import (
    "database/sql"

    "github.com/autobrr/autobrr/internal/domain"

    "github.com/rs/zerolog/log"
)

type DownloadClientRepo struct {
    db *sql.DB
}

func NewDownloadClientRepo(db *sql.DB) domain.DownloadClientRepo {
    return &DownloadClientRepo{db: db}
}

func (r *DownloadClientRepo) List() ([]domain.DownloadClient, error) {

    rows, err := r.db.Query("SELECT id, name, type, enabled, host, port, ssl, username, password FROM client")
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    clients := make([]domain.DownloadClient, 0)

    for rows.Next() {
        var f domain.DownloadClient

        if err := rows.Scan(&f.ID, &f.Name, &f.Type, &f.Enabled, &f.Host, &f.Port, &f.SSL, &f.Username, &f.Password); err != nil {
            log.Error().Err(err)
        }
        if err != nil {
            return nil, err
        }

        clients = append(clients, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return clients, nil
}

func (r *DownloadClientRepo) FindByID(id int32) (*domain.DownloadClient, error) {

    query := `
        SELECT id, name, type, enabled, host, port, ssl, username, password FROM client WHERE id = ?
    `

    row := r.db.QueryRow(query, id)
    if err := row.Err(); err != nil {
        return nil, err
    }

    var client domain.DownloadClient

    if err := row.Scan(&client.ID, &client.Name, &client.Type, &client.Enabled, &client.Host, &client.Port, &client.SSL, &client.Username, &client.Password); err != nil {
        log.Error().Err(err).Msg("could not scan download client to struct")
        return nil, err
    }

    return &client, nil
}

func (r *DownloadClientRepo) FindByActionID(actionID int) ([]domain.DownloadClient, error) {

    rows, err := r.db.Query("SELECT id, name, type, enabled, host, port, ssl, username, password FROM client, action_client WHERE client.id = action_client.client_id AND action_client.action_id = ?", actionID)
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var clients []domain.DownloadClient
    for rows.Next() {
        var f domain.DownloadClient

        if err := rows.Scan(&f.ID, &f.Name, &f.Type, &f.Enabled, &f.Host, &f.Port, &f.SSL, &f.Username, &f.Password); err != nil {
            log.Error().Err(err)
        }
        if err != nil {
            return nil, err
        }

        clients = append(clients, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return clients, nil
}

func (r *DownloadClientRepo) Store(client domain.DownloadClient) (*domain.DownloadClient, error) {

    var err error
    if client.ID != 0 {
        log.Info().Msg("UPDATE existing record")
        _, err = r.db.Exec(`UPDATE client SET name = ?, type = ?, enabled = ?, host = ?, port = ?, ssl = ?, username = ?, password = ? WHERE id = ?`, client.Name, client.Type, client.Enabled, client.Host, client.Port, client.SSL, client.Username, client.Password, client.ID)
    } else {
        var res sql.Result

        res, err = r.db.Exec(`INSERT INTO client(name, type, enabled, host, port, ssl, username, password)
            VALUES (?, ?, ?, ?, ?, ? , ?, ?) ON CONFLICT DO NOTHING`, client.Name, client.Type, client.Enabled, client.Host, client.Port, client.SSL, client.Username, client.Password)
        if err != nil {
            log.Error().Err(err)
            return nil, err
        }

        resId, _ := res.LastInsertId()
        log.Info().Msgf("LAST INSERT ID %v", resId)
        client.ID = int(resId)
    }

    return &client, nil
}

func (r *DownloadClientRepo) Delete(clientID int) error {
    res, err := r.db.Exec(`DELETE FROM client WHERE client.id = ?`, clientID)
    if err != nil {
        return err
    }

    rows, _ := res.RowsAffected()

    log.Info().Msgf("rows affected %v", rows)

    return nil
}
441
internal/database/filter.go
Normal file
@@ -0,0 +1,441 @@
package database

import (
    "database/sql"
    "strings"
    "time"

    "github.com/lib/pq"
    "github.com/rs/zerolog/log"

    "github.com/autobrr/autobrr/internal/domain"
)

type FilterRepo struct {
    db *sql.DB
}

func NewFilterRepo(db *sql.DB) domain.FilterRepo {
    return &FilterRepo{db: db}
}

func (r *FilterRepo) ListFilters() ([]domain.Filter, error) {

    rows, err := r.db.Query("SELECT id, enabled, name, match_releases, except_releases, created_at, updated_at FROM filter")
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var filters []domain.Filter
    for rows.Next() {
        var f domain.Filter

        var matchReleases, exceptReleases sql.NullString
        var createdAt, updatedAt string

        if err := rows.Scan(&f.ID, &f.Enabled, &f.Name, &matchReleases, &exceptReleases, &createdAt, &updatedAt); err != nil {
            log.Error().Stack().Err(err).Msg("filters_list: error scanning data to struct")
        }
        if err != nil {
            return nil, err
        }

        f.MatchReleases = matchReleases.String
        f.ExceptReleases = exceptReleases.String

        ua, _ := time.Parse(time.RFC3339, updatedAt)
        ca, _ := time.Parse(time.RFC3339, createdAt)

        f.UpdatedAt = ua
        f.CreatedAt = ca

        filters = append(filters, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return filters, nil
}

func (r *FilterRepo) FindByID(filterID int) (*domain.Filter, error) {

    row := r.db.QueryRow("SELECT id, enabled, name, min_size, max_size, delay, match_releases, except_releases, use_regex, match_release_groups, except_release_groups, scene, freeleech, freeleech_percent, shows, seasons, episodes, resolutions, codecs, sources, containers, years, match_categories, except_categories, match_uploaders, except_uploaders, tags, except_tags, created_at, updated_at FROM filter WHERE id = ?", filterID)

    var f domain.Filter

    if err := row.Err(); err != nil {
        return nil, err
    }

    var minSize, maxSize, matchReleases, exceptReleases, matchReleaseGroups, exceptReleaseGroups, freeleechPercent, shows, seasons, episodes, years, matchCategories, exceptCategories, matchUploaders, exceptUploaders, tags, exceptTags sql.NullString
    var useRegex, scene, freeleech sql.NullBool
    var delay sql.NullInt32
    var createdAt, updatedAt string

    if err := row.Scan(&f.ID, &f.Enabled, &f.Name, &minSize, &maxSize, &delay, &matchReleases, &exceptReleases, &useRegex, &matchReleaseGroups, &exceptReleaseGroups, &scene, &freeleech, &freeleechPercent, &shows, &seasons, &episodes, pq.Array(&f.Resolutions), pq.Array(&f.Codecs), pq.Array(&f.Sources), pq.Array(&f.Containers), &years, &matchCategories, &exceptCategories, &matchUploaders, &exceptUploaders, &tags, &exceptTags, &createdAt, &updatedAt); err != nil {
        log.Error().Stack().Err(err).Msgf("filter: %v : error scanning data to struct", filterID)
        return nil, err
    }

    f.MinSize = minSize.String
    f.MaxSize = maxSize.String
    f.Delay = int(delay.Int32)
    f.MatchReleases = matchReleases.String
    f.ExceptReleases = exceptReleases.String
    f.MatchReleaseGroups = matchReleaseGroups.String
    f.ExceptReleaseGroups = exceptReleaseGroups.String
    f.FreeleechPercent = freeleechPercent.String
    f.Shows = shows.String
    f.Seasons = seasons.String
    f.Episodes = episodes.String
    f.Years = years.String
    f.MatchCategories = matchCategories.String
    f.ExceptCategories = exceptCategories.String
    f.MatchUploaders = matchUploaders.String
    f.ExceptUploaders = exceptUploaders.String
    f.Tags = tags.String
    f.ExceptTags = exceptTags.String
    f.UseRegex = useRegex.Bool
    f.Scene = scene.Bool
    f.Freeleech = freeleech.Bool

    updatedTime, _ := time.Parse(time.RFC3339, updatedAt)
    createdTime, _ := time.Parse(time.RFC3339, createdAt)

    f.UpdatedAt = updatedTime
    f.CreatedAt = createdTime

    return &f, nil
}

// TODO remove
func (r *FilterRepo) FindFiltersForSite(site string) ([]domain.Filter, error) {

    rows, err := r.db.Query("SELECT id, enabled, name, match_releases, except_releases, created_at, updated_at FROM filter WHERE match_sites LIKE ?", site)
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var filters []domain.Filter
    for rows.Next() {
        var f domain.Filter

        if err := rows.Scan(&f.ID, &f.Enabled, &f.Name, pq.Array(&f.MatchReleases), pq.Array(&f.ExceptReleases), &f.CreatedAt, &f.UpdatedAt); err != nil {
            log.Error().Stack().Err(err).Msg("error scanning data to struct")
        }
        if err != nil {
            return nil, err
        }

        filters = append(filters, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return filters, nil
}

func (r *FilterRepo) FindByIndexerIdentifier(indexer string) ([]domain.Filter, error) {

    rows, err := r.db.Query(`
        SELECT
            f.id,
            f.enabled,
            f.name,
            f.min_size,
            f.max_size,
            f.delay,
            f.match_releases,
            f.except_releases,
            f.use_regex,
            f.match_release_groups,
            f.except_release_groups,
            f.scene,
            f.freeleech,
            f.freeleech_percent,
            f.shows,
            f.seasons,
            f.episodes,
            f.resolutions,
            f.codecs,
            f.sources,
            f.containers,
            f.years,
            f.match_categories,
            f.except_categories,
            f.match_uploaders,
            f.except_uploaders,
            f.tags,
            f.except_tags,
            f.created_at,
            f.updated_at
        FROM filter f
        JOIN filter_indexer fi on f.id = fi.filter_id
        JOIN indexer i on i.id = fi.indexer_id
        WHERE i.identifier = ?`, indexer)
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var filters []domain.Filter
    for rows.Next() {
        var f domain.Filter

        var minSize, maxSize, matchReleases, exceptReleases, matchReleaseGroups, exceptReleaseGroups, freeleechPercent, shows, seasons, episodes, years, matchCategories, exceptCategories, matchUploaders, exceptUploaders, tags, exceptTags sql.NullString
        var useRegex, scene, freeleech sql.NullBool
        var delay sql.NullInt32
        var createdAt, updatedAt string

        if err := rows.Scan(&f.ID, &f.Enabled, &f.Name, &minSize, &maxSize, &delay, &matchReleases, &exceptReleases, &useRegex, &matchReleaseGroups, &exceptReleaseGroups, &scene, &freeleech, &freeleechPercent, &shows, &seasons, &episodes, pq.Array(&f.Resolutions), pq.Array(&f.Codecs), pq.Array(&f.Sources), pq.Array(&f.Containers), &years, &matchCategories, &exceptCategories, &matchUploaders, &exceptUploaders, &tags, &exceptTags, &createdAt, &updatedAt); err != nil {
            log.Error().Stack().Err(err).Msg("error scanning data to struct")
        }
        if err != nil {
            return nil, err
        }

        f.MinSize = minSize.String
        f.MaxSize = maxSize.String
        f.Delay = int(delay.Int32)
        f.MatchReleases = matchReleases.String
        f.ExceptReleases = exceptReleases.String
        f.MatchReleaseGroups = matchReleaseGroups.String
        f.ExceptReleaseGroups = exceptReleaseGroups.String
        f.FreeleechPercent = freeleechPercent.String
        f.Shows = shows.String
        f.Seasons = seasons.String
        f.Episodes = episodes.String
        f.Years = years.String
        f.MatchCategories = matchCategories.String
        f.ExceptCategories = exceptCategories.String
        f.MatchUploaders = matchUploaders.String
        f.ExceptUploaders = exceptUploaders.String
        f.Tags = tags.String
        f.ExceptTags = exceptTags.String
        f.UseRegex = useRegex.Bool
        f.Scene = scene.Bool
        f.Freeleech = freeleech.Bool

        updatedTime, _ := time.Parse(time.RFC3339, updatedAt)
        createdTime, _ := time.Parse(time.RFC3339, createdAt)

        f.UpdatedAt = updatedTime
        f.CreatedAt = createdTime

        filters = append(filters, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return filters, nil
}

func (r *FilterRepo) Store(filter domain.Filter) (*domain.Filter, error) {

    var err error
    if filter.ID != 0 {
        log.Debug().Msg("update existing record")
    } else {
        var res sql.Result

        res, err = r.db.Exec(`INSERT INTO filter (
            name,
            enabled,
            min_size,
            max_size,
            delay,
            match_releases,
            except_releases,
            use_regex,
            match_release_groups,
            except_release_groups,
            scene,
            freeleech,
            freeleech_percent,
            shows,
            seasons,
            episodes,
            resolutions,
            codecs,
            sources,
            containers,
            years,
            match_categories,
            except_categories,
            match_uploaders,
            except_uploaders,
            tags,
            except_tags
        )
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27) ON CONFLICT DO NOTHING`,
            filter.Name,
            filter.Enabled,
            filter.MinSize,
            filter.MaxSize,
            filter.Delay,
            filter.MatchReleases,
            filter.ExceptReleases,
            filter.UseRegex,
            filter.MatchReleaseGroups,
            filter.ExceptReleaseGroups,
            filter.Scene,
            filter.Freeleech,
            filter.FreeleechPercent,
            filter.Shows,
            filter.Seasons,
            filter.Episodes,
            pq.Array(filter.Resolutions),
            pq.Array(filter.Codecs),
            pq.Array(filter.Sources),
            pq.Array(filter.Containers),
            filter.Years,
            filter.MatchCategories,
            filter.ExceptCategories,
            filter.MatchUploaders,
            filter.ExceptUploaders,
            filter.Tags,
            filter.ExceptTags,
        )
        if err != nil {
            log.Error().Stack().Err(err).Msg("error executing query")
            return nil, err
        }

        resId, _ := res.LastInsertId()
        filter.ID = int(resId)
    }

    return &filter, nil
}

func (r *FilterRepo) Update(filter domain.Filter) (*domain.Filter, error) {

    //var res sql.Result

    var err error
    _, err = r.db.Exec(`
        UPDATE filter SET
            name = ?,
            enabled = ?,
            min_size = ?,
            max_size = ?,
            delay = ?,
            match_releases = ?,
            except_releases = ?,
            use_regex = ?,
            match_release_groups = ?,
            except_release_groups = ?,
            scene = ?,
            freeleech = ?,
            freeleech_percent = ?,
            shows = ?,
            seasons = ?,
            episodes = ?,
            resolutions = ?,
            codecs = ?,
            sources = ?,
            containers = ?,
            years = ?,
            match_categories = ?,
            except_categories = ?,
            match_uploaders = ?,
            except_uploaders = ?,
            tags = ?,
            except_tags = ?,
            updated_at = CURRENT_TIMESTAMP
        WHERE id = ?`,
        filter.Name,
        filter.Enabled,
        filter.MinSize,
        filter.MaxSize,
        filter.Delay,
        filter.MatchReleases,
        filter.ExceptReleases,
        filter.UseRegex,
        filter.MatchReleaseGroups,
        filter.ExceptReleaseGroups,
        filter.Scene,
        filter.Freeleech,
        filter.FreeleechPercent,
        filter.Shows,
        filter.Seasons,
        filter.Episodes,
        pq.Array(filter.Resolutions),
        pq.Array(filter.Codecs),
        pq.Array(filter.Sources),
        pq.Array(filter.Containers),
        filter.Years,
        filter.MatchCategories,
        filter.ExceptCategories,
        filter.MatchUploaders,
        filter.ExceptUploaders,
        filter.Tags,
        filter.ExceptTags,
        filter.ID,
    )
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return nil, err
    }

    return &filter, nil
}

func (r *FilterRepo) StoreIndexerConnection(filterID int, indexerID int) error {
    query := `INSERT INTO filter_indexer (filter_id, indexer_id) VALUES ($1, $2)`
    _, err := r.db.Exec(query, filterID, indexerID)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return err
    }

    return nil
}

func (r *FilterRepo) DeleteIndexerConnections(filterID int) error {

    query := `DELETE FROM filter_indexer WHERE filter_id = ?`
    _, err := r.db.Exec(query, filterID)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return err
    }

    return nil
}

func (r *FilterRepo) Delete(filterID int) error {

    res, err := r.db.Exec(`DELETE FROM filter WHERE id = ?`, filterID)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return err
    }

    rows, _ := res.RowsAffected()

    log.Info().Msgf("rows affected %v", rows)

    return nil
}

// Split string to slice. We store comma separated strings and convert to slice
func stringToSlice(str string) []string {
    if str == "" {
        return []string{}
    } else if !strings.Contains(str, ",") {
        return []string{str}
    }

    split := strings.Split(str, ",")

    return split
}
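stringToSlice is not referenced in this file yet; it is the read side of the comma-separated convention the filter columns use (shows, seasons, categories, uploaders, and tags are stored as TEXT rather than arrays). A test-style sketch of its behaviour, under a hypothetical filter_test.go in the same package:

package database

import "testing"

func TestStringToSlice(t *testing.T) {
    // Empty input, a single value, and a comma-separated pair.
    cases := map[string][]string{
        "":            {},
        "1080p":       {"1080p"},
        "1080p,2160p": {"1080p", "2160p"},
    }

    for in, want := range cases {
        got := stringToSlice(in)
        if len(got) != len(want) {
            t.Errorf("stringToSlice(%q) = %v, want %v", in, got, want)
            continue
        }
        for i := range want {
            if got[i] != want[i] {
                t.Errorf("stringToSlice(%q) = %v, want %v", in, got, want)
            }
        }
    }
}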
152
internal/database/indexer.go
Normal file
@@ -0,0 +1,152 @@
package database

import (
    "database/sql"
    "encoding/json"

    "github.com/autobrr/autobrr/internal/domain"
    "github.com/rs/zerolog/log"
)

type IndexerRepo struct {
    db *sql.DB
}

func NewIndexerRepo(db *sql.DB) domain.IndexerRepo {
    return &IndexerRepo{
        db: db,
    }
}

func (r *IndexerRepo) Store(indexer domain.Indexer) (*domain.Indexer, error) {

    settings, err := json.Marshal(indexer.Settings)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error marshaling json data")
        return nil, err
    }

    _, err = r.db.Exec(`INSERT INTO indexer (enabled, name, identifier, settings) VALUES (?, ?, ?, ?)`, indexer.Enabled, indexer.Name, indexer.Identifier, settings)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return nil, err
    }

    return &indexer, nil
}

func (r *IndexerRepo) Update(indexer domain.Indexer) (*domain.Indexer, error) {

    sett, err := json.Marshal(indexer.Settings)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error marshaling json data")
        return nil, err
    }

    _, err = r.db.Exec(`UPDATE indexer SET enabled = ?, name = ?, settings = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?`, indexer.Enabled, indexer.Name, sett, indexer.ID)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return nil, err
    }

    return &indexer, nil
}

func (r *IndexerRepo) List() ([]domain.Indexer, error) {

    rows, err := r.db.Query("SELECT id, enabled, name, identifier, settings FROM indexer")
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var indexers []domain.Indexer
    for rows.Next() {
        var f domain.Indexer

        var settings string
        var settingsMap map[string]string

        if err := rows.Scan(&f.ID, &f.Enabled, &f.Name, &f.Identifier, &settings); err != nil {
            log.Error().Stack().Err(err).Msg("indexer.list: error scanning data to struct")
        }
        if err != nil {
            return nil, err
        }

        err = json.Unmarshal([]byte(settings), &settingsMap)
        if err != nil {
            log.Error().Stack().Err(err).Msg("indexer.list: error unmarshal settings")
            return nil, err
        }

        f.Settings = settingsMap

        indexers = append(indexers, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return indexers, nil
}

func (r *IndexerRepo) FindByFilterID(id int) ([]domain.Indexer, error) {
    rows, err := r.db.Query(`
        SELECT i.id, i.enabled, i.name, i.identifier
        FROM indexer i
        JOIN filter_indexer fi on i.id = fi.indexer_id
        WHERE fi.filter_id = ?`, id)
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var indexers []domain.Indexer
    for rows.Next() {
        var f domain.Indexer

        //var settings string
        //var settingsMap map[string]string

        if err := rows.Scan(&f.ID, &f.Enabled, &f.Name, &f.Identifier); err != nil {
            log.Error().Stack().Err(err).Msg("indexer.list: error scanning data to struct")
        }
        if err != nil {
            return nil, err
        }

        //err = json.Unmarshal([]byte(settings), &settingsMap)
        //if err != nil {
        //	log.Error().Stack().Err(err).Msg("indexer.list: error unmarshal settings")
        //	return nil, err
        //}
        //
        //f.Settings = settingsMap

        indexers = append(indexers, f)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return indexers, nil

}

func (r *IndexerRepo) Delete(id int) error {

    res, err := r.db.Exec(`DELETE FROM indexer WHERE id = ?`, id)
    if err != nil {
        log.Error().Stack().Err(err).Msg("error executing query")
        return err
    }

    rows, _ := res.RowsAffected()

    log.Info().Msgf("rows affected %v", rows)

    return nil
}
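The indexer settings column is plain TEXT holding JSON: Store and Update marshal the map on the way in, and List unmarshals it back into map[string]string. A standalone sketch of that round trip, independent of the database, with illustrative values only:

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

func main() {
    // Same shape as the settings map this commit scans into domain.Indexer.
    settings := map[string]string{"api_key": "abc123", "host": "https://indexer.example"}

    blob, err := json.Marshal(settings) // what Store writes into indexer.settings
    if err != nil {
        log.Fatal(err)
    }

    var out map[string]string
    if err := json.Unmarshal(blob, &out); err != nil { // what List reads back
        log.Fatal(err)
    }

    fmt.Println(string(blob))
    fmt.Println(out["api_key"])
}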
277
internal/database/irc.go
Normal file
@@ -0,0 +1,277 @@
package database

import (
    "context"
    "database/sql"
    "strings"

    "github.com/autobrr/autobrr/internal/domain"

    "github.com/rs/zerolog/log"
)

type IrcRepo struct {
    db *sql.DB
}

func NewIrcRepo(db *sql.DB) domain.IrcRepo {
    return &IrcRepo{db: db}
}

func (ir *IrcRepo) Store(announce domain.Announce) error {
    return nil
}

func (ir *IrcRepo) GetNetworkByID(id int64) (*domain.IrcNetwork, error) {

    row := ir.db.QueryRow("SELECT id, enabled, name, addr, tls, nick, pass, connect_commands, sasl_mechanism, sasl_plain_username, sasl_plain_password FROM irc_network WHERE id = ?", id)
    if err := row.Err(); err != nil {
        log.Fatal().Err(err)
        return nil, err
    }

    var n domain.IrcNetwork

    var pass, connectCommands sql.NullString
    var saslMechanism, saslPlainUsername, saslPlainPassword sql.NullString
    var tls sql.NullBool

    if err := row.Scan(&n.ID, &n.Enabled, &n.Name, &n.Addr, &tls, &n.Nick, &pass, &connectCommands, &saslMechanism, &saslPlainUsername, &saslPlainPassword); err != nil {
        log.Fatal().Err(err)
    }

    n.TLS = tls.Bool
    n.Pass = pass.String
    if connectCommands.Valid {
        n.ConnectCommands = strings.Split(connectCommands.String, "\r\n")
    }
    n.SASL.Mechanism = saslMechanism.String
    n.SASL.Plain.Username = saslPlainUsername.String
    n.SASL.Plain.Password = saslPlainPassword.String

    return &n, nil
}

func (ir *IrcRepo) DeleteNetwork(ctx context.Context, id int64) error {
    tx, err := ir.db.BeginTx(ctx, nil)
    if err != nil {
        return err
    }

    defer tx.Rollback()

    _, err = tx.ExecContext(ctx, `DELETE FROM irc_network WHERE id = ?`, id)
    if err != nil {
        log.Error().Stack().Err(err).Msgf("error deleting network: %v", id)
        return err
    }

    _, err = tx.ExecContext(ctx, `DELETE FROM irc_channel WHERE network_id = ?`, id)
    if err != nil {
        log.Error().Stack().Err(err).Msgf("error deleting channels for network: %v", id)
        return err
    }

    err = tx.Commit()
    if err != nil {
        log.Error().Stack().Err(err).Msgf("error deleting network: %v", id)
        return err

    }

    return nil
}

func (ir *IrcRepo) ListNetworks(ctx context.Context) ([]domain.IrcNetwork, error) {

    rows, err := ir.db.QueryContext(ctx, "SELECT id, enabled, name, addr, tls, nick, pass, connect_commands FROM irc_network")
    if err != nil {
        log.Fatal().Err(err)
    }

    defer rows.Close()

    var networks []domain.IrcNetwork
    for rows.Next() {
        var net domain.IrcNetwork

        //var username, realname, pass, connectCommands sql.NullString
        var pass, connectCommands sql.NullString
        var tls sql.NullBool

        if err := rows.Scan(&net.ID, &net.Enabled, &net.Name, &net.Addr, &tls, &net.Nick, &pass, &connectCommands); err != nil {
            log.Fatal().Err(err)
        }

        net.TLS = tls.Bool
        net.Pass = pass.String

        if connectCommands.Valid {
            net.ConnectCommands = strings.Split(connectCommands.String, "\r\n")
        }

        networks = append(networks, net)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return networks, nil
}

func (ir *IrcRepo) ListChannels(networkID int64) ([]domain.IrcChannel, error) {

    rows, err := ir.db.Query("SELECT id, name, enabled FROM irc_channel WHERE network_id = ?", networkID)
    if err != nil {
        log.Fatal().Err(err)
    }
    defer rows.Close()

    var channels []domain.IrcChannel
    for rows.Next() {
        var ch domain.IrcChannel

        //if err := rows.Scan(&ch.ID, &ch.Name, &ch.Enabled, &ch.Pass, &ch.InviteCommand, &ch.InviteHTTPURL, &ch.InviteHTTPHeader, &ch.InviteHTTPData); err != nil {
        if err := rows.Scan(&ch.ID, &ch.Name, &ch.Enabled); err != nil {
            log.Fatal().Err(err)
        }

        channels = append(channels, ch)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    return channels, nil
}

func (ir *IrcRepo) StoreNetwork(network *domain.IrcNetwork) error {

    netName := toNullString(network.Name)
    pass := toNullString(network.Pass)
    connectCommands := toNullString(strings.Join(network.ConnectCommands, "\r\n"))

    var saslMechanism, saslPlainUsername, saslPlainPassword sql.NullString
    if network.SASL.Mechanism != "" {
        saslMechanism = toNullString(network.SASL.Mechanism)
        switch network.SASL.Mechanism {
        case "PLAIN":
            saslPlainUsername = toNullString(network.SASL.Plain.Username)
            saslPlainPassword = toNullString(network.SASL.Plain.Password)
        default:
            log.Warn().Msgf("unsupported SASL mechanism: %q", network.SASL.Mechanism)
            //return fmt.Errorf("cannot store network: unsupported SASL mechanism %q", network.SASL.Mechanism)
        }
    }

    var err error
    if network.ID != 0 {
        // update record
        _, err = ir.db.Exec(`UPDATE irc_network
            SET enabled = ?,
                name = ?,
                addr = ?,
                tls = ?,
                nick = ?,
                pass = ?,
                connect_commands = ?,
                sasl_mechanism = ?,
                sasl_plain_username = ?,
                sasl_plain_password = ?,
                updated_at = CURRENT_TIMESTAMP
            WHERE id = ?`,
            network.Enabled,
            netName,
            network.Addr,
            network.TLS,
            network.Nick,
            pass,
            connectCommands,
            saslMechanism,
            saslPlainUsername,
            saslPlainPassword,
            network.ID,
        )
    } else {
        var res sql.Result

        res, err = ir.db.Exec(`INSERT INTO irc_network (
            enabled,
            name,
            addr,
            tls,
            nick,
            pass,
            connect_commands,
            sasl_mechanism,
            sasl_plain_username,
            sasl_plain_password
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
            network.Enabled,
            netName,
            network.Addr,
            network.TLS,
            network.Nick,
            pass,
            connectCommands,
            saslMechanism,
            saslPlainUsername,
            saslPlainPassword,
        )
        if err != nil {
            log.Error().Stack().Err(err).Msg("error executing query")
            return err
        }

        network.ID, err = res.LastInsertId()
    }

    return err
}

func (ir *IrcRepo) StoreChannel(networkID int64, channel *domain.IrcChannel) error {
    pass := toNullString(channel.Password)

    var err error
    if channel.ID != 0 {
        // update record
        _, err = ir.db.Exec(`UPDATE irc_channel
            SET
                enabled = ?,
                detached = ?,
                name = ?,
                password = ?
            WHERE
                id = ?`,
            channel.Enabled,
            channel.Detached,
            channel.Name,
            pass,
            channel.ID,
        )
    } else {
        var res sql.Result

        res, err = ir.db.Exec(`INSERT INTO irc_channel (
            enabled,
            detached,
            name,
            password,
            network_id
        ) VALUES (?, ?, ?, ?, ?)`,
            channel.Enabled,
            true,
            channel.Name,
            pass,
            networkID,
        )
        if err != nil {
            log.Error().Stack().Err(err).Msg("error executing query")
            return err
        }

        channel.ID, err = res.LastInsertId()
    }

    return err
}
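StoreNetwork flattens ConnectCommands into the single connect_commands TEXT column joined with CRLF, and GetNetworkByID/ListNetworks split it back only when the scanned value is non-NULL. A minimal sketch of that round trip, with made-up command values:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // What StoreNetwork writes: commands joined with "\r\n" into one column.
    cmds := []string{"MODE nick +x", "PRIVMSG NickServ IDENTIFY hunter2"}
    stored := strings.Join(cmds, "\r\n")

    // What the readers do after scanning a non-NULL connect_commands value.
    back := strings.Split(stored, "\r\n")

    fmt.Printf("%d commands stored, %d recovered\n", len(cmds), len(back))
}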
175
internal/database/migrate.go
Normal file
@@ -0,0 +1,175 @@
package database

import (
    "database/sql"
    "fmt"

    "github.com/rs/zerolog/log"
)

const schema = `
CREATE TABLE indexer
(
    id         INTEGER PRIMARY KEY,
    identifier TEXT,
    enabled    BOOLEAN,
    name       TEXT NOT NULL,
    settings   TEXT,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE irc_network
(
    id                  INTEGER PRIMARY KEY,
    enabled             BOOLEAN,
    name                TEXT NOT NULL,
    addr                TEXT NOT NULL,
    nick                TEXT NOT NULL,
    tls                 BOOLEAN,
    pass                TEXT,
    connect_commands    TEXT,
    sasl_mechanism      TEXT,
    sasl_plain_username TEXT,
    sasl_plain_password TEXT,
    created_at          TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at          TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    unique (addr, nick)
);

CREATE TABLE irc_channel
(
    id         INTEGER PRIMARY KEY,
    enabled    BOOLEAN,
    name       TEXT NOT NULL,
    password   TEXT,
    detached   BOOLEAN,
    network_id INTEGER NOT NULL,
    FOREIGN KEY (network_id) REFERENCES irc_network(id),
    unique (network_id, name)
);

CREATE TABLE filter
(
    id                    INTEGER PRIMARY KEY,
    enabled               BOOLEAN,
    name                  TEXT NOT NULL,
    min_size              TEXT,
    max_size              TEXT,
    delay                 INTEGER,
    match_releases        TEXT,
    except_releases       TEXT,
    use_regex             BOOLEAN,
    match_release_groups  TEXT,
    except_release_groups TEXT,
    scene                 BOOLEAN,
    freeleech             BOOLEAN,
    freeleech_percent     TEXT,
    shows                 TEXT,
    seasons               TEXT,
    episodes              TEXT,
    resolutions           TEXT [] DEFAULT '{}' NOT NULL,
    codecs                TEXT [] DEFAULT '{}' NOT NULL,
    sources               TEXT [] DEFAULT '{}' NOT NULL,
    containers            TEXT [] DEFAULT '{}' NOT NULL,
    years                 TEXT,
    match_categories      TEXT,
    except_categories     TEXT,
    match_uploaders       TEXT,
    except_uploaders      TEXT,
    tags                  TEXT,
    except_tags           TEXT,
    created_at            TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at            TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE filter_indexer
(
    filter_id  INTEGER,
    indexer_id INTEGER,
    FOREIGN KEY (filter_id) REFERENCES filter(id),
    FOREIGN KEY (indexer_id) REFERENCES indexer(id),
    PRIMARY KEY (filter_id, indexer_id)
);

CREATE TABLE client
(
    id       INTEGER PRIMARY KEY,
    name     TEXT NOT NULL,
    enabled  BOOLEAN,
    type     TEXT,
    host     TEXT NOT NULL,
    port     INTEGER,
    ssl      BOOLEAN,
    username TEXT,
    password TEXT,
    settings TEXT
);

CREATE TABLE action
(
    id                   INTEGER PRIMARY KEY,
    name                 TEXT,
    type                 TEXT,
    enabled              BOOLEAN,
    exec_cmd             TEXT,
    exec_args            TEXT,
    watch_folder         TEXT,
    category             TEXT,
    tags                 TEXT,
    label                TEXT,
    save_path            TEXT,
    paused               BOOLEAN,
    ignore_rules         BOOLEAN,
    limit_upload_speed   INT,
    limit_download_speed INT,
    client_id            INTEGER,
    filter_id            INTEGER,
    FOREIGN KEY (client_id) REFERENCES client(id),
    FOREIGN KEY (filter_id) REFERENCES filter(id)
);
`

var migrations = []string{
    "",
}

func Migrate(db *sql.DB) error {
    log.Info().Msg("Migrating database...")

    var version int
    if err := db.QueryRow("PRAGMA user_version").Scan(&version); err != nil {
        return fmt.Errorf("failed to query schema version: %v", err)
    }

    if version == len(migrations) {
        return nil
    } else if version > len(migrations) {
        return fmt.Errorf("autobrr (version %d) older than schema (version: %d)", len(migrations), version)
    }

    tx, err := db.Begin()
    if err != nil {
        return err
    }
    defer tx.Rollback()

    if version == 0 {
        if _, err := tx.Exec(schema); err != nil {
            return fmt.Errorf("failed to initialize schema: %v", err)
        }
    } else {
        for i := version; i < len(migrations); i++ {
            if _, err := tx.Exec(migrations[i]); err != nil {
                return fmt.Errorf("failed to execute migration #%v: %v", i, err)
            }
        }
    }

    _, err = tx.Exec(fmt.Sprintf("PRAGMA user_version = %d", len(migrations)))
    if err != nil {
        return fmt.Errorf("failed to bump schema version: %v", err)
    }

    return tx.Commit()
}
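The version gate above rides on SQLite's built-in PRAGMA user_version: a fresh database (version 0) gets the full schema, an up-to-date one (version == len(migrations)) is a no-op, and anything in between replays only the missing entries before the pragma is bumped. A small helper, not part of this commit, showing how that counter can be inspected from the same package:

package database

import (
    "database/sql"
    "fmt"
)

// schemaVersion reads the counter that Migrate maintains; after a successful
// Migrate it equals len(migrations). Hypothetical helper, for illustration only.
func schemaVersion(db *sql.DB) (int, error) {
    var v int
    if err := db.QueryRow("PRAGMA user_version").Scan(&v); err != nil {
        return 0, fmt.Errorf("failed to query schema version: %v", err)
    }
    return v, nil
}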
13
internal/database/utils.go
Normal file
@@ -0,0 +1,13 @@
package database

import (
    "path"
)

func DataSourceName(configPath string, name string) string {
    if configPath != "" {
        return path.Join(configPath, name)
    }

    return name
}
58
internal/database/utils_test.go
Normal file
@@ -0,0 +1,58 @@
package database

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestDataSourceName(t *testing.T) {
    type args struct {
        configPath string
        name       string
    }
    tests := []struct {
        name string
        args args
        want string
    }{
        {
            name: "default",
            args: args{
                configPath: "",
                name:       "autobrr.db",
            },
            want: "autobrr.db",
        },
        {
            name: "path_1",
            args: args{
                configPath: "/config",
                name:       "autobrr.db",
            },
            want: "/config/autobrr.db",
        },
        {
            name: "path_2",
            args: args{
                configPath: "/config/",
                name:       "autobrr.db",
            },
            want: "/config/autobrr.db",
        },
        {
            name: "path_3",
            args: args{
                configPath: "/config//",
                name:       "autobrr.db",
            },
            want: "/config/autobrr.db",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := DataSourceName(tt.args.configPath, tt.args.name)
            assert.Equal(t, tt.want, got)
        })
    }
}