package main

import (
	"bytes"
	"database/sql"
	"encoding/hex"
	"encoding/json"
	"encoding/xml"
	"flag"
	"fmt"
	"html/template"
	"io"
	"log"
	"net/http"
	"path/filepath"
	"runtime/debug"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/etix/goscrape"
	_ "github.com/mattn/go-sqlite3"
)

var db *sql.DB

type Torrent struct {
	URL                 string `json:"url"`
	TopLevelGroupName   string `json:"top_level_group_name"`
	GroupName           string `json:"group_name"`
	DisplayName         string `json:"display_name"`
	AddedToTorrentsList string `json:"added_to_torrents_list_at"`
	IsMetadata          bool   `json:"is_metadata"`
	BTIH                string `json:"btih"`
	MagnetLink          string `json:"magnet_link"`
	TorrentSize         int    `json:"torrent_size"`
	NumFiles            int    `json:"num_files"`
	DataSize            int64  `json:"data_size"`
	AACurrentlySeeding  bool   `json:"aa_currently_seeding"`
	Obsolete            bool   `json:"obsolete"`
	Embargo             bool   `json:"embargo"`
	Seeders             int    `json:"seeders"`
	Leechers            int    `json:"leechers"`
	Completed           int    `json:"completed"`
	StatsScrapedAt      string `json:"stats_scraped_at"`
	PartiallyBroken     bool   `json:"partially_broken"`

	FormattedSeeders   string
	FormattedLeechers  string
	SeedersColor       string
	LeechersColor      string
	FormattedDataSize  string
	FormattedAddedDate string
	MetadataLabel      string
	StatusLabel        template.HTML

	SeederStats []SeederStat `json:"seederStats"`
}

type GroupData struct {
	TopLevelGroupName string
	GroupName         string
	Torrents          []Torrent
	TotalCount        int
}

// wrap converts an error-returning handler into an http.HandlerFunc and
// recovers from panics so a single bad request cannot take down the server.
func wrap(handler func(http.ResponseWriter, *http.Request) error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if rec := recover(); rec != nil {
				log.Printf("Panic in %s: %v\n%s", r.URL.Path, rec, debug.Stack())
				http.Error(w, "Internal Server Error", http.StatusInternalServerError)
			}
		}()
		err := handler(w, r)
		if err != nil {
			log.Printf("Error in %s: %v", r.URL.Path, err)
			http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		}
	}
}

func main() {
	port := flag.Int("port", 8080, "Port to run the server on")
	dbDir := flag.String("directory", ".", "Directory to store the SQLite database")
	flag.Parse()

	dbPath := filepath.Join(*dbDir, "torrents.db")

	var err error
	db, err = sql.Open("sqlite3", dbPath)
	if err != nil {
		log.Fatalf("Error opening database: %v", err)
	}
	defer db.Close()

	createTable()

	go updateTorrents()
	go updateStats()
	go updateDailySeederStats()

	http.HandleFunc("/", wrap(handleRoot))
	http.HandleFunc("/full/", wrap(handleFullList))
	http.HandleFunc("/stats/", wrap(handleStats))
	http.HandleFunc("/seeder-stats/", wrap(handleSeederStats))
	http.HandleFunc("/json", wrap(handleTorrentStats))
	http.HandleFunc("/generate-torrent-list", wrap(handleGenerateTorrentList))

	log.Printf("Starting server on port %d", *port)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", *port), nil))
}

func createTable() {
	_, err := db.Exec(`
		CREATE TABLE IF NOT EXISTS torrents (
			btih TEXT PRIMARY KEY,
			url TEXT,
			top_level_group_name TEXT,
			group_name TEXT,
			display_name TEXT,
			added_to_torrents_list DATETIME,
			is_metadata INTEGER,
			magnet_link TEXT,
			torrent_size INTEGER,
			num_files INTEGER,
			data_size INTEGER,
			aa_currently_seeding INTEGER,
			obsolete INTEGER,
			embargo INTEGER,
			seeders INTEGER,
			leechers INTEGER,
			completed INTEGER,
			stats_scraped_at DATETIME,
			partially_broken INTEGER
		)
	`)
	if err != nil {
		log.Fatalf("Error creating table: %v", err)
	}

	_, err = db.Exec(`
		CREATE TABLE IF NOT EXISTS torrent_updates (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			btih TEXT,
			seeders INTEGER,
			leechers INTEGER,
			completed INTEGER,
			stats_scraped_at DATETIME,
			updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
		)
	`)
	if err != nil {
		log.Fatalf("Error creating torrent_updates table: %v", err)
	}

	_, err = db.Exec(`
		CREATE TABLE IF NOT EXISTS daily_seeder_stats (
			date DATE PRIMARY KEY,
			low_seeders_tb FLOAT,    -- Total TB for torrents with <4 seeders
			medium_seeders_tb FLOAT, -- Total TB for torrents with 4-10 seeders
			high_seeders_tb FLOAT    -- Total TB for torrents with >10 seeders
		);
	`)
	if err != nil {
		log.Fatalf("Error creating daily_seeder_stats table: %v", err)
	}
}

func updateTorrents() {
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for {
		if err := performTorrentUpdate(); err != nil {
			log.Printf("Error updating torrents: %v", err)
		}
		<-ticker.C
	}
}

func performTorrentUpdate() error {
	resp, err := http.Get("https://annas-archive.org/dyn/torrents.json")
	if err != nil {
		return fmt.Errorf("error fetching torrents: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("error reading response body: %w", err)
	}

	var torrents []Torrent
	if err := json.Unmarshal(body, &torrents); err != nil {
		return fmt.Errorf("error decoding torrents: %w", err)
	}

	for _, t := range torrents {
		_, err := db.Exec(`
			INSERT OR REPLACE INTO torrents (
				btih, url, top_level_group_name, group_name, display_name,
				added_to_torrents_list, is_metadata, magnet_link, torrent_size,
				num_files, data_size, aa_currently_seeding, obsolete, embargo,
				seeders, leechers, completed, stats_scraped_at, partially_broken
			) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			t.BTIH, t.URL, t.TopLevelGroupName, t.GroupName, t.DisplayName,
			t.AddedToTorrentsList, t.IsMetadata, t.MagnetLink, t.TorrentSize,
			t.NumFiles, t.DataSize, t.AACurrentlySeeding, t.Obsolete, t.Embargo,
			t.Seeders, t.Leechers, t.Completed, t.StatsScrapedAt, t.PartiallyBroken)
		if err != nil {
			log.Printf("Error inserting/updating torrent %s: %v", t.BTIH, err)
		} else {
			log.Printf("Successfully inserted/updated torrent: %s", t.BTIH)
		}
	}

	log.Println("Torrents updated successfully")
	return nil
}

func updateStats() {
	log.Println("Starting stats scraping process...")
	ticker := time.NewTicker(30 * time.Minute)
	defer ticker.Stop()
	for {
		<-ticker.C

		// Query to select all btih values
		rows, err := db.Query("SELECT btih FROM torrents")
		if err != nil {
			// Don't kill the whole process from a background goroutine; retry on the next tick.
			log.Printf("Error querying torrents: %v", err)
			continue
		}

		// Slice to hold the infohash values
		var allInfohashes []string
		for rows.Next() {
			var btih string
			if err := rows.Scan(&btih); err != nil {
				log.Printf("Error scanning row: %v", err)
				continue
			}
			allInfohashes = append(allInfohashes, btih)
		}
		// Check for any errors encountered during iteration, then close the result
		// set before the long-running scrape (a deferred Close inside this infinite
		// loop would never run).
		iterErr := rows.Err()
		rows.Close()
		if iterErr != nil {
			log.Printf("Error iterating rows: %v", iterErr)
			continue
		}

		// Create a new instance of the goscrape library
		s, err := goscrape.New("udp://tracker.opentrackr.org:1337/announce")
		if err != nil {
			log.Printf("Error creating goscrape instance: %v", err)
			continue
		}

		// Process infohashes in bundles of 50
		const bundleSize = 50
		for i := 0; i < len(allInfohashes); i += bundleSize {
			end := i + bundleSize
			if end > len(allInfohashes) {
				end = len(allInfohashes)
			}
			bundle := allInfohashes[i:end]

			// Convert the bundle to raw infohash bytes. The BTIH values are
			// 40-character hex strings, while the tracker scrape protocol works
			// with 20-byte infohashes, so decode them before scraping.
			infohash := make([][]byte, 0, len(bundle))
			for _, hash := range bundle {
				raw, err := hex.DecodeString(hash)
				if err != nil {
					log.Printf("Skipping invalid btih %q: %v", hash, err)
					continue
				}
				infohash = append(infohash, raw)
			}

			// Scrape the current bundle
			res, err := s.Scrape(infohash...)
			if err != nil {
				// Log and move on to the next bundle instead of exiting the process.
				log.Printf("Error scraping infohashes: %v", err)
				continue
			}

			// Loop over the results, print them, and store them
			for _, r := range res {
				// Re-encode the raw infohash so it matches the hex btih stored in the database.
				btih := hex.EncodeToString(r.Infohash)

				fmt.Println("Infohash:\t", btih)
				fmt.Println("Seeders:\t", r.Seeders)
				fmt.Println("Leechers:\t", r.Leechers)
				fmt.Println("Completed:\t", r.Completed)
				fmt.Println("")

				// Update the torrents table with the scraped values
				result, err := db.Exec(`
					UPDATE torrents
					SET seeders = ?, leechers = ?, completed = ?, stats_scraped_at = ?
					WHERE btih = ?
				`, r.Seeders, r.Leechers, r.Completed, time.Now(), btih)
				if err != nil {
					log.Printf("Error updating torrent: %v", err)
				} else {
					rowsAffected, _ := result.RowsAffected()
					log.Printf("Successfully updated torrent. Rows affected: %d", rowsAffected)
				}

				// Insert update details into the torrent_updates table
				_, err = db.Exec(`
					INSERT INTO torrent_updates (
						btih, seeders, leechers, completed, stats_scraped_at
					) VALUES (?, ?, ?, ?, ?)
				`, btih, r.Seeders, r.Leechers, r.Completed, time.Now())
				if err != nil {
					log.Printf("Error inserting update into torrent_updates: %v", err)
				}
			}
		}

		log.Println("Stats updated successfully")
	}
}

func handleSeederStats(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodGet {
		return fmt.Errorf("method not allowed: %s", r.Method)
	}

	rows, err := db.Query("SELECT date, low_seeders_tb, medium_seeders_tb, high_seeders_tb FROM daily_seeder_stats ORDER BY date")
	if err != nil {
		return fmt.Errorf("error querying database: %w", err)
	}
	defer rows.Close()

	type dailyStat struct {
		Date            string  `json:"date"`
		LowSeedersTB    float64 `json:"lowSeedersTB"`
		MediumSeedersTB float64 `json:"mediumSeedersTB"`
		HighSeedersTB   float64 `json:"highSeedersTB"`
	}

	var stats []dailyStat
	for rows.Next() {
		var date time.Time
		var stat dailyStat
		if err := rows.Scan(&date, &stat.LowSeedersTB, &stat.MediumSeedersTB, &stat.HighSeedersTB); err != nil {
			return fmt.Errorf("error scanning row: %w", err)
		}
		stat.Date = date.Format("2006-01-02") // Format date as YYYY-MM-DD
		stats = append(stats, stat)
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("error after iterating rows: %w", err)
	}

	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(stats)
}

func updateDailySeederStats() {
	log.Println("Starting daily seeder stats update process...")

	performUpdate := func() error {
		tx, err := db.Begin() // Start a transaction
		if err != nil {
			return fmt.Errorf("error starting transaction: %w", err)
		}
		defer tx.Rollback() // Ensure the transaction is rolled back if anything fails

		// Calculate total TB for each seeder category
		rows, err := tx.Query(`
			SELECT
				SUM(CASE WHEN seeders < 4 THEN data_size ELSE 0 END) / 1099511627776.0 AS low_seeders_tb,
				SUM(CASE WHEN seeders BETWEEN 4 AND 10 THEN data_size ELSE 0 END) / 1099511627776.0 AS medium_seeders_tb,
				SUM(CASE WHEN seeders > 10 THEN data_size ELSE 0 END) / 1099511627776.0 AS high_seeders_tb
			FROM torrents
			WHERE embargo = 0
		`)
		if err != nil {
			return fmt.Errorf("error querying seeder stats: %w", err)
		}
		defer rows.Close()

		var lowSeedersTB, mediumSeedersTB, highSeedersTB float64
		if rows.Next() {
			err = rows.Scan(&lowSeedersTB, &mediumSeedersTB, &highSeedersTB)
			if err != nil {
				return fmt.Errorf("error scanning row: %w", err)
			}
		}
		// Check for any errors encountered during iteration, then release the
		// result set before writing inside the same transaction.
		if err := rows.Err(); err != nil {
			return fmt.Errorf("error during row iteration: %w", err)
		}
		rows.Close()

		// Insert or update the daily stats
		_, err = tx.Exec(`
			INSERT OR REPLACE INTO daily_seeder_stats (date, low_seeders_tb, medium_seeders_tb, high_seeders_tb)
			VALUES (DATE('now'), ?, ?, ?)`,
			lowSeedersTB, mediumSeedersTB, highSeedersTB)
		if err != nil {
			return fmt.Errorf("error inserting/updating daily seeder stats: %w", err)
		}

		// Commit the transaction
		if err := tx.Commit(); err != nil {
			return fmt.Errorf("error committing transaction: %w", err)
		}

		log.Println("Daily seeder stats updated successfully")
		return nil
	}

	// Perform the first update immediately
	if err := performUpdate(); err != nil {
		log.Printf("Error in initial performUpdate: %v", err)
	}

	// Set up a ticker to run once a day
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for range ticker.C {
		if err := performUpdate(); err != nil {
			log.Printf("Error in performUpdate: %v", err)
		}
	}
}

func handleTorrentStats(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodGet {
		return fmt.Errorf("method not allowed: %s", r.Method)
	}

	rows, err := db.Query(`
		SELECT url, top_level_group_name, group_name, display_name, added_to_torrents_list,
			is_metadata, btih, magnet_link, torrent_size, num_files, data_size,
			aa_currently_seeding, obsolete, embargo, seeders, leechers, completed,
			stats_scraped_at, partially_broken
		FROM torrents
	`)
	if err != nil {
		return fmt.Errorf("error querying database: %w", err)
	}
	defer rows.Close()

	var torrents []Torrent
	for rows.Next() {
		var t Torrent
		if err := rows.Scan(
			&t.URL, &t.TopLevelGroupName, &t.GroupName, &t.DisplayName, &t.AddedToTorrentsList,
			&t.IsMetadata, &t.BTIH, &t.MagnetLink, &t.TorrentSize, &t.NumFiles, &t.DataSize,
			&t.AACurrentlySeeding, &t.Obsolete, &t.Embargo, &t.Seeders, &t.Leechers, &t.Completed,
			&t.StatsScrapedAt, &t.PartiallyBroken,
		); err != nil {
			return fmt.Errorf("error scanning row: %w", err)
		}
		torrents = append(torrents, t)
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("error after iterating rows: %w", err)
	}

	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(torrents)
}

func handleRoot(w http.ResponseWriter, r *http.Request) error {
	torrents, err := fetchTorrents("ORDER BY top_level_group_name, group_name, seeders ASC")
	if err != nil {
		return fmt.Errorf("error fetching torrents: %w", err)
	}
	groups := groupTorrents(torrents)
	return renderTemplate(w, "root", groups)
}

func handleFullList(w http.ResponseWriter, r *http.Request) error {
	groupName := strings.TrimPrefix(r.URL.Path, "/full/")
	torrents, err := fetchTorrents("WHERE group_name = ? ORDER BY seeders ASC", groupName)
	if err != nil {
		return fmt.Errorf("error fetching torrents: %w", err)
	}

	data := struct {
		GroupName string
		Torrents  []Torrent
	}{
		GroupName: groupName,
		Torrents:  torrents,
	}
	return renderTemplate(w, "fullList", data)
}

func handleStats(w http.ResponseWriter, r *http.Request) error {
	btih := r.URL.Path[len("/stats/"):]
	torrents, err := fetchTorrents("WHERE btih = ?", btih)
	if err != nil {
		return fmt.Errorf("error fetching torrent stats: %w", err)
	}
	if len(torrents) == 0 {
		http.NotFound(w, r)
		return nil
	}

	// Fetch daily average seeder counts
	seederStats, err := fetchDailySeederStats(btih)
	if err != nil {
		return fmt.Errorf("error fetching seeder stats: %w", err)
	}

	// Add SeederStats to the Torrent struct
	torrents[0].SeederStats = seederStats

	return renderTemplate(w, "stats", torrents[0])
}

type SeederStat struct {
	Date    string  `json:"date"`
	Seeders float64 `json:"seeders"`
}

func fetchDailySeederStats(btih string) ([]SeederStat, error) {
	query := `
		SELECT DATE(stats_scraped_at) AS date, AVG(seeders) AS avg_seeders
		FROM torrent_updates
		WHERE btih = ?
		GROUP BY DATE(stats_scraped_at)
		ORDER BY date ASC
	`
	rows, err := db.Query(query, btih)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var stats []SeederStat
	for rows.Next() {
		var stat SeederStat
		if err := rows.Scan(&stat.Date, &stat.Seeders); err != nil {
			return nil, err
		}
		stats = append(stats, stat)
	}
	return stats, rows.Err()
}

func handleGenerateTorrentList(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodPost {
		return fmt.Errorf("method not allowed: %s", r.Method)
	}

	maxTB, err := strconv.ParseFloat(r.FormValue("maxTB"), 64)
	if err != nil {
		return fmt.Errorf("invalid max TB value: %w", err)
	}
	listType := r.FormValue("listType")
	format := r.FormValue("format")

	torrents, err := generateTorrentList(maxTB, listType)
	if err != nil {
		return fmt.Errorf("error generating torrent list: %w", err)
	}

	switch format {
	case "xml":
		w.Header().Set("Content-Type", "application/xml")
		xmlResponse, err := convertToXML(torrents)
		if err != nil {
			return fmt.Errorf("error converting to XML: %w", err)
		}
		_, err = w.Write(xmlResponse)
		return err
	case "txt":
		w.Header().Set("Content-Type", "text/plain")
		txtResponse := convertToTXT(torrents)
		_, err = w.Write([]byte(txtResponse))
		return err
	case "json":
		fallthrough
	default:
		w.Header().Set("Content-Type", "application/json")
		return json.NewEncoder(w).Encode(torrents)
	}
}

func fetchTorrents(query string, args ...interface{}) ([]Torrent, error) {
	rows, err := db.Query("SELECT * FROM torrents "+query, args...)
	if err != nil {
		return nil, fmt.Errorf("error querying torrents: %w", err)
	}
	defer rows.Close()

	var torrents []Torrent
	for rows.Next() {
		var t Torrent
		var addedDateStr, statsScrapedAtStr sql.NullString
		err := rows.Scan(
			&t.BTIH, &t.URL, &t.TopLevelGroupName, &t.GroupName, &t.DisplayName,
			&addedDateStr, &t.IsMetadata, &t.MagnetLink, &t.TorrentSize, &t.NumFiles,
			&t.DataSize, &t.AACurrentlySeeding, &t.Obsolete, &t.Embargo, &t.Seeders,
			&t.Leechers, &t.Completed, &statsScrapedAtStr, &t.PartiallyBroken)
		if err != nil {
			return nil, fmt.Errorf("error scanning torrent: %w", err)
		}

		// Handle NULL values
		if addedDateStr.Valid {
			t.AddedToTorrentsList = addedDateStr.String
		} else {
			t.AddedToTorrentsList = ""
		}
		if statsScrapedAtStr.Valid {
			t.StatsScrapedAt = statsScrapedAtStr.String
		} else {
			t.StatsScrapedAt = ""
		}

		formatTorrent(&t)
		torrents = append(torrents, t)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating rows: %w", err)
	}
	return torrents, nil
}

func formatTorrent(t *Torrent) {
	t.FormattedSeeders = formatNumber(t.Seeders)
	t.FormattedLeechers = formatNumber(t.Leechers)
	t.FormattedDataSize = formatDataSize(t.DataSize)
	t.FormattedAddedDate = formatDate(t.AddedToTorrentsList)
	t.MetadataLabel = "data"
	if t.IsMetadata {
		t.MetadataLabel = "metadata"
	}
	t.StatusLabel = formatStatus(t.AACurrentlySeeding, t.Seeders, t.Leechers, t.StatsScrapedAt)
}

func groupTorrents(torrents []Torrent) []GroupData {
	groups := make(map[string]map[string][]Torrent)
	for _, t := range torrents {
		if groups[t.TopLevelGroupName] == nil {
			groups[t.TopLevelGroupName] = make(map[string][]Torrent)
		}
		groups[t.TopLevelGroupName][t.GroupName] = append(groups[t.TopLevelGroupName][t.GroupName], t)
	}

	var groupData []GroupData
	for topLevelGroup, groupMap := range groups {
		for group, torrents := range groupMap {
			limitedTorrents := torrents
			if len(torrents) > 20 {
				limitedTorrents = torrents[:20]
			}
			groupData = append(groupData, GroupData{
				TopLevelGroupName: topLevelGroup,
				GroupName:         group,
				Torrents:          limitedTorrents,
				TotalCount:        len(torrents),
			})
		}
	}

	// Map iteration order is random, so restore a stable ordering for display.
	sort.Slice(groupData, func(i, j int) bool {
		if groupData[i].TopLevelGroupName != groupData[j].TopLevelGroupName {
			return groupData[i].TopLevelGroupName < groupData[j].TopLevelGroupName
		}
		return groupData[i].GroupName < groupData[j].GroupName
	})
	return groupData
}

func renderTemplate(w http.ResponseWriter, name string, data interface{}) error {
	funcMap := template.FuncMap{
		"urlsafe": func(s string) template.URL {
			return template.URL(s)
		},
		"json": func(v interface{}) template.JS {
			b, err := json.Marshal(v)
			if err != nil {
				return template.JS("null")
			}
			return template.JS(b)
		},
	}
	tmpl := template.Must(template.New(name).Funcs(funcMap).Parse(getTemplateContent(name)))
	return tmpl.Execute(w, data)
}

func getTemplateContent(name string) string {
	header := ` Anna's Archive Mirror
` footer := `
` templates := map[string]string{ "root": header + `

Anna's Archive Mirror

This is a mirror of the Anna's Archive torrent page. We strive to keep everything as up-to-date as possible; however, we are not affiliated with Anna's Archive, and this list is maintained privately.

This torrent list is the “ultimate unified list” of releases by Anna’s Archive, Library Genesis, Sci-Hub, and others. By seeding these torrents, you help preserve humanity’s knowledge and culture. These torrents represent the vast majority of human knowledge that can be mirrored in bulk.

These torrents are not meant for downloading individual books. They are meant for long-term preservation. With these torrents you can set up a full mirror of Anna’s Archive, using their source code and metadata (which can be generated or downloaded as ElasticSearch and MariaDB databases). We scrape the full torrent list from Anna's Archive every 24 hours. We also have full lists of torrents, as JSON, available here.

{{range .}}

{{.TopLevelGroupName}}

{{.GroupName}}

{{range .Torrents}} {{end}}
Torrent Name Date Added Data Size Num Files Type Status Magnet Link Torrent Link
{{.DisplayName}} {{.FormattedAddedDate}} {{.FormattedDataSize}} {{.NumFiles}} {{.MetadataLabel}} {{.StatusLabel}} Magnet Torrent
{{if gt .TotalCount 20}} View full list {{end}} {{end}} ` + footer, "fullList": header + `

{{.GroupName}} - Full List

{{range .Torrents}} {{end}}
Torrent Name Date Added Data Size Num Files Type Status Magnet Link Torrent Link
{{.DisplayName}} {{.FormattedAddedDate}} {{.FormattedDataSize}} {{.NumFiles}} {{.MetadataLabel}} {{.StatusLabel}} Magnet Torrent
Back to main page ` + footer, "stats": header + `

Torrent Stats: {{.DisplayName}}

Property Value
Added To Torrents List{{.AddedToTorrentsList}}
Seeders{{.FormattedSeeders}}
Leechers{{.FormattedLeechers}}
Completed{{.Completed}}
Size{{.FormattedDataSize}}
Magnet LinkMagnet
Last Update{{.StatsScrapedAt}}

Seeder History

Back to main page ` + footer,
	}
	return templates[name]
}

func formatDate(dateStr string) string {
	if len(dateStr) >= 10 {
		return dateStr[:10]
	}
	return dateStr
}

func formatTimeSince(dateStr string) string {
	// First try parsing the extended format with fractional seconds and time zone offset
	layoutWithTimezone := "2006-01-02T15:04:05.999999999Z07:00"
	t, err := time.Parse(layoutWithTimezone, dateStr)
	if err != nil {
		// If it fails, fall back to the plain UTC format without fractional seconds
		layoutWithoutTimezone := "2006-01-02T15:04:05Z"
		t, err = time.Parse(layoutWithoutTimezone, dateStr)
		if err != nil {
			// If both parsings fail, return the original string
			return dateStr
		}
	}

	now := time.Now()
	diff := now.Sub(t)

	var unit string
	var value int64
	switch {
	case diff < time.Minute:
		unit = "s"
		value = int64(diff.Seconds())
	case diff < time.Hour:
		unit = "m"
		value = int64(diff.Minutes())
	case diff < 24*time.Hour:
		unit = "h"
		value = int64(diff.Hours())
	default:
		unit = "d"
		value = int64(diff.Hours() / 24)
	}
	return fmt.Sprintf("%d%s", value, unit)
}

func formatDataSize(size int64) string {
	sizeMB := float64(size) / (1024 * 1024)
	if sizeMB > 1024 {
		sizeGB := sizeMB / 1024
		if sizeGB > 1024 {
			return fmt.Sprintf("%.2f TB", sizeGB/1024)
		}
		return fmt.Sprintf("%.2f GB", sizeGB)
	}
	return fmt.Sprintf("%.2f MB", sizeMB)
}

func formatStatus(currentlySeeding bool, seeders, leechers int, scrapedAtStr string) template.HTML {
	var status string
	switch {
	case seeders < 2:
		status = "🔴"
	case seeders > 10:
		status = "🟢"
	default:
		status = "🟡"
	}

	archived := ""
	if currentlySeeding {
		archived = "(📓)"
	}

	timeSince := formatTimeSince(scrapedAtStr)
	return template.HTML(fmt.Sprintf("%s %d seed / %d leech %s %s", status, seeders, leechers, archived, timeSince))
}

func formatNumber(n int) string {
	if n < 1000 {
		return fmt.Sprintf("%d", n)
	}
	return fmt.Sprintf("%dK", n/1000)
}

func generateTorrentList(maxTB float64, listType string) ([]map[string]interface{}, error) {
	maxBytes := int64(maxTB * 1024 * 1024 * 1024 * 1024) // Convert TB to bytes
	var totalBytes int64
	var result []map[string]interface{}

	rows, err := db.Query(`
		SELECT btih, magnet_link, url, data_size, seeders
		FROM torrents
		WHERE obsolete = 0 AND embargo = 0 AND seeders > 0
		ORDER BY seeders ASC, data_size DESC
	`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		var btih, magnetLink, url string
		var dataSize, seeders int64
		if err := rows.Scan(&btih, &magnetLink, &url, &dataSize, &seeders); err != nil {
			return nil, err
		}

		if totalBytes+dataSize > maxBytes {
			break
		}

		torrent := map[string]interface{}{
			"btih":      btih,
			"data_size": dataSize,
			"seeders":   seeders,
		}
		switch listType {
		case "magnet":
			torrent["magnet_link"] = magnetLink
		case "torrent":
			torrent["torrent_url"] = url
		case "both":
			torrent["magnet_link"] = magnetLink
			torrent["torrent_url"] = url
		}

		result = append(result, torrent)
		totalBytes += dataSize
	}
	return result, rows.Err()
}

// convertToXML converts the torrent list to XML format
func convertToXML(torrents []map[string]interface{}) ([]byte, error) {
	type Torrent struct {
		BTIH       string `xml:"btih"`
		DataSize   int64  `xml:"data_size"`
		Seeders    int64  `xml:"seeders"`
		MagnetLink string `xml:"magnet_link,omitempty"`
		TorrentURL string `xml:"torrent_url,omitempty"`
	}
	type Torrents struct {
		XMLName xml.Name  `xml:"torrents"`
		List    []Torrent `xml:"torrent"`
	}

	var xmlTorrents []Torrent
	for _, t := range torrents {
		torrent := Torrent{
			BTIH:       getStringValue(t, "btih"),
			DataSize:   getInt64Value(t, "data_size"),
			Seeders:    getInt64Value(t, "seeders"),
			MagnetLink: getStringValue(t, "magnet_link"),
			TorrentURL: getStringValue(t, "torrent_url"),
		}
		xmlTorrents = append(xmlTorrents, torrent)
	}

	output, err := xml.MarshalIndent(Torrents{List: xmlTorrents}, "", "  ")
	if err != nil {
		return nil, err
	}
	return append([]byte(xml.Header), output...), nil
}

// Helper function to safely get a string value from the map
func getStringValue(m map[string]interface{}, key string) string {
	if value, ok := m[key].(string); ok {
		return value
	}
	return ""
}

// Helper function to safely get an int64 value from the map
func getInt64Value(m map[string]interface{}, key string) int64 {
	if value, ok := m[key].(int64); ok {
		return value
	}
	return 0
}

// convertToTXT converts the torrent list to plain text format
func convertToTXT(torrents []map[string]interface{}) string {
	var buffer bytes.Buffer
	for _, t := range torrents {
		if magnetLink, ok := t["magnet_link"]; ok {
			buffer.WriteString(fmt.Sprintf("%s\n", magnetLink.(string)))
		}
		if torrentURL, ok := t["torrent_url"]; ok {
			buffer.WriteString(fmt.Sprintf("%s\n", torrentURL.(string)))
		}
		buffer.WriteString("\n")
	}
	return buffer.String()
}