Added all

This commit is contained in:
Guillem Hernandez Sola
2026-04-07 19:37:59 +02:00
commit da6dabcc62
42 changed files with 1959 additions and 0 deletions

36
media/audio-extractor.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
#
# Extract the audio track of an MP4 file into an MP3.
#
# Usage: audio-extractor.sh <input_mp4_file> <output_mp3_file>
# Requires: ffmpeg

# Abort early if ffmpeg is not available.
if ! command -v ffmpeg &> /dev/null; then
  echo "ffmpeg could not be found. Please install ffmpeg first." >&2
  exit 1
fi

# Exactly two arguments: input and output paths.
if [ "$#" -ne 2 ]; then
  echo "Usage: $0 <input_mp4_file> <output_mp3_file>" >&2
  exit 1
fi

# Input and output files.
INPUT_FILE="$1"
OUTPUT_FILE="$2"

# Refuse to run on a missing input file.
if [ ! -f "$INPUT_FILE" ]; then
  echo "Input file '$INPUT_FILE' does not exist." >&2
  exit 1
fi

# -q:a 0 selects the highest VBR MP3 quality; -map a keeps only audio streams.
# Test ffmpeg's exit status directly instead of inspecting $? afterwards.
if ffmpeg -i "$INPUT_FILE" -q:a 0 -map a "$OUTPUT_FILE"; then
  echo "MP3 file successfully created: $OUTPUT_FILE"
else
  echo "Failed to extract MP3 from $INPUT_FILE." >&2
  exit 1
fi

45
media/convert_mp4_to_webm.sh Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Function to convert MP4 to WebM
# Convert an MP4 file to WebM (VP8 video + Vorbis audio) using ffmpeg.
#
# Arguments:
#   $1 - input .mp4 file (must exist)
#   $2 - optional output file; defaults to the input name with a .webm suffix
# Exits the script with status 1 on any validation or conversion failure.
convert_mp4_to_webm() {
  # Keep working variables local to the function (the original leaked globals).
  local input_file="$1"
  local output_file="$2"

  # Refuse to run on a missing input.
  if [ ! -f "$input_file" ]; then
    echo "Error: File '$input_file' does not exist."
    exit 1
  fi

  # Only .mp4 inputs are supported.
  if [[ "$input_file" != *.mp4 ]]; then
    echo "Error: Input file must be an MP4 file."
    exit 1
  fi

  # Derive the output name when the caller did not supply one.
  if [ -z "$output_file" ]; then
    output_file="${input_file%.mp4}.webm"
  fi

  # Test ffmpeg's exit status directly rather than inspecting $? afterwards.
  if ffmpeg -i "$input_file" -c:v libvpx -c:a libvorbis "$output_file"; then
    echo "Successfully converted to '$output_file'."
  else
    echo "Error: Conversion failed."
    exit 1
  fi
}
# Validate the command line before delegating: at least the input file is
# required; the output name is optional.
if [[ $# -eq 0 ]]; then
  echo "Usage: $0 <input_file> [output_file]"
  exit 1
fi

# Hand both positional arguments to the conversion helper.
convert_mp4_to_webm "$1" "$2"

12
media/copy_cbr_files.sh Normal file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Gather every .cbr file found under the volum_* directories into one flat
# destination directory, creating it first if necessary.

readonly DEST_DIR="cbr_files"

# Make sure the destination exists before copying into it.
mkdir -p "$DEST_DIR"

# Walk each volum_* directory recursively and copy every .cbr file found.
find volum_* -type f -name "*.cbr" -exec cp {} "$DEST_DIR" \;

echo "All .cbr files have been copied to $DEST_DIR."

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Script to download images from an Instagram post
# Requires: curl, jq

# Print usage and abort.
function usage() {
  echo "Usage: $0 <instagram_post_url>"
  exit 1
}

# Exactly one argument: the post URL.
if [ $# -ne 1 ]; then
  usage
fi

# Instagram post URL
POST_URL="$1"

# Fetch the HTML of the Instagram post (following redirects).
echo "Fetching post data..."
HTML=$(curl -s -L "$POST_URL")

# Extract every "display_url" value from the embedded JSON.
echo "Extracting image URLs..."
IMAGE_URLS=$(echo "$HTML" | sed -n 's/.*"display_url":"\([^"]*\)".*/\1/p')

# Instagram's embedded JSON escapes '&' as \u0026 inside URLs; decode it so
# curl receives a working query string, and drop duplicate URLs while
# preserving their order.
# NOTE(review): assumes the page uses \u0026 escapes — confirm on a live post.
IMAGE_URLS=$(echo "$IMAGE_URLS" | sed 's/\\u0026/\&/g' | awk '!seen[$0]++')

# Check if any image URLs were found
if [ -z "$IMAGE_URLS" ]; then
  echo "Failed to extract image URLs. Make sure the URL is valid and public."
  exit 1
fi

# Create a directory to save images
SAVE_DIR="instagram_images"
mkdir -p "$SAVE_DIR"

# Download each image with a sequential file name. A while-read loop avoids
# the word-splitting/globbing pitfalls of iterating an unquoted variable.
echo "Downloading images..."
COUNT=1
while IFS= read -r URL; do
  FILE_NAME="$SAVE_DIR/image_$COUNT.jpg"
  curl -s -o "$FILE_NAME" "$URL"
  echo "Downloaded: $FILE_NAME"
  ((COUNT++))
done <<< "$IMAGE_URLS"

echo "All images downloaded to $SAVE_DIR."

View File

@@ -0,0 +1,22 @@
# export_instagram_cookies.py
#
# Export Instagram cookies from the local Chrome profile into a
# Netscape-format cookies.txt file (the format curl/yt-dlp understand).
import browser_cookie3

jar = browser_cookie3.chrome(domain_name='instagram.com')

with open('cookies.txt', 'w') as out:
    out.write("# Netscape HTTP Cookie File\n")
    out.write("# This file was generated by browser-cookie3\n")
    out.write("# https://curl.se/docs/http-cookies.html\n\n")
    for cookie in jar:
        # Netscape columns: domain, flag, path, secure, expiration, name, value
        out.write(
            f"{cookie.domain}\t"
            f"{'TRUE' if cookie.domain.startswith('.') else 'FALSE'}\t"
            f"{cookie.path}\t"
            f"{'TRUE' if cookie.secure else 'FALSE'}\t"
            f"{int(cookie.expires) if cookie.expires else 0}\t"
            f"{cookie.name}\t"
            f"{cookie.value}\n"
        )

print("cookies.txt exported!")
# This script exports Instagram cookies from Chrome to a cookies.txt file.

View File

@@ -0,0 +1,57 @@
#!/bin/bash
# Directory to save downloaded images
OUTPUT_DIR="./downloads"
mkdir -p "$OUTPUT_DIR"
# Instagram Full Image Downloader
# Usage: ./image-instagram-downloader.sh <instagram_image_url>
if [ $# -ne 1 ]; then
echo "Usage: $0 <instagram_image_url>"
exit 1
fi
URL="$1"
# Fetch HTML content of the Instagram post
HTML_DATA=$(curl -s "$URL")
if [ -z "$HTML_DATA" ]; then
echo "Could not fetch HTML data."
exit 2
fi
# Extract JSON data containing the full-resolution image URL
JSON_DATA=$(echo "$HTML_DATA" | grep -oE '<script type="application/ld\+json">[^<]+' | sed 's/<script type="application\/ld+json">//')
if [ -z "$JSON_DATA" ]; then
echo "Could not find JSON data. Falling back to thumbnail."
# Extract thumbnail image URL
ENCODED_IMG_URL=$(echo "$HTML_DATA" | awk -F'<meta property="og:image" content="' '{print $2}' | awk -F'"' '{print $1}')
FULL_IMG_URL=$(echo "$ENCODED_IMG_URL" | sed 's/&amp;/\&/g')
else
# Parse JSON data to extract full-resolution image URL
FULL_IMG_URL=$(echo "$JSON_DATA" | grep -oE '"url":"https:[^"]+' | sed 's/"url":"//')
fi
if [ -z "$FULL_IMG_URL" ]; then
echo "Could not find full image URL."
exit 3
fi
echo "Extracted Full Image URL: $FULL_IMG_URL"
# Download the image
FILENAME=$(basename "$FULL_IMG_URL" | cut -d'?' -f1)
curl -L -A "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" "$FULL_IMG_URL" -o "$OUTPUT_DIR/$FILENAME"
# Check if the file is a valid image
FILE_TYPE=$(file "$OUTPUT_DIR/$FILENAME" | grep -oE 'image|HTML')
if [[ "$FILE_TYPE" == "HTML" ]]; then
echo "Downloaded file is not an image. Please check the extracted URL."
exit 4
fi
echo "Downloaded: $OUTPUT_DIR/$FILENAME"

33
media/instagram-downloader.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Download an Instagram reel with yt-dlp, using cookies exported from the
# local Chrome profile so private reels also work.
#
# Usage: ./instagram-downloader.sh <INSTAGRAM_REEL_URL>
# Requires: yt-dlp, python3, export_instagram_cookies.py (same directory)

OUTPUT_DIR="./downloads"
mkdir -p "$OUTPUT_DIR"

if [ -z "$1" ]; then
  echo "Usage: $0 <INSTAGRAM_REEL_URL>"
  exit 1
fi
URL="$1"

if ! command -v yt-dlp &> /dev/null; then
  echo "yt-dlp is required. Install it with: pip install yt-dlp"
  exit 2
fi

if ! command -v python3 &> /dev/null; then
  echo "Python3 is required. Please install it."
  exit 3
fi

# Export cookies.txt using Python script
python3 export_instagram_cookies.py
if [ ! -f cookies.txt ]; then
  echo "Failed to export cookies.txt. Aborting."
  exit 4
fi

# Use cookies.txt to handle private reels. Only report success when yt-dlp
# actually succeeded (the original printed the success message regardless).
if yt-dlp --cookies cookies.txt "$URL" -o "$OUTPUT_DIR/%(title)s.%(ext)s"; then
  echo "Download completed. Files are saved in $OUTPUT_DIR"
else
  echo "yt-dlp failed to download $URL" >&2
  exit 5
fi

# NOTE(review): cookies.txt holds live session secrets; consider deleting it
# after a successful download.
#rm cookies.txt

61
media/spotify-rss.py Normal file
View File

@@ -0,0 +1,61 @@
import os
import urllib.parse

import requests
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
# --- Configuration ---
# Spotify Developer credentials. Prefer values from the environment; fall back
# to the historical hard-coded values so existing setups keep working.
# NOTE(review): these secrets are committed to the repository — rotate them and
# remove the literals from version control.
SPOTIPY_CLIENT_ID = os.environ.get('SPOTIPY_CLIENT_ID', '3756ae92386d45e9971aa03d2f4b1eca')
SPOTIPY_CLIENT_SECRET = os.environ.get('SPOTIPY_CLIENT_SECRET', '6c8ea8c409d944cc895571f3f49985df')

# The ID from your link: https://open.spotify.com/show/13Gs651PIKKI7zRF0p3PcJ
SHOW_ID = '13Gs651PIKKI7zRF0p3PcJ'
def find_real_rss_feed():
    """Resolve a Spotify show to its public RSS feed via the iTunes Search API.

    Fetches the show's name from Spotify (client-credentials auth), then
    searches the iTunes podcast directory for a matching feed and prints the
    feed URL if one is found. Returns None in all cases; results go to stdout.
    """
    # Step 1: Authenticate with Spotify.
    auth_manager = SpotifyClientCredentials(
        client_id=SPOTIPY_CLIENT_ID,
        client_secret=SPOTIPY_CLIENT_SECRET
    )
    sp = spotipy.Spotify(auth_manager=auth_manager)

    # Step 2: Get the Show Name from Spotify.
    print(f"Fetching details for Spotify Show ID: {SHOW_ID}...")
    try:
        show = sp.show(SHOW_ID)
        show_name = show['name']
        publisher = show['publisher']
        print(f"Found Show: '{show_name}' by {publisher}")
    except Exception as e:
        print(f"Error fetching from Spotify: {e}")
        return

    # Step 3: Search the iTunes API for that Show Name.
    print("\nSearching public directories (iTunes) for the real RSS feed...")
    # We encode the name so it can be safely used in a URL (e.g., spaces become %20)
    encoded_name = urllib.parse.quote(show_name)
    itunes_api_url = f"https://itunes.apple.com/search?term={encoded_name}&entity=podcast&limit=3"
    try:
        # A timeout keeps the script from hanging forever on a stalled request
        # (the original request had none).
        response = requests.get(itunes_api_url, timeout=10)
        data = response.json()
        if data['resultCount'] > 0:
            # Grab the first result's RSS feed URL.
            # We check the first few results to ensure the author matches, but usually the first is correct
            real_rss_url = data['results'][0].get('feedUrl')
            print("\n✅ SUCCESS! Found the public RSS feed:")
            print("-" * 50)
            print(real_rss_url)
            print("-" * 50)
            print("You can copy and paste this URL directly into Apple Podcasts, Pocket Casts, or any standard RSS reader. It contains all the real .mp3 files!")
        else:
            print("\n❌ Could not find this show in public directories.")
            print("This usually means the podcast is a 'Spotify Exclusive' and does not have a public RSS feed.")
    except Exception as e:
        print(f"Error searching iTunes: {e}")


if __name__ == '__main__':
    find_real_rss_feed()

33
media/url-video-downloader.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Download a video from a URL (e.g. one shared on Bluesky) with yt-dlp.
# Requires yt-dlp and ffmpeg to be installed.
#
# Usage: ./script.sh <video_url>

# Directory to save downloaded videos
OUTPUT_DIR="./downloads"
mkdir -p "$OUTPUT_DIR"

# Abort early if yt-dlp is missing.
if ! command -v yt-dlp &>/dev/null; then
  echo "yt-dlp is not installed. Please install it and try again."
  exit 1
fi

# The URL must be passed as the first argument.
if [[ -z "$1" ]]; then
  echo "No URL provided as an argument. Usage: ./script.sh <video_url>"
  exit 1
fi

VIDEO_URL="$1"

# Name the file after the video ID to keep the filename short.
# NOTE(review): -k keeps intermediate fragment files after merging; drop it if
# the leftover files are unwanted.
echo "Downloading video from $VIDEO_URL..."
if yt-dlp -k -o "$OUTPUT_DIR/%(id)s.%(ext)s" "$VIDEO_URL"; then
  echo "Video downloaded successfully to $OUTPUT_DIR."
else
  echo "Failed to download the video. Please check the URL and try again."
  exit 1
fi

34
media/video-downloader.sh Executable file
View File

@@ -0,0 +1,34 @@
#!/bin/bash
#
# Interactively download a video from a URL (e.g. one shared on Bluesky).
# Requires yt-dlp and ffmpeg to be installed.

# Directory to save downloaded videos
OUTPUT_DIR="./downloads"
mkdir -p "$OUTPUT_DIR"

# Abort early if yt-dlp is missing.
if ! command -v yt-dlp &>/dev/null; then
  echo "yt-dlp is not installed. Please install it and try again."
  exit 1
fi

# Prompt for the video URL. -r stops backslashes in the URL from being
# interpreted by read (the original lacked it).
read -r -p "Enter the video URL: " VIDEO_URL

# Validate the URL
if [[ -z "$VIDEO_URL" ]]; then
  echo "No URL entered. Exiting."
  exit 1
fi

# Download the video, testing yt-dlp's exit status directly instead of
# inspecting $? afterwards.
echo "Downloading video from $VIDEO_URL..."
if yt-dlp -o "$OUTPUT_DIR/%(title)s.%(ext)s" "$VIDEO_URL"; then
  echo "Video downloaded successfully to $OUTPUT_DIR."
else
  echo "Failed to download the video. Please check the URL and try again."
  exit 1
fi

44
media/webm-to-mp4-converter.sh Executable file
View File

@@ -0,0 +1,44 @@
#!/bin/bash
#
# Convert a .webm file to .mp4 using ffmpeg.
#
# Usage: webm-to-mp4-converter.sh input_file.webm [output_file.mp4]

# Abort early if ffmpeg is missing.
if ! command -v ffmpeg &> /dev/null; then
  echo "ffmpeg could not be found. Please install ffmpeg first."
  exit 1
fi

# At least the input file is required.
if [ "$#" -lt 1 ]; then
  echo "Usage: $0 input_file.webm [output_file.mp4]"
  exit 1
fi

# Input file
input_file="$1"

# Refuse to run on a missing input file.
if [ ! -f "$input_file" ]; then
  echo "Input file '$input_file' not found."
  exit 1
fi

# Default the output name to the input with its extension swapped for .mp4.
if [ "$#" -ge 2 ]; then
  output_file="$2"
else
  output_file="${input_file%.*}.mp4"
fi

# H.264 video (CRF 23, slow preset) + AAC audio. Test ffmpeg's exit status
# directly instead of inspecting $? afterwards.
if ffmpeg -i "$input_file" -c:v libx264 -preset slow -crf 23 -c:a aac "$output_file"; then
  echo "Conversion successful. Output file: $output_file"
else
  echo "Conversion failed."
  exit 1
fi

13
media/zip_cbr_files.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
#
# Bundle each episode's JPEG pages into a capitol_<n>.cbr archive with zip.
# NOTE(review): the archives are zip files; .cbr conventionally means RAR
# (.cbz is the zip variant). Most readers cope, but the extension is odd.

# Loop over a range of episode numbers
for i in {1..27}; do
  # NOTE(review): the original comment promised leading zeros, but
  # `printf $i` never padded anything — and it used the data as the printf
  # FORMAT string, which is an injection-prone idiom. Behavior is preserved
  # here (unpadded numbers); switch to printf '%02d' "$i" if padded file
  # names are actually wanted.
  episode_number=$(printf '%s' "$i")
  echo "$episode_number"
  zip "capitol_${episode_number}.cbr" volume_1_episode_"${episode_number}"_*.jpg
  echo "Created capitol_${episode_number}.cbr"
done
echo "All .cbr files have been created."