commit 93c6b1c261
2025-10-01 16:06:53 +02:00
12 changed files with 874 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,13 @@
# Folders
.venv
utils/__pycache__
0-Queue/*
0-Source/*
1-Temp/*
2-Out/*
# Files
*.wmv
*.mkv
*.ass
*.vtt

Readme.md Normal file

@@ -0,0 +1,76 @@
## Upscale
Automatically upscale and interpolate video files.
### Usage
0. Preparing video file (MKVToolNix)
- Make sure the video file does NOT have an image attachment
- The video file should contain exactly ONE `ass` subtitle track
- Name the video file similar to the following: `Your Title - 01.mkv` (see the parsing sketch after this list)
1. Place the `.mkv` file inside the `0-Source` folder
2. Execute `python Start.py`
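For reference, here is a minimal sketch of how `Start.py` derives the series name and episode number from such a filename (it mirrors the regexes used in `Start.py`; the title is just an example):
```python
import re

filename = 'Your Title - 01.mkv'  # example file placed in 0-Source

# Drop bracketed release tags and the extension, then split out the name and episode number
temp_name = re.sub(r'\[.*?\]|\(.*?\)', "", filename).rsplit('.', 1)[0].strip()
folder_name = re.sub(r'[^A-Za-z ]+', '', temp_name).strip()   # 'Your Title'
episode_number = re.findall(r'\d+', temp_name)[-1]            # '01'
print(folder_name, episode_number)
```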
### Configuration
The `Start.py` file has two options inside; edit them in a text editor (shown below):
- `INTERPOLATE_4K`: `True` or `False` (whether a 4K 48fps version should also be produced)
- `MAX_INPUT_WIDTH`: `'720'` (the source is capped at this resolution before upscaling, since upscaling straight from 1080p would be far too demanding)
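These options sit at the top of `Start.py` and look like this (defaults shown):
```python
# Top of Start.py (default values)
INTERPOLATE_4K = False    # set to True to also produce a 2160p 48fps version
MAX_INPUT_WIDTH = '720'   # cap the source resolution before upscaling
```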
### Requirements
- **OS**: Windows or Linux
- **GPU**: NVIDIA GPU with 16GB of VRAM
- **RAM**: Min: 16GB, Recommended: 32GB
- **Python**: >=3.12 (Windows) or Python 3.13 (Linux)
#### Additional Requirements for 4k48fps
- **GPU**: NVIDIA GPU with 24GB of VRAM
- **RAM** (Linux): 48GB (there is a memory leak in VapourSynth)
### Installation (Windows)
1. Download and install [Python 3.12](https://www.python.org/ftp/python/3.12.8/python-3.12.8-amd64.exe) - check the option to add Python to PATH
2. Download and install [ImageMagick](https://imagemagick.org/script/download.php#windows) - check the option to install legacy tools (required for creating the player previews `thumbs.vtt` and `sprite.jpg`)
3. Download and install [MKVToolNix](https://mkvtoolnix.download/downloads.html#windows) - **add it to the system PATH manually**
4. Download [ffmpeg](https://www.gyan.dev/ffmpeg/builds/#release-builds) - either place it inside the MKVToolNix install directory or create a new directory somewhere and **add that to the system PATH**
5. Download and install [VapourSynth R72](https://github.com/vapoursynth/vapoursynth/releases) - check the option to install vsrepo; AviSynth support is not required
6. Install `vsrepo` packages in a terminal (cmd, NOT PowerShell!):
```cmd
:: FFmpeg Source Plugin
vsrepo.py install ffms2
:: (optional) Miscfilters Plugin, used for scene detection in vsrife
:: Unsure if it even works
vsrepo.py install misc
```
7. Install `pip` packages
```cmd
pip install packaging setuptools wheel
pip install pymediainfo python-dateutil
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu126
pip install vsrealesrgan vsrife
```
8. Install models used by `vsrealesrgan` and `vsrife`
```ps
python -m vsrealesrgan
python -m vsrife
```
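Optionally, run a quick sanity check to confirm that the GPU stack and the VapourSynth plugins import cleanly (a hypothetical helper script, not part of this repo):
```python
# check_install.py - hypothetical helper, not shipped with this repo
import torch
import vapoursynth as vs
from vsrealesrgan import realesrgan  # noqa: F401
from vsrife import rife              # noqa: F401

print('CUDA available:', torch.cuda.is_available())
print('VapourSynth core:', vs.core.version())
```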
### Installation (Arch Linux)
Unfortunately there is some sort of memory leak in vsrife or VapourSynth under Linux.
32GB of RAM should suffice for everything EXCEPT 4k48p, which will likely need 48GB or 64GB of RAM.
```bash
# System Packages
sudo pacman -S python vapoursynth ffms2 ffmpeg mkvtoolnix-cli mediainfo imagemagick
# Set up the virtual environment (in the upscale directory)
python -m venv .venv
source .venv/bin/activate
# Install pip dependencies
pip install typing-extensions packaging setuptools wheel
pip install pymediainfo python-dateutil
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu126
pip install vsrealesrgan vsrife
# Install models
python -m vsrealesrgan
python -m vsrife
```

Start.bat Normal file

@@ -0,0 +1,3 @@
python Start.py
pause

Start.py Normal file

@@ -0,0 +1,50 @@
import os
import re
from utils.encodeCDN import EncodeCDN
from utils.encodeDDL import EncodeDDL
from utils.interpolate import Interpolate
from utils.interpolate4k import Interpolate4K
from utils.upcale import upscale
from utils.mediainfo import get_aspect_ratio
INTERPOLATE_4K = False
MAX_INPUT_WIDTH = '720'
def create_folder(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for filename in os.listdir('0-Source'):
input_file = os.path.join('0-Source', filename)
if not os.path.isfile(input_file):
continue
# Parse File Name
temp_name = re.sub(r'\[.*?\]|\(.*?\)', "", filename).rsplit('.', 1)[0].strip()
folder_name = re.sub(r'[^A-Za-z ]+', '', temp_name).strip()
episode_number = re.findall(r'\d+', temp_name)[-1]
cdn_folder_name = folder_name.replace(" ", ".")
cdn_folder = os.path.join('2-Out', folder_name, cdn_folder_name, 'E' + episode_number)
muxed_folder = os.path.join('2-Out', folder_name, 'Muxed')
upscale_output_folder = os.path.join('2-Out', folder_name, folder_name + ' [2160p]')
upscale_output = os.path.join(upscale_output_folder, temp_name + ' [4k][HEVC].mkv')
interpolate_output = os.path.join(upscale_output_folder, temp_name + ' [1080p][48fps][HEVC].mkv')
interpolate_4k_output = os.path.join(upscale_output_folder, temp_name + ' [2160p][48fps][HEVC].mkv')
print('Parsed Name: ' + temp_name)
create_folder(cdn_folder)
create_folder(muxed_folder)
create_folder(upscale_output_folder)
aspect_ratio = get_aspect_ratio(input_file)
upscale(input_file, upscale_output, MAX_INPUT_WIDTH, aspect_ratio)
Interpolate(interpolate_output, upscale_output, temp_name, aspect_ratio)
Interpolate4K(interpolate_4k_output, upscale_output, INTERPOLATE_4K, temp_name)
EncodeDDL(input_file, cdn_folder, folder_name, temp_name, upscale_output, aspect_ratio, interpolate_output, INTERPOLATE_4K, interpolate_4k_output)
EncodeCDN(input_file, cdn_folder, aspect_ratio, upscale_output, interpolate_output, INTERPOLATE_4K, interpolate_4k_output)

makesprites.py Normal file

@@ -0,0 +1,320 @@
#!/usr/bin/python3
import subprocess
import shlex
import sys
import logging
import os
import datetime
import math
import glob
from dateutil import relativedelta
##################################
# Generate tooltip thumbnail images & corresponding WebVTT file for a video (e.g. MP4).
# Final product is one *_sprite.jpg file and one *_thumbs.vtt file.
#
# DEPENDENCIES: required: ffmpeg & imagemagick
# optional: sips (comes with MacOSX) - yields slightly smaller sprites
# download ImageMagick: http://www.imagemagick.org/script/index.php OR http://www.imagemagick.org/script/binary-releases.php (on MacOSX: "sudo port install ImageMagick")
# download ffmpeg: http://www.ffmpeg.org/download.html
# jwplayer reference: http://www.longtailvideo.com/support/jw-player/31778/adding-tooltip-thumbnails/
#
# TESTING NOTES: Tested putting time gaps between thumbnail segments, but had no visual effect in JWplayer, so omitted.
# Tested using an offset so that thumbnail would show what would display mid-way through clip rather than for the 1st second of the clip, but was not an improvement.
##################################
# TODO determine optimal number of images/segment distance based on length of video? (so longer videos don't have huge sprites)
USE_SIPS = False # True to use sips if using MacOSX (creates slightly smaller sprites), else set to False to use ImageMagick
THUMB_RATE_SECONDS = 5 # every Nth second take a snapshot
THUMB_WIDTH = 320 # 100-150 is width recommended by JWPlayer; I like smaller files
SKIP_FIRST = True # True to skip a thumbnail of second 1; often not a useful image, plus JWPlayer doesn't seem to show it anyway, and user knows beginning without needing preview
SPRITE_NAME = "sprite.jpg" # jpg is much smaller than png, so using jpg
VTTFILE_NAME = "thumbs.vtt"
THUMB_OUTDIR = "thumbs"
USE_UNIQUE_OUTDIR = False # true to make a unique timestamped output dir each time, else False to overwrite/replace existing outdir
TIMESYNC_ADJUST = 1 # set to 1 to not adjust time (gets multiplied by thumbRate); on my machine, ffmpeg snapshots show images about 1/2 the thumbRate earlier than the expected timestamp (for one vid, a 10s thumbRate -> images were 6s earlier than expected; 45 -> 22s early; 90 -> 44s early)
logger = logging.getLogger(sys.argv[0])
logSetup = False
class SpriteTask():
"""small wrapper class as convenience accessor for external scripts"""
def __init__(self, videofile):
self.remotefile = videofile.startswith("http")
if not self.remotefile and not os.path.exists(videofile):
sys.exit("File does not exist: %s" % videofile)
basefile = os.path.basename(videofile)
basefile_nospeed = removespeed(basefile) # strip trailing speed suffix from file/dir names, if present
newoutdir = makeOutDir(basefile_nospeed)
fileprefix, ext = os.path.splitext(basefile_nospeed)
spritefile = SPRITE_NAME
vttfile = VTTFILE_NAME
self.videofile = videofile
self.vttfile = vttfile
self.spritefile = spritefile
self.outdir = newoutdir
def getVideoFile(self):
return self.videofile
def getOutdir(self):
return self.outdir
def getSpriteFile(self):
return self.spritefile
def getVTTFile(self):
return self.vttfile
def makeOutDir(videofile):
"""create unique output dir based on video file name and current timestamp"""
base, ext = os.path.splitext(videofile)
script = sys.argv[0]
basepath = os.path.dirname(
os.path.abspath(script)) # make output dir always relative to this script regardless of shell directory
if len(THUMB_OUTDIR) > 0 and THUMB_OUTDIR[0] == '/':
outputdir = THUMB_OUTDIR
else:
outputdir = os.path.join(basepath, THUMB_OUTDIR)
if USE_UNIQUE_OUTDIR:
newoutdir = "%s.%s" % (os.path.join(outputdir, base), datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
else:
newoutdir = "%s_%s" % (os.path.join(outputdir, base), "vtt")
if not os.path.exists(newoutdir):
logger.info("Making dir: %s" % newoutdir)
os.makedirs(newoutdir)
elif os.path.exists(newoutdir) and not USE_UNIQUE_OUTDIR:
# remove previous contents if reusing outdir
files = os.listdir(newoutdir)
print("Removing previous contents of output directory: %s" % newoutdir)
for f in files:
os.unlink(os.path.join(newoutdir, f))
return newoutdir
def doCmd(cmd, logger=logger): # execute a shell command and return/print its output
logger.info("START [%s] : %s " % (datetime.datetime.now(), cmd))
args = shlex.split(cmd) # tokenize args
output = None
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT) # pipe stderr into stdout
except Exception as e:
ret = "ERROR [%s] An exception occurred\n%s\n%s" % (datetime.datetime.now(), output, str(e))
logger.error(ret)
raise e # todo ?
ret = "END [%s]\n%s" % (datetime.datetime.now(), output)
logger.info(ret)
sys.stdout.flush()
return output
def takesnaps(videofile, newoutdir, thumbRate=None):
"""
take snapshot image of video every Nth second and output to sequence file names and custom directory
reference: https://trac.ffmpeg.org/wiki/Create%20a%20thumbnail%20image%20every%20X%20seconds%20of%20the%20video
"""
if not thumbRate:
thumbRate = THUMB_RATE_SECONDS
rate = "1/%d" % int(thumbRate) # 1/60=1 per minute, 1/120=1 every 2 minutes
cmd = "ffmpeg -i %s -f image2 -bt 20M -vf fps=%s -aspect 16:9 %s/tv%%03d.jpg" % (
shlex.quote(videofile), rate, shlex.quote(newoutdir))
doCmd(cmd)
if SKIP_FIRST:
# remove the first image
logger.info("Removing first image, unneeded")
os.unlink("%s/tv001.jpg" % newoutdir)
count = len(os.listdir(newoutdir))
logger.info("%d thumbs written in %s" % (count, newoutdir))
# return the list of generated files
return count, get_thumb_images(newoutdir)
def get_thumb_images(newdir):
return glob.glob("%s/tv*.jpg" % newdir)
def resize(files):
"""change image output size to 100 width (originally matches size of video)
- pass a list of files as string rather than use '*' with sips command because
subprocess does not treat * as wildcard like shell does"""
if USE_SIPS:
# HERE IS MAC SPECIFIC PROGRAM THAT YIELDS SLIGHTLY SMALLER JPGs
doCmd("sips --resampleWidth %d %s" % (THUMB_WIDTH, " ".join(map(shlex.quote, files))))
else:
# THIS COMMAND WORKS FINE TOO AND COMES WITH IMAGEMAGICK, IF NOT USING A MAC
doCmd("mogrify -geometry %dx %s" % (THUMB_WIDTH, " ".join(map(shlex.quote, files))))
def get_geometry(file):
"""execute command to give geometry HxW+X+Y of each file matching command
identify -format "%g - %f\n" * #all files
identify -format "%g - %f\n" onefile.jpg #one file
SAMPLE OUTPUT
100x66+0+0 - _tv001.jpg
100x2772+0+0 - sprite2.jpg
4200x66+0+0 - sprite2h.jpg"""
geom = doCmd("""identify -format "%%g - %%f\n" %s""" % shlex.quote(file))
parts = geom.decode().split("-", 1)
return parts[0].strip() # return just the geometry prefix of the line, sans extra whitespace
def makevtt(spritefile, numsegments, coords, gridsize, writefile, thumbRate=None):
"""generate & write vtt file mapping video time to each image's coordinates
in our spritemap"""
# split geometry string into individual parts
##4200x66+0+0 === WxH+X+Y
if not thumbRate:
thumbRate = THUMB_RATE_SECONDS
wh, xy = coords.split("+", 1)
w, h = wh.split("x")
w = int(w)
h = int(h)
# x,y = xy.split("+")
# ======= SAMPLE WEBVTT FILE=====
# WEBVTT
#
# 00:00.000 --> 00:05.000
# /assets/thumbnails.jpg#xywh=0,0,160,90
#
# 00:05.000 --> 00:10.000
# /assets/preview2.jpg#xywh=160,0,320,90
#
# 00:10.000 --> 00:15.000
# /assets/preview3.jpg#xywh=0,90,160,180
#
# 00:15.000 --> 00:20.000
# /assets/preview4.jpg#xywh=160,90,320,180
# ==== END SAMPLE ========
basefile = os.path.basename(spritefile)
vtt = ["WEBVTT", ""] # line buffer for file contents
if SKIP_FIRST:
clipstart = thumbRate # offset time to skip the first image
else:
clipstart = 0
# NOTE - putting a time gap between thumbnail end & next start has no visual effect in JWPlayer, so not doing it.
clipend = int(clipstart) + int(thumbRate)
adjust = float(thumbRate) * TIMESYNC_ADJUST
for imgnum in range(1, numsegments + 1):
xywh = get_grid_coordinates(imgnum, gridsize, w, h)
start = get_time_str(clipstart, adjust=adjust)
end = get_time_str(clipend, adjust=adjust)
clipstart = int(clipend)
clipend = int(clipend) + int(thumbRate)
vtt.append("Img %d" % imgnum)
vtt.append("%s --> %s" % (start, end)) # 00:00.000 --> 00:05.000
vtt.append("%s#xywh=%s" % (basefile, xywh))
vtt.append("") # Linebreak
vtt = "\n".join(vtt)
# output to file
writevtt(writefile, vtt)
def get_time_str(numseconds, adjust=None):
""" convert time in seconds to VTT format time (HH:)MM:SS.ddd"""
if adjust: # offset the time by the adjust amount, if applicable
seconds = max(float(numseconds) + adjust, 0) # don't go below 0! can't have a negative timestamp
else:
seconds = numseconds
delta = relativedelta.relativedelta(seconds=seconds)
return "%02d:%02d:%02d.000" % (delta.hours, delta.minutes, delta.seconds)
def get_grid_coordinates(imgnum, gridsize, w, h):
""" given an image number in our sprite, map the coordinates to it in X,Y,W,H format"""
y = int((imgnum - 1) / gridsize)
x = int((imgnum - 1) - (y * gridsize))
imgx = x * w
imgy = y * h
return "%s,%s,%s,%s" % (imgx, imgy, w, h)
def makesprite(outdir, spritefile, coords, gridsize):
"""montage _tv*.jpg -tile 8x8 -geometry 100x66+0+0 montage.jpg #GRID of images
NOT USING: convert tv*.jpg -append sprite.jpg #SINGLE VERTICAL LINE of images
NOT USING: convert tv*.jpg +append sprite.jpg #SINGLE HORIZONTAL LINE of images
base the sprite size on the number of thumbs we need to make into a grid."""
grid = "%dx%d" % (gridsize, gridsize)
cmd = "montage %s/tv*.jpg -tile %s -geometry %s %s" % (shlex.quote(outdir), grid, coords, shlex.quote(
spritefile)) # if video had more than 144 thumbs, would need to be bigger grid, making it big to cover all our case
doCmd(cmd)
def writevtt(vttfile, contents):
""" output VTT file """
with open(vttfile, mode="w") as h:
h.write(contents)
logger.info("Wrote: %s" % vttfile)
def removespeed(videofile):
"""some of my files are suffixed with datarate, e.g. myfile_3200.mp4;
this trims the speed from the name since it's irrelevant to my sprite names (which apply regardless of speed);
you won't need this if it's not relevant to your filenames"""
videofile = videofile.strip()
speed = videofile.rfind("_")
speedlast = videofile.rfind(".")
maybespeed = videofile[speed + 1:speedlast]
try:
int(maybespeed)
videofile = videofile[:speed] + videofile[speedlast:]
except ValueError:
pass  # no numeric speed suffix; leave the filename unchanged
return videofile
def run(task, thumbRate=None):
addLogging()
if not thumbRate:
thumbRate = THUMB_RATE_SECONDS
outdir = task.getOutdir()
spritefile = task.getSpriteFile()
# create snapshots
numfiles, thumbfiles = takesnaps(task.getVideoFile(), outdir, thumbRate=thumbRate)
# resize them to be mini
resize(thumbfiles)
# get coordinates from a resized file to use in spritemapping
gridsize = int(math.ceil(math.sqrt(numfiles)))
coords = get_geometry(thumbfiles[0]) # use the first file (since they are all same size) to get geometry settings
# convert small files into a single sprite grid
makesprite(outdir, spritefile, coords, gridsize)
# generate a vtt with coordinates to each image in sprite
makevtt(spritefile, numfiles, coords, gridsize, task.getVTTFile(), thumbRate=thumbRate)
def addLogging():
global logSetup
if not logSetup:
basescript = os.path.splitext(os.path.basename(sys.argv[0]))[0]
LOG_FILENAME = 'logs/%s.%s.log' % (basescript, datetime.datetime.now().strftime(
"%Y%m%d_%H%M%S")) # new log per job so we can run this program concurrently
# CONSOLE AND FILE LOGGING
print("Writing log to: %s" % LOG_FILENAME)
if not os.path.exists('logs'):
os.makedirs('logs')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(LOG_FILENAME)
logger.addHandler(handler)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logSetup = True # set flag so we don't reset log in same batch
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit("Please pass the full path or url to the video file for which to create thumbnails.")
if len(sys.argv) >= 3:
THUMB_OUTDIR = sys.argv[2]
if len(sys.argv) == 4:
THUMB_RATE_SECONDS = int(sys.argv[3])
videofile = sys.argv[1]
task = SpriteTask(videofile)
run(task)

utils/encodeCDN.py Normal file

@@ -0,0 +1,183 @@
import os
import subprocess
import shutil
import platform
# DASH segment naming template; on Linux the command runs through a POSIX shell
# (shell=True), so '$' must be escaped to keep the shell from expanding it.
segment = ' -init_seg_name chunks/init-stream$RepresentationID$.m4s -media_seg_name chunks/chunk-stream$RepresentationID$-$Number%05d$.m4s'
if platform.system() == 'Linux':
segment = r' -init_seg_name chunks/init-stream\$RepresentationID\$.m4s -media_seg_name chunks/chunk-stream\$RepresentationID\$-\$Number%05d\$.m4s'
def changeM4SToHTML(mpdpath, chunkspath):
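"""Rename the generated .m4s DASH segments to .webp and rewrite the manifest to match."""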
for filename in os.listdir(chunkspath):
infilename = os.path.join(chunkspath, filename)
if not os.path.isfile(infilename): continue
newname = infilename.replace('.m4s', '.webp')
_ = os.rename(infilename, newname)
# Modify manifest
with open(mpdpath, 'r') as file:
filedata = file.read()
# Replace the target string
filedata = filedata.replace('.m4s', '.webp')
# Write the file out again
with open(mpdpath, 'w') as file:
file.write(filedata)
def createFolder(cdnFolder, resolution):
if platform.system() == 'Linux':
if not os.path.exists(os.path.join(cdnFolder, resolution, 'chunks')):
os.makedirs(os.path.join(cdnFolder, resolution, 'chunks'))
else:
if not os.path.exists('chunks'):
os.makedirs('chunks')
if not os.path.exists(os.path.join(cdnFolder, resolution)):
os.makedirs(os.path.join(cdnFolder, resolution))
def create_sprites(cdn_folder):
"""
Creates video player sprites
"""
video_file = os.path.join(cdn_folder, 'x264.720p.mp4')
# Generating Sprites
if not os.path.exists(os.path.join(cdn_folder, 'thumbs.vtt')) and os.path.exists(video_file):
os.system(f'python makesprites.py "{video_file}"')
os.rename("thumbs.vtt", os.path.join(cdn_folder, 'thumbs.vtt'))
os.rename("sprite.jpg", os.path.join(cdn_folder, 'sprite.jpg'))
shutil.rmtree('thumbs')
shutil.rmtree('logs')
return
print('Skipped Sprites')
def encode_720p_fallback(cdn_folder, video_source, upscale_output, aspect_ratio):
"""
Fallback video stream for apple devices
"""
output = os.path.join(cdn_folder, 'x264.720p.mp4')
if os.path.exists(output):
print('Skipped 720p Encode')
return
command = (
f'ffmpeg -v quiet -stats -i "{upscale_output}" -i "{video_source}" '
'-map 0:v:0 -map 1:a:0 '
'-c:v libx264 -crf 22 -pix_fmt yuv420p '
f'-vf scale=1280:720,setsar=1:1 -aspect {aspect_ratio} '
'-c:a aac -b:a 128k '
f'-sn -map_metadata -1 -movflags +faststart "{output}"'
)
subprocess.call(command, shell=True)
def encode_720p(cdn_folder, video_source, upscale_output, aspect_ratio):
output = os.path.join(cdn_folder, '720', 'manifest.mpd')
if os.path.exists(output):
print('Skipped 720p DASH Encode')
return
print('Encoding 720p')
createFolder(cdn_folder, '720')
command = (
f'ffmpeg -v quiet -stats -i "{upscale_output}" -i "{video_source}" '
'-map 0:v:0 -map 1:a:0 '
'-c:v libx264 -crf 22 -preset medium -pix_fmt yuv420p -g 24 -keyint_min 24 -sc_threshold 0 -x264-params keyint=24:min-keyint=24:scenecut=0 '
f'-vf scale=1280:720,setsar=1:1 -aspect {aspect_ratio} '
f'{segment} "{output}"'
)
subprocess.call(command, shell=True)
if platform.system() != 'Linux':
shutil.move('chunks', os.path.join(cdn_folder, '720', 'chunks'))
changeM4SToHTML(output, os.path.join(cdn_folder, '720', 'chunks'))
def EncodeCDN(f, cdnFolder, inputAspect, upscaleOut, interpolateOut, INTERPOLATE_4K, interpolate_4k_output):
out1080mpd = os.path.join(cdnFolder, '1080', 'manifest.mpd')
out1080mpd48 = os.path.join(cdnFolder, '1080i', 'manifest.mpd')
out2160mpd = os.path.join(cdnFolder, '2160', 'manifest.mpd')
out2160mpd48 = os.path.join(cdnFolder, '2160i', 'manifest.mpd')
# 720p
encode_720p(cdnFolder, f, upscaleOut, inputAspect)
encode_720p_fallback(cdnFolder, f, upscaleOut, inputAspect)
create_sprites(cdnFolder)
# 1080p
if not os.path.exists(out1080mpd):
print('Encoding 1080p')
createFolder(cdnFolder, '1080')
subprocess.call('ffmpeg -v quiet -stats -i "' + upscaleOut + '" -i "' + f + '" -map 0:v:0 -map 1:a:0'
+ ' -c:v libsvtav1 -preset 6 -crf 26 -pix_fmt yuv420p -svtav1-params keyint=1s:tune=0 -vf scale=1920:1080,setsar=1:1 -aspect ' + inputAspect
+ ' -c:a aac -b:a 128k -ac 2 -sn -map_metadata -1 -seg_duration 10 -use_template 1 -use_timeline 1'
+ segment + ' "' + out1080mpd + '"', shell=True)
if platform.system() != 'Linux':
shutil.move('chunks', os.path.join(cdnFolder, '1080', 'chunks'))
changeM4SToHTML(out1080mpd, os.path.join(cdnFolder, '1080', 'chunks'))
else:
print('Skipped 1080p Encode')
# 1080p 48fps
if not os.path.exists(out1080mpd48):
print('Encoding 1080p 48fps')
createFolder(cdnFolder, '1080i')
# DASH av1 1080p
subprocess.call('ffmpeg -v quiet -stats -i "' + interpolateOut + '" -i "' + f + '" -map 0:v:0 -map 1:a:0'
+ ' -c:v libsvtav1 -preset 6 -crf 26 -pix_fmt yuv420p -svtav1-params keyint=1s:tune=0 -vf scale=1920:1080,setsar=1:1 -aspect ' + inputAspect
+ ' -c:a aac -b:a 128k -ac 2 -sn -map_metadata -1 -seg_duration 10 -use_template 1 -use_timeline 1'
+ segment + ' "' + out1080mpd48 + '"', shell=True)
if platform.system() != 'Linux':
shutil.move('chunks', os.path.join(cdnFolder, '1080i', 'chunks'))
changeM4SToHTML(out1080mpd48, os.path.join(cdnFolder, '1080i', 'chunks'))
else:
print('Skipped 1080p 48fps Encode')
# 2160p
if not os.path.exists(out2160mpd):
print('Encoding 2160p')
createFolder(cdnFolder, '2160')
subprocess.call('ffmpeg -v quiet -stats -i "' + upscaleOut + '" -i "' + f + '" -map 0:v:0 -map 1:a:0'
+ ' -c:v libsvtav1 -preset 6 -crf 26 -pix_fmt yuv420p -svtav1-params keyint=1s:tune=0 -vf "scale=\'min(3840,iw)\':-2,setsar=1:1" -aspect ' + inputAspect
+ ' -c:a aac -b:a 128k -ac 2 -sn -map_metadata -1 -seg_duration 10 -use_template 1 -use_timeline 1'
+ segment + ' "' + out2160mpd + '"', shell=True)
if platform.system() != 'Linux':
shutil.move('chunks', os.path.join(cdnFolder, '2160', 'chunks'))
changeM4SToHTML(out2160mpd, os.path.join(cdnFolder, '2160', 'chunks'))
else:
print('Skipped 2160p Encode')
# 2160p 48fps
if not os.path.exists(out2160mpd48) and INTERPOLATE_4K:
print('Encoding 2160p 48fps')
createFolder(cdnFolder, '2160i')
subprocess.call('ffmpeg -v quiet -stats -i "' + interpolate_4k_output + '" -i "' + f + '" -map 0:v:0 -map 1:a:0'
+ ' -c:v libsvtav1 -preset 6 -crf 26 -pix_fmt yuv420p -svtav1-params keyint=1s:tune=0 -vf "scale=\'min(3840,iw)\':-2,setsar=1:1" -aspect ' + inputAspect
+ ' -c:a aac -b:a 128k -ac 2 -sn -map_metadata -1 -seg_duration 10 -use_template 1 -use_timeline 1'
+ segment + ' "' + out2160mpd48 + '"', shell=True)
if platform.system() != 'Linux':
shutil.move('chunks', os.path.join(cdnFolder, '2160i', 'chunks'))
changeM4SToHTML(out2160mpd48, os.path.join(cdnFolder, '2160i', 'chunks'))
else:
print('Skipped 2160p 48fps Encode')

utils/encodeDDL.py Normal file

@@ -0,0 +1,52 @@
import os
import subprocess
def extract_subs(video_source, subtitle_out, vtt_out):
if os.path.exists(subtitle_out):
print('Skipped Sub Extract')
return
print('Extracting Sub')
subprocess.call(f'ffmpeg -v quiet -stats -i "{video_source}" -c copy "{subtitle_out}"', shell=True)
subprocess.call(f'ffmpeg -v quiet -stats -i "{subtitle_out}" "{vtt_out}"', shell=True)
def encode_video(video_source, input_file, output_file, mux_file, temp_name, input_aspect, width, height):
if os.path.exists(output_file):
print(f'Skipped {height}p HEVC Encode')
return
print(f'Encoding {height}p HEVC')
command = (f'ffmpeg -v quiet -stats -i "{input_file}" -i "{video_source}"'
' -map 0:v:0 -map 1:a:0 -map 1:s:0 -map 1:t? -map 1:d?'
f' -disposition:v:0 default -metadata Title="{temp_name} [hstream.moe]"'
' -metadata:s:v:0 title="Upscaled by hstream.moe"'
' -c:v hevc_nvenc -qp 18 -pix_fmt yuv420p10le'
f' -vf "scale=\'min({width},iw)\':-2,setsar=1:1" -aspect {input_aspect}'
' -c:a aac -b:a 160k -c:s copy'
f' "{output_file}"'
)
subprocess.call(command, shell=True)
subprocess.run(f'mkvmerge --output "{mux_file}" "{output_file}"', shell=True)
def EncodeDDL(video_source, cdn_folder, folder_name, temp_name, upscale_out, input_aspect, interpolate_out, INTERPOLATE_4K, interpolate_4k_output):
# Extract subtitles
out_ass = os.path.join(cdn_folder, 'eng.ass')
out_vtt = os.path.join(cdn_folder, 'eng.vtt')
extract_subs(video_source, out_ass, out_vtt)
# Encoding settings
resolutions = [
(1920, 1080, upscale_out, "[1080p-HEVC]"),
(1920, 1080, interpolate_out, "[1080p-HEVC][48fps]"),
(3840, 2160, upscale_out, "[2160p-HEVC]")
]
# Also encode 4k 48fps if enabled
if INTERPOLATE_4K:
resolutions.append((3840, 2160, interpolate_4k_output, "[2160p-HEVC][48fps]"))
for width, height, input_file, suffix in resolutions:
tmp_out = os.path.join('2-Out', folder_name, f"{temp_name} {suffix}[hstream.moe].mkv")
mux_out = os.path.join('2-Out', folder_name, 'Muxed', f"{temp_name} {suffix}[hstream.moe].mkv")
encode_video(video_source, input_file, tmp_out, mux_out, temp_name, input_aspect, width, height)

utils/interpolate.py Normal file

@@ -0,0 +1,50 @@
import os
import subprocess
def createInterpolateScript(upscaleOut, tempName, inputAspect):
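"""Write a VapourSynth script next to the upscaled file that doubles the frame rate with RIFE (4:3 sources are resized to 1440x1080, everything else to 1920x1080)."""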
if os.path.isfile(upscaleOut + '.vpy'):
print('Interpolate script exists')
return
script = []
if inputAspect == '4:3':
script = ['from vsrife import rife',
'import vapoursynth as vs',
'from vapoursynth import core',
'clip = core.ffms2.Source(source="./' + tempName + ' [4k][HEVC].mkv")',
'clip = vs.core.resize.Bicubic(clip, width=1440, height=1080, format=vs.RGBS, matrix_in_s="709")',
'clip = rife(clip=clip, model="4.15", factor_num=2, factor_den=1)',
'clip = vs.core.resize.Bicubic(clip, format=vs.YUV420P8, matrix_s="709")',
'clip.set_output()']
else:
script = ['from vsrife import rife',
'import vapoursynth as vs',
'from vapoursynth import core',
'clip = core.ffms2.Source(source="./' + tempName + ' [4k][HEVC].mkv")',
'clip = vs.core.resize.Bicubic(clip, width=1920, height=1080, format=vs.RGBS, matrix_in_s="709")',
'clip = rife(clip=clip, model="4.15", factor_num=2, factor_den=1)',
'clip = vs.core.resize.Bicubic(clip, format=vs.YUV420P8, matrix_s="709")',
'clip.set_output()']
if not os.path.isfile(upscaleOut + '.vpy'):
with open(upscaleOut + '.vpy', 'a') as fs:
fs.writelines([i + '\n' for i in script])
def Interpolate(interpolateOut, upscaleOut, tempName, inputAspect):
if os.path.isfile(interpolateOut):
print('Already interpolated')
return
createInterpolateScript(upscaleOut, tempName, inputAspect)
if not os.path.isfile(upscaleOut + '.vpy'):
print('=== Interpolation script not found ===')
return
print('Interpolating')
subprocess.call('vspipe -c y4m "' + upscaleOut + '.vpy" - | ffmpeg -v quiet -stats -i - -c:v hevc_nvenc -qp 5 "' + interpolateOut + '"', shell=True)
# Remove Temp Files
os.remove(upscaleOut + '.ffindex')
os.remove(upscaleOut + '.vpy')

utils/interpolate4k.py Normal file

@@ -0,0 +1,42 @@
import os
import subprocess
def createInterpolateScript(upscaleOut, tempName):
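"""Write a VapourSynth script that RIFE-interpolates the 4K upscale to double its frame rate at 3840x2160."""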
if os.path.isfile(upscaleOut + '.vpy'):
print('Interpolate script exists')
return
script = ['from vsrife import rife',
'import vapoursynth as vs',
'from vapoursynth import core',
'clip = core.ffms2.Source(source="./' + tempName + ' [4k][HEVC].mkv")',
'clip = vs.core.resize.Bicubic(clip, width=3840, height=2160, format=vs.RGBS, matrix_in_s="709")',
'clip = rife(clip=clip, model="4.15.lite", factor_num=2, factor_den=1)',
'clip = vs.core.resize.Bicubic(clip, format=vs.YUV420P8, matrix_s="709")',
'clip.set_output()']
if not os.path.isfile(upscaleOut + '.vpy'):
with open(upscaleOut + '.vpy', 'a') as fs:
fs.writelines([i + '\n' for i in script])
def Interpolate4K(interpolateOut, upscaleOut, interpolateVideo, tempName):
if not interpolateVideo:
print('Skipped interpolation')
return
if os.path.isfile(interpolateOut):
print('Already interpolated')
return
createInterpolateScript(upscaleOut, tempName)
if not os.path.isfile(upscaleOut + '.vpy'):
print('=== Interpolation script not found ===')
return
print('Interpolating')
subprocess.call('vspipe -c y4m "' + upscaleOut + '.vpy" - | ffmpeg -v quiet -stats -i - -c:v hevc_nvenc -qp 5 "' + interpolateOut + '"', shell=True)
# Remove Temp Files
os.remove(upscaleOut + '.ffindex')
os.remove(upscaleOut + '.vpy')

utils/mediainfo.py Normal file

@@ -0,0 +1,30 @@
from pymediainfo import MediaInfo
def get_aspect_ratio(inputFile):
media_info = MediaInfo.parse(inputFile)
for track in media_info.tracks:
if track.track_type == "Video" and track.other_display_aspect_ratio:
aspect_ratio = track.other_display_aspect_ratio
print('Detected Aspect Ratio : ' + aspect_ratio[0])
return aspect_ratio[0]
# Fallback value
print('Falling Back To Aspect Ratio: 16:9')
return '16:9'
def get_framerate(inputFile):
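"""Map MediaInfo's decimal frame rate to an exact rational rate for ffmpeg, defaulting to 24000/1001."""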
media_info = MediaInfo.parse(inputFile)
for track in media_info.tracks:
if track.track_type == "Video":
frame_rate = track.frame_rate
if frame_rate == '29.970':
print(f"Detected Framerate : 30000/1001")
return '30000/1001'
if frame_rate == '24.000':
print(f"Detected Framerate : 24000/1000")
return '24000/1000'
print(f"Detected Framerate : 24000/1001")
return '24000/1001'

utils/upcale.py Normal file

@@ -0,0 +1,38 @@
import os
import subprocess
from utils.mediainfo import get_framerate
def re_encode(input_file, upscale_out, temp_out, max_width, input_aspect):
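"""Losslessly pre-encode the source with FFV1, normalizing the frame rate and capping the resolution, before it is fed to the upscale script."""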
if os.path.exists(upscale_out):
print('Skipped Pre-Encode')
return
command = (
f'ffmpeg -v quiet -stats -i "{input_file}" '
'-c:v ffv1 -level 3 '
f'-vf "fps={get_framerate(input_file)},scale=-1:\'min({max_width},ih)\'" -aspect {input_aspect} '
'-pix_fmt yuv420p -color_primaries 1 -color_trc 1 -colorspace 1 '
'-an -sn -map_metadata -1 '
f'"{temp_out}"'
)
subprocess.call(command, shell=True)
def upscale(input_file, upscale_out, max_width, input_aspect):
temp_out = os.path.join('1-Temp', 'source.mkv')
vsgan = os.path.join('utils', 'vs-realesrgan.vpy')
# Re-Encode to fix issues
re_encode(input_file, upscale_out, temp_out, max_width, input_aspect)
if os.path.exists(upscale_out):
print('Skipped Upscale')
return
print('Started Upscale')
subprocess.call(f'vspipe -c y4m {vsgan} - | ffmpeg -v quiet -stats -f yuv4mpegpipe -i - -c:v hevc_nvenc -qp 5 -aspect {input_aspect} "{upscale_out}"', shell=True)
# Remove Temp Files
os.remove(temp_out)
os.remove(temp_out + '.ffindex')

utils/vs-realesrgan.vpy Normal file

@@ -0,0 +1,17 @@
import vapoursynth as vs
from vapoursynth import core
from vsrealesrgan import realesrgan
# Video Source
clip = core.ffms2.Source(source='../1-Temp/source.mkv')
# Set Input Colorspace
clip = core.resize.Bicubic(clip, format=vs.RGBS, matrix_in_s="709")
# Upscale
clip = realesrgan(clip=clip, model=4)
# Set Output Colorspace
clip = core.resize.Bicubic(clip, format=vs.YUV420P8, matrix_s="709")
clip.set_output()