initial commit and version 1.0

2025-04-21 15:14:03 +02:00
commit ae6b2bbf44
82 changed files with 10782 additions and 0 deletions

moviepy/video/tools/credits.py Normal file
@@ -0,0 +1,142 @@
"""Contains different functions to make end and opening credits, even though it is
difficult to fill everyone needs in this matter.
"""
from moviepy.decorators import convert_path_to_string
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.fx.Resize import Resize
from moviepy.video.VideoClip import ImageClip, TextClip
class CreditsClip(TextClip):
"""Credits clip.
Parameters
----------
creditfile
A string or path like object pointing to a text file
whose content must be as follows:
.. code:: python
# This is a comment
# The next line says: leave 4 blank lines
.blank 4
..Executive Story Editor
MARCEL DURAND
..Associate Producers
MARTIN MARCEL
DIDIER MARTIN
..Music Supervisor
JEAN DIDIER
width
Total width of the credits text in pixels
gap
Horizontal gap in pixels between the jobs and the names
color
Color of the text. See ``TextClip.list('color')``
for a list of acceptable names.
font
Name of the font to use. See ``TextClip.list('font')`` for
the list of fonts you can use on your computer.
font_size
Size of font to use
stroke_color
Color of the stroke (=contour line) of the text. If ``None``,
there will be no stroke.
stroke_width
Width of the stroke, in pixels. Can be a float, like 1.5.
bg_color
Color of the background. If ``None``, the background will be transparent.
Returns
-------
image
An ImageClip instance that looks like this and can be scrolled
to make some credits: ::
Executive Story Editor MARCEL DURAND
Associate Producers MARTIN MARCEL
DIDIER MARTIN
Music Supervisor JEAN DIDIER
"""
@convert_path_to_string("creditfile")
def __init__(
self,
creditfile,
width,
color="white",
stroke_color="black",
stroke_width=2,
font="Impact-Normal",
font_size=60,
bg_color=None,
gap=0,
):
# Parse the .txt file
texts = []
one_line = True
with open(creditfile) as file:
for line in file:
if line.startswith(("\n", "#")):
# exclude blank lines or comments
continue
elif line.startswith(".blank"):
# .blank n
for i in range(int(line.split(" ")[1])):
texts.append(["\n", "\n"])
elif line.startswith(".."):
texts.append([line[2:], ""])
one_line = True
elif one_line:
texts.append(["", line])
one_line = False
else:
texts.append(["\n", line])
left, right = ("".join(line) for line in zip(*texts))
# Make two columns for the credits
left, right = [
TextClip(
text=txt,
color=color,
stroke_color=stroke_color,
stroke_width=stroke_width,
font=font,
font_size=font_size,
text_align=align,
)
for txt, align in [(left, "left"), (right, "right")]
]
both_columns = CompositeVideoClip(
[left, right.with_position((left.w + gap, 0))],
size=(left.w + right.w + gap, right.h),
bg_color=bg_color,
)
# Scale to the required size
scaled = both_columns.with_effects([Resize(width=width)])
# Transform the CompositeVideoClip into an ImageClip
# Calls ImageClip.__init__()
super(TextClip, self).__init__(scaled.get_frame(0))
self.mask = ImageClip(scaled.mask.get_frame(0), is_mask=True)
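A minimal usage sketch, assuming a ``credits.txt`` file in the format described above and a local font file (both paths are hypothetical): build the credits image, then scroll it over a black background by animating its position.

from moviepy import ColorClip, CompositeVideoClip
from moviepy.video.tools.credits import CreditsClip

credits = CreditsClip("credits.txt", width=600, font="./font.ttf")  # hypothetical paths
background = ColorClip((800, 600), color=(0, 0, 0), duration=10)
# Animate the position so the credits enter from the bottom and leave at the top.
scrolling = credits.with_duration(10).with_position(
    lambda t: ("center", background.h - t * (background.h + credits.h) / 10)
)
final = CompositeVideoClip([background, scrolling])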

moviepy/video/tools/cuts.py Normal file

@@ -0,0 +1,522 @@
"""Contains everything that can help automate the cuts in MoviePy."""
from collections import defaultdict
import numpy as np
from moviepy.decorators import convert_parameter_to_seconds, use_clip_fps_by_default
@use_clip_fps_by_default
@convert_parameter_to_seconds(["start_time"])
def find_video_period(clip, fps=None, start_time=0.3):
"""Find the period of a video based on frames correlation.
Parameters
----------
clip : moviepy.Clip.Clip
Clip for which the video period will be computed.
fps : int, optional
Number of frames per second used to compute the period. Higher values
produce more accurate periods, but the execution time will be longer.
start_time : float, optional
First timeframe used to calculate the period of the clip.
Examples
--------
.. code:: python
from moviepy import *
from moviepy.video.tools.cuts import find_video_period
clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).with_effects([vfx.Loop(2)])
round(find_video_period(clip, fps=80), 6)
1
"""
def frame(t):
return clip.get_frame(t).flatten()
timings = np.arange(start_time, clip.duration, 1 / fps)[1:]
ref = frame(0)
corrs = [np.corrcoef(ref, frame(t))[0, 1] for t in timings]
return timings[np.argmax(corrs)]
class FramesMatch:
"""Frames match inside a set of frames.
Parameters
----------
start_time : float
Starting time.
end_time : float
End time.
min_distance : float
Lower bound on the distance between the first and last frames
max_distance : float
Upper bound on the distance between the first and last frames
"""
def __init__(self, start_time, end_time, min_distance, max_distance):
self.start_time = start_time
self.end_time = end_time
self.min_distance = min_distance
self.max_distance = max_distance
self.time_span = end_time - start_time
def __str__(self): # pragma: no cover
return "(%.04f, %.04f, %.04f, %.04f)" % (
self.start_time,
self.end_time,
self.min_distance,
self.max_distance,
)
def __repr__(self): # pragma: no cover
return self.__str__()
def __iter__(self): # pragma: no cover
return iter(
(self.start_time, self.end_time, self.min_distance, self.max_distance)
)
def __eq__(self, other):
return (
other.start_time == self.start_time
and other.end_time == self.end_time
and other.min_distance == self.min_distance
and other.max_distance == self.max_distance
)
class FramesMatches(list):
"""Frames matches inside a set of frames.
You can instantiate it passing a list of FramesMatch objects or
using the class methods ``load`` and ``from_clip``.
Parameters
----------
lst : list
Iterable of FramesMatch objects.
"""
def __init__(self, lst):
list.__init__(self, sorted(lst, key=lambda e: e.max_distance))
def best(self, n=1, percent=None):
"""Returns a new instance of FramesMatches object or a FramesMatch
from the current class instance given different conditions.
By default returns the first FramesMatch that the current instance
stores.
Parameters
----------
n : int, optional
Number of matches to retrieve from the current FramesMatches object.
Only has effect when ``percent=None``.
percent : float, optional
Percentage of the current matches to retrieve. When given, it
overrides ``n``.
Returns
-------
FramesMatch or FramesMatches : If the number of matches to retrieve is
greater than 1 returns a FramesMatches object, otherwise a
FramesMatch.
"""
if percent is not None:
n = len(self) * percent / 100
return self[0] if n == 1 else FramesMatches(self[: int(n)])
def filter(self, condition):
"""Return a FramesMatches object obtained by filtering out the
FramesMatch which do not satistify a condition.
Parameters
----------
condition : func
Function which takes a FrameMatch object as parameter and returns a
bool.
Examples
--------
.. code:: python
# Only keep the matches corresponding to (> 1 second) sequences.
new_matches = matches.filter(lambda match: match.time_span > 1)
"""
return FramesMatches(filter(condition, self))
def save(self, filename):
"""Save a FramesMatches object to a file.
Parameters
----------
filename : str
Path to the file in which the FramesMatches object data will be dumped.
"""
np.savetxt(
filename,
np.array([np.array(list(e)) for e in self]),
fmt="%.03f",
delimiter="\t",
)
@staticmethod
def load(filename):
"""Load a FramesMatches object from a file.
Parameters
----------
filename : str
Path to the file from which to load the FramesMatches object.
Examples
--------
>>> matching_frames = FramesMatches.load("somefile")
"""
arr = np.loadtxt(filename)
mfs = [FramesMatch(*e) for e in arr]
return FramesMatches(mfs)
@staticmethod
def from_clip(clip, distance_threshold, max_duration, fps=None, logger="bar"):
"""Finds all the frames that look alike in a clip, for instance to make
a looping GIF.
Parameters
----------
clip : moviepy.video.VideoClip.VideoClip
A MoviePy video clip.
distance_threshold : float
Distance above which a match is rejected.
max_duration : float
Maximal duration (in seconds) between two matching frames.
fps : int, optional
Frames per second (default will be ``clip.fps``).
logger : str, optional
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
Returns
-------
FramesMatches
All pairs of frames with ``end_time - start_time < max_duration``
and whose distance is under ``distance_threshold``.
Examples
--------
We find all matching frames in a given video and turn the best match
with a duration of 1.5 seconds or more into a GIF:
.. code:: python
from moviepy import VideoFileClip
from moviepy.video.tools.cuts import FramesMatches
clip = VideoFileClip("foo.mp4").resize(width=200)
matches = FramesMatches.from_clip(
clip, distance_threshold=10, max_duration=3, # will take time
)
best = matches.filter(lambda m: m.time_span > 1.5).best()
clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif")
"""
N_pixels = clip.w * clip.h * 3
def dot_product(F1, F2):
return (F1 * F2).sum() / N_pixels
frame_dict = {} # will store the frames and their mutual distances
def distance(t1, t2):
uv = dot_product(frame_dict[t1]["frame"], frame_dict[t2]["frame"])
u, v = frame_dict[t1]["|F|sq"], frame_dict[t2]["|F|sq"]
return np.sqrt(u + v - 2 * uv)
matching_frames = [] # the final result.
for t, frame in clip.iter_frames(with_times=True, logger=logger):
flat_frame = 1.0 * frame.flatten()
F_norm_sq = dot_product(flat_frame, flat_frame)
F_norm = np.sqrt(F_norm_sq)
for t2 in list(frame_dict.keys()):
# forget old frames, add 't' to the other frames
# check for early rejections based on differing norms
if (t - t2) > max_duration:
frame_dict.pop(t2)
else:
frame_dict[t2][t] = {
"min": abs(frame_dict[t2]["|F|"] - F_norm),
"max": frame_dict[t2]["|F|"] + F_norm,
}
frame_dict[t2][t]["rejected"] = (
frame_dict[t2][t]["min"] > distance_threshold
)
t_F = sorted(frame_dict.keys())
frame_dict[t] = {"frame": flat_frame, "|F|sq": F_norm_sq, "|F|": F_norm}
for i, t2 in enumerate(t_F):
# Compare F(t) to all the previous frames
if frame_dict[t2][t]["rejected"]:
continue
dist = distance(t, t2)
frame_dict[t2][t]["min"] = frame_dict[t2][t]["max"] = dist
frame_dict[t2][t]["rejected"] = dist >= distance_threshold
for t3 in t_F[i + 1 :]:
# For all the next times t3, use d(F(t), F(end_time)) to
# update the bounds on d(F(t), F(t3)). See if you can
# conclude on whether F(t) and F(t3) match.
t3t, t2t3 = frame_dict[t3][t], frame_dict[t2][t3]
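# Triangle inequality: the stored bounds on d(t2, t3), together with
# dist = d(t, t2), bound d(t, t3) without computing it.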
t3t["max"] = min(t3t["max"], dist + t2t3["max"])
t3t["min"] = max(t3t["min"], dist - t2t3["max"], t2t3["min"] - dist)
if t3t["min"] > distance_threshold:
t3t["rejected"] = True
# Store all the good matches (t1, t)
matching_frames += [
(t1, t, frame_dict[t1][t]["min"], frame_dict[t1][t]["max"])
for t1 in frame_dict
if (t1 != t) and not frame_dict[t1][t]["rejected"]
]
return FramesMatches([FramesMatch(*e) for e in matching_frames])
def select_scenes(
self, match_threshold, min_time_span, nomatch_threshold=None, time_distance=0
):
"""Select the scenes at which a video clip can be reproduced as the
smoothest possible way, mainly oriented for the creation of GIF images.
Parameters
----------
match_threshold : float
Maximum distance possible between frames. The smaller, the
better-looping the GIFs are.
min_time_span : float
Minimum duration for a scene. Only matches with a duration longer
than the value passed to this parameter will be extracted.
nomatch_threshold : float, optional
Minimum distance possible between frames. If ``None``, it is set
equal to ``match_threshold``.
time_distance : float, optional
Minimum time offset possible between matches.
Returns
-------
FramesMatches : New instance of the class with the selected scenes.
Examples
--------
.. code:: python
from pprint import pprint
from moviepy import *
from moviepy.video.tools.cuts import FramesMatches
ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4)
mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip]
clip = concatenate_videoclips(mirror_and_clip)
result = FramesMatches.from_clip(clip, 10, 3).select_scenes(
1, 2, nomatch_threshold=0,
)
print(result)
# [(1.0000, 4.0000, 0.0000, 0.0000),
# (1.1600, 3.8400, 0.0000, 0.0000),
# (1.2800, 3.7200, 0.0000, 0.0000),
# (1.4000, 3.6000, 0.0000, 0.0000)]
"""
if nomatch_threshold is None:
nomatch_threshold = match_threshold
dict_starts = defaultdict(list)
for start, end, min_distance, max_distance in self:
dict_starts[start].append([end, min_distance, max_distance])
starts_ends = sorted(dict_starts.items(), key=lambda k: k[0])
result = []
min_start = 0
for start, ends_distances in starts_ends:
if start < min_start:
continue
ends = [end for (end, min_distance, max_distance) in ends_distances]
great_matches = [
(end, min_distance, max_distance)
for (end, min_distance, max_distance) in ends_distances
if max_distance < match_threshold
]
great_long_matches = [
(end, min_distance, max_distance)
for (end, min_distance, max_distance) in great_matches
if (end - start) > min_time_span
]
if not great_long_matches:
continue # No GIF can be made starting at this time
poor_matches = {
end
for (end, min_distance, max_distance) in ends_distances
if min_distance > nomatch_threshold
}
short_matches = {end for end in ends if (end - start) <= 0.6}
if not poor_matches.intersection(short_matches):
continue
end = max(end for (end, min_distance, max_distance) in great_long_matches)
end, min_distance, max_distance = next(
e for e in great_long_matches if e[0] == end
)
result.append(FramesMatch(start, end, min_distance, max_distance))
min_start = start + time_distance
return FramesMatches(result)
def write_gifs(self, clip, gifs_dir, **kwargs):
"""Extract the matching frames represented by the instance from a clip
and write them as GIFs in a directory, one GIF for each matching frame.
Parameters
----------
clip : video.VideoClip.VideoClip
A video clip whose scenes you want to obtain as GIF images.
gifs_dir : str
Directory in which the GIF images will be written.
kwargs
Passed as optional arguments to ``clip.write_gif``.
Examples
--------
.. code:: python
import os
from pprint import pprint
from moviepy import *
from moviepy.video.tools.cuts import FramesMatches
ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4)
clip = concatenate_videoclips([ch_clip.with_effects([vfx.TimeMirror()]), ch_clip])
result = FramesMatches.from_clip(clip, 10, 3).select_scenes(
1, 2, nomatch_threshold=0,
)
os.mkdir("foo")
result.write_gifs(clip, "foo")
# MoviePy - Building file foo/00000100_00000400.gif with imageio.
# MoviePy - Building file foo/00000115_00000384.gif with imageio.
# MoviePy - Building file foo/00000128_00000372.gif with imageio.
# MoviePy - Building file foo/00000140_00000360.gif with imageio.
"""
for start, end, _, _ in self:
name = "%s/%08d_%08d.gif" % (gifs_dir, 100 * start, 100 * end)
clip.subclipped(start, end).write_gif(name, **kwargs)
@use_clip_fps_by_default
def detect_scenes(
clip=None, luminosities=None, luminosity_threshold=10, logger="bar", fps=None
):
"""Detects scenes of a clip based on luminosity changes.
Note that for large clips this may take some time.
Returns
-------
tuple : cuts, luminosities
cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)]
luminosities are the luminosities computed for each
frame of the clip.
Parameters
----------
clip : video.VideoClip.VideoClip, optional
A video clip. Can be None if a list of luminosities is
provided instead. If provided, the luminosity of each
frame of the clip will be computed. If the clip has no
'fps' attribute, you must provide it.
luminosities : list, optional
A list of luminosities, e.g. returned by detect_scenes
in a previous run.
luminosity_threshold : float, optional
Determines a threshold above which the 'luminosity jumps' will be
considered as scene changes. A scene change is defined as a change
between 2 consecutive frames that is larger than
(avg * luminosity_threshold), where avg is the average of the
absolute changes between consecutive frames.
logger : str, optional
Either ``"bar"`` for a progress bar, ``None``, or any Proglog logger.
fps : int, optional
Frames per second value. Must be provided if you provide
no clip or a clip without fps attribute.
"""
if luminosities is None:
luminosities = [
f.sum() for f in clip.iter_frames(fps=fps, dtype="uint32", logger=logger)
]
luminosities = np.array(luminosities, dtype=float)
if clip is not None:
end = clip.duration
else:
end = len(luminosities) * (1.0 / fps)
luminosity_diffs = abs(np.diff(luminosities))
avg = luminosity_diffs.mean()
luminosity_jumps = (
1 + np.array(np.nonzero(luminosity_diffs > luminosity_threshold * avg))[0]
)
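# Convert the frame indices of the jumps into times, then add the clip
# boundaries so the first and last scenes are closed.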
timings = [0] + list((1.0 / fps) * luminosity_jumps) + [end]
cuts = [(t1, t2) for t1, t2 in zip(timings, timings[1:])]
return cuts, luminosities
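A minimal usage sketch for ``detect_scenes``, assuming an input video at a hypothetical path: compute the luminosity-based cuts, then split the clip into one subclip per scene.

from moviepy import VideoFileClip
from moviepy.video.tools.cuts import detect_scenes

clip = VideoFileClip("media/some_video.mp4")  # hypothetical input file
cuts, luminosities = detect_scenes(clip, fps=10, logger=None)
# Each (t1, t2) pair delimits one scene.
scenes = [clip.subclipped(t1, t2) for t1, t2 in cuts]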

moviepy/video/tools/drawing.py Normal file

@@ -0,0 +1,319 @@
"""Deals with making images (np arrays). It provides drawing
methods that are difficult to do with the existing Python libraries.
"""
import numpy as np
def color_gradient(
size,
p1,
p2=None,
vector=None,
radius=None,
color_1=0.0,
color_2=1.0,
shape="linear",
offset=0,
):
"""Draw a linear, bilinear, or radial gradient.
The result is a picture of size ``size`` whose color varies gradually
between ``color_1`` and ``color_2``. With ``shape="radial"`` the color is
``color_1`` at ``p1`` and reaches ``color_2`` at distance ``radius``; with
the linear and bilinear shapes the code places ``color_2`` at ``p1`` and
``color_1`` at ``p2``, as the examples below show.
If it is an RGB picture the result must be transformed into
a 'uint8' array to be displayed normally.
Parameters
----------
size : tuple or list
Size (width, height) in pixels of the final image array.
p1 : tuple or list
Position of the first reference point of the gradient, in pixels (x, y).
p2 : tuple or list, optional
Position of the second reference point of the gradient, in pixels (x, y).
vector : tuple or list, optional
A vector (x, y) in pixels that can be provided instead of ``p2``.
``p2`` is then defined as (p1 + vector).
color_1 : tuple or list, optional
Starting color for the gradient. As default, black. Either floats
between 0 and 1 (for gradients used in masks) or [R, G, B] arrays
(for colored gradients).
color_2 : tuple or list, optional
Color for the second point in the gradient. As default, white. Either
floats between 0 and 1 (for gradients used in masks) or [R, G, B]
arrays (for colored gradients).
shape : str, optional
Shape of the gradient. Can be either ``"linear"``, ``"bilinear"`` or
``"radial"``. In a linear gradient the color varies in one direction,
from point ``p1`` to point ``p2``. In a bilinear gradient it also
varies symmetrically from ``p1`` in the other direction. In a radial
gradient it goes from the center ``p1`` outwards in all directions.
radius : float, optional
If ``shape="radial"``, the radius of the gradient is defined with the
parameter ``radius``, in pixels.
offset : float, optional
Real number between 0 and 1 indicating the fraction of the vector
at which the gradient actually starts. For instance if ``offset``
is 0.9 in a gradient going from ``p1`` to ``p2``, the gradient only
occurs near ``p2`` (before that the image keeps its starting color).
If the offset is 0.9 in a radial gradient, the gradient will
occur in the region located between 90% and 100% of the radius;
this creates a blurry disc of radius ``radius``.
Returns
-------
image
A Numpy array of dimensions (height, width, n_colors) of type float
representing the image of the gradient.
Examples
--------
.. code:: python
color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black
#[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]
# a gradient between red (color_1) and green (color_2)
color_gradient(
(10, 1), (0, 0),
p2=(10, 0),
color_1=(255, 0, 0),
color_2=(0, 255, 0)
)
# [[[ 0. 255. 0. ]
# [ 25.5 229.5 0. ]
# [ 51. 204. 0. ]
# [ 76.5 178.5 0. ]
# [102. 153. 0. ]
# [127.5 127.5 0. ]
# [153. 102. 0. ]
# [178.5 76.5 0. ]
# [204. 51. 0. ]
# [229.5 25.5 0. ]]]
"""
# np-arrayize and change x,y coordinates to y,x
w, h = size
color_1 = np.array(color_1).astype(float)
color_2 = np.array(color_2).astype(float)
if shape == "bilinear":
if vector is None:
if p2 is None:
raise ValueError("You must provide either 'p2' or 'vector'")
vector = np.array(p2) - np.array(p1)
m1, m2 = [
color_gradient(
size,
p1,
vector=v,
color_1=1.0,
color_2=0.0,
shape="linear",
offset=offset,
)
for v in [vector, [-v for v in vector]]
]
arr = np.maximum(m1, m2)
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
p1 = np.array(p1[::-1]).astype(float)
M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)
if shape == "linear":
if vector is None:
if p2 is not None:
vector = np.array(p2[::-1]) - p1
else:
raise ValueError("You must provide either 'p2' or 'vector'")
else:
vector = np.array(vector[::-1])
norm = np.linalg.norm(vector)
n_vec = vector / norm**2  # scaled so that (M - p1).dot(n_vec) is 0 at p1 and 1 at p2
p1 = p1 + offset * vector
arr = (M - p1).dot(n_vec) / (1 - offset)
arr = np.minimum(1, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
elif shape == "radial":
if (radius or 0) == 0:
arr = np.ones((h, w))
else:
arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
arr = arr / ((1 - offset) * radius)
arr = np.minimum(1.0, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return (1 - arr) * color_1 + arr * color_2
raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
def color_split(
size,
x=None,
y=None,
p1=None,
p2=None,
vector=None,
color_1=0,
color_2=1.0,
gradient_width=0,
):
"""Make an image split in 2 colored regions.
Returns an array of size ``size`` divided in two regions called 1 and
2 in what follows, and which will have colors color_1 and color_2
respectively.
Parameters
----------
x : int, optional
If provided, the image is split along the vertical line of abscissa
``x``, the left region being region 1.
y : int, optional
If provided, the image is split along the horizontal line of ordinate
``y``, the top region being region 1.
p1, p2: tuple or list, optional
Positions (x1, y1), (x2, y2) in pixels, where the numbers can be
floats. Region 1 is defined as the whole region on the left when
going from ``p1`` to ``p2``.
p1, vector: tuple or list, optional
``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be
floats. Region 1 is then the region on the left when starting
in position ``p1`` and going in the direction given by ``vector``.
gradient_width : float, optional
If not zero, the split is not sharp, but gradual over a region of
width ``gradient_width`` (in pixels). This is preferable in many
situations (for instance for antialiasing).
Examples
--------
.. code:: python
size = [200, 200]
# an image with all pixels with x<50 =0, the others =1
color_split(size, x=50, color_1=0, color_2=1)
# an image with all pixels with x<50 red, the others green
color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0])
# an image split along an arbitrary line
color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1)
"""
if gradient_width or ((x is None) and (y is None)):
if p2 is not None:
vector = np.array(p2) - np.array(p1)
elif x is not None:
vector = np.array([0, -1.0])
p1 = np.array([x, 0])
elif y is not None:
vector = np.array([1.0, 0.0])
p1 = np.array([0, y])
x, y = vector
vector = np.array([y, -x]).astype("float")
norm = np.linalg.norm(vector)
vector = max(0.1, gradient_width) * vector / norm
return color_gradient(
size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear"
)
else:
w, h = size
shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))
arr = np.zeros(shape)
if x:
arr[:, :x] = color_1
arr[:, x:] = color_2
elif y:
arr[:y] = color_1
arr[y:] = color_2
return arr
def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):
"""Draw an image with a circle.
Draws a circle of color ``color``, on a background of color ``bg_color``,
on a screen of size ``screensize`` at the position ``center=(x, y)``,
with a radius ``radius`` but slightly blurred on the border by ``blur``
pixels.
Parameters
----------
screensize : tuple or list
Size of the canvas.
center : tuple or list
Center of the circle.
radius : float
Radius of the circle, in pixels.
bg_color : tuple or float, optional
Color for the background of the canvas. As default, black.
blur : float, optional
Blur for the border of the circle.
Examples
--------
.. code:: python
from moviepy.video.tools.drawing import circle
circle(
(5, 5), # size
(2, 2), # center
2, # radius
)
# array([[0. , 0. , 0. , 0. , 0. ],
# [0. , 0.58578644, 1. , 0.58578644, 0. ],
# [0. , 1. , 1. , 1. , 0. ],
# [0. , 0.58578644, 1. , 0.58578644, 0. ],
# [0. , 0. , 0. , 0. , 0. ]])
"""
offset = 1.0 * (radius - blur) / radius if radius else 0
return color_gradient(
screensize,
p1=center,
radius=radius,
color_1=color,
color_2=bg_color,
shape="radial",
offset=offset,
)
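A short sketch tying the three helpers together; sizes, positions and colors are illustrative.

from moviepy.video.tools.drawing import circle, color_gradient, color_split

# Radial gradient: color_1 (white) at the center, color_2 (black) at the radius.
grad = color_gradient(
    (200, 100), p1=(100, 50), radius=50, color_1=1.0, color_2=0.0, shape="radial"
)
# Sharp vertical split at x=100: left half red, right half green.
split = color_split((200, 100), x=100, color_1=[255, 0, 0], color_2=[0, 255, 0])
# Blurred disc with values in [0, 1], usable as a mask array.
disc = circle((200, 100), center=(100, 50), radius=40, blur=4)
print(grad.shape, split.shape, disc.shape)  # (100, 200) (100, 200, 3) (100, 200)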

moviepy/video/tools/interpolators.py Normal file

@@ -0,0 +1,238 @@
"""Classes for easy interpolation of trajectories and curves."""
import numpy as np
class Interpolator:
"""Poorman's linear interpolator.
Parameters
----------
tt : list, optional
List of time frames for the interpolator.
ss : list, optional
List of values for the interpolator.
ttss : list, optional
Lists of time frames and their corresponding values for the
interpolator. This argument can be used instead of ``tt`` and ``ss``
to instantiate the interpolator using a single argument.
left : float, optional
Value to return when ``t < tt[0]``.
right : float, optional
Value to return when ``t > tt[-1]``.
Examples
--------
.. code:: python
# instantiate using `tt` and `ss`
interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])
# instantiate using `ttss`
interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]]) # [t, value]
"""
def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
if ttss is not None:
tt, ss = zip(*ttss)
self.tt = 1.0 * np.array(tt)
self.ss = 1.0 * np.array(ss)
self.left = left
self.right = right
self.tmin, self.tmax = min(tt), max(tt)
def __call__(self, t):
"""Interpolates ``t``.
Parameters
----------
t : float
Time frame for which the corresponding value will be returned.
"""
return np.interp(t, self.tt, self.ss, self.left, self.right)
class Trajectory:
"""Trajectory compound by time frames and (x, y) pixels.
It's designed as an interpolator, so you can get the position at a given
time ``t``. You can instantiate it from a file using the methods
``from_file`` and ``load_list``.
Parameters
----------
tt : list or numpy.ndarray
Time frames.
xx : list or numpy.ndarray
X positions in the trajectory.
yy : list or numpy.ndarray
Y positions in the trajectory.
Examples
--------
>>> trajectory = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
"""
def __init__(self, tt, xx, yy):
self.tt = 1.0 * np.array(tt)
self.xx = np.array(xx)
self.yy = np.array(yy)
self.update_interpolators()
def __call__(self, t):
"""Interpolates the trajectory at the given time ``t``.
Parameters
----------
t : float
Time for which the corresponding position will be returned.
"""
return np.array([self.xi(t), self.yi(t)])
def addx(self, x):
"""Adds a value to the ``xx`` position of the trajectory.
Parameters
----------
x : int
Value added to ``xx`` in the trajectory.
Returns
-------
Trajectory : new instance with the X positions offset by ``x``.
"""
return Trajectory(self.tt, self.xx + x, self.yy)
def addy(self, y):
"""Adds a value to the ``yy`` position of the trajectory.
Parameters
----------
y : int
Value added to ``yy`` in the trajectory.
Returns
-------
Trajectory : new instance with the Y positions offset by ``y``.
"""
return Trajectory(self.tt, self.xx, self.yy + y)
def update_interpolators(self):
"""Updates the internal X and Y position interpolators for the instance."""
self.xi = Interpolator(self.tt, self.xx)
self.yi = Interpolator(self.tt, self.yy)
def txy(self, tms=False):
"""Returns all times with the X and Y values of each position.
Parameters
----------
tms : bool, optional
If ``True``, the times will be returned in milliseconds.
"""
return zip((1000 if tms else 1) * self.tt, self.xx, self.yy)
def to_file(self, filename):
"""Saves the trajectory data in a text file.
Parameters
----------
filename : str
Path to the location of the new trajectory text file.
"""
np.savetxt(
filename,
np.array(list(self.txy(tms=True))),
fmt="%d",
delimiter="\t",
)
@staticmethod
def from_file(filename):
"""Instantiates an object of Trajectory using a data text file.
Parameters
----------
filename : str
Path to the location of trajectory text file to load.
Returns
-------
Trajectory : new instance loaded from text file.
"""
arr = np.loadtxt(filename, delimiter="\t")
tt, xx, yy = arr.T
return Trajectory(1.0 * tt / 1000, xx, yy)
@staticmethod
def save_list(trajs, filename):
"""Saves a set of trajectories into a text file.
Parameters
----------
trajs : list
List of trajectories to be saved.
filename : str
Path of the text file that will store the trajectories data.
"""
N = len(trajs)
arr = np.hstack([np.array(list(t.txy(tms=True))) for t in trajs])
np.savetxt(
filename,
arr,
fmt="%d",
delimiter="\t",
header="\t".join(N * ["t(ms)", "x", "y"]),
)
@staticmethod
def load_list(filename):
"""Loads a list of trajectories from a data text file.
Parameters
----------
filename : str
Path of the text file that stores the data of a set of trajectories.
Returns
-------
list : List of trajectories loaded from the file.
"""
arr = np.loadtxt(filename, delimiter="\t").T
Nlines = arr.shape[0]
return [
Trajectory(tt=1.0 * a[0] / 1000, xx=a[1], yy=a[2])
for a in np.split(arr, Nlines // 3)
]
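A brief sketch with made-up keyframes: interpolate a trajectory and use it to drive a clip position over time.

from moviepy.video.tools.interpolators import Trajectory

# Three keyframes: t in seconds, (x, y) in pixels.
traj = Trajectory(tt=[0, 0.5, 1.0], xx=[0, 100, 200], yy=[50, 60, 50])
print(traj(0.25))  # [50. 55.], linearly interpolated between keyframes
shifted = traj.addx(10)  # same trajectory, offset 10 pixels to the right
# A trajectory can drive a clip position, e.g. clip.with_position(lambda t: traj(t))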

moviepy/video/tools/subtitles.py Normal file

@@ -0,0 +1,198 @@
"""Experimental module for subtitles support."""
import re
import numpy as np
from moviepy.decorators import convert_path_to_string
from moviepy.tools import convert_to_seconds
from moviepy.video.VideoClip import TextClip, VideoClip
class SubtitlesClip(VideoClip):
"""A Clip that serves as "subtitle track" in videos.
One particularity of this class is that the images of the
subtitle texts are not generated beforehand, but only if
needed.
Parameters
----------
subtitles
Either the name of a subtitle file as a string or path-like object,
or a list of the form ``[((start_time, end_time), text), ...]``.
font
Path to a font file to be used. Optional if make_textclip is provided.
make_textclip
A custom function to use for text clip generation. If None, a TextClip
will be generated.
The function must take a text as argument and return a VideoClip
to be used as caption.
encoding
Optional, specifies srt file encoding.
Any standard Python encoding is allowed (listed at
https://docs.python.org/3.8/library/codecs.html#standard-encodings)
Examples
--------
.. code:: python
from moviepy.video.tools.subtitles import SubtitlesClip
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.VideoClip import TextClip
generator = lambda text: TextClip(text=text, font='./path/to/font.ttf',
font_size=24, color='white')
sub = SubtitlesClip("subtitles.srt", make_textclip=generator, encoding='utf-8')
myvideo = VideoFileClip("myvideo.avi")
final = CompositeVideoClip([myvideo, sub])
final.write_videofile("final.mp4", fps=myvideo.fps)
"""
def __init__(self, subtitles, font=None, make_textclip=None, encoding=None):
VideoClip.__init__(self, has_constant_size=False)
if not isinstance(subtitles, list):
# `subtitles` is a string or path-like object
subtitles = file_to_subtitles(subtitles, encoding=encoding)
# subtitles = [(map(convert_to_seconds, times), text)
# for times, text in subtitles]
self.subtitles = subtitles
self.textclips = dict()
self.font = font
if make_textclip is None:
if self.font is None:
raise ValueError("Argument font is required if make_textclip is None.")
def make_textclip(txt):
return TextClip(
font=self.font,
text=txt,
font_size=24,
color="#ffffff",
stroke_color="#000000",
stroke_width=1,
)
self.make_textclip = make_textclip
self.start = 0
self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
self.end = self.duration
def add_textclip_if_none(t):
"""Will generate a textclip if it hasn't been generated asked
to generate it yet. If there is no subtitle to show at t, return
false.
"""
sub = [
((text_start, text_end), text)
for ((text_start, text_end), text) in self.textclips.keys()
if (text_start <= t < text_end)
]
if not sub:
sub = [
((text_start, text_end), text)
for ((text_start, text_end), text) in self.subtitles
if (text_start <= t < text_end)
]
if not sub:
return False
sub = sub[0]
if sub not in self.textclips.keys():
self.textclips[sub] = self.make_textclip(sub[1])
return sub
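# When no subtitle is active at t, frame_function and make_mask_frame below
# fall back to a 1x1 black frame / zero mask (the clip has no constant size).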
def frame_function(t):
sub = add_textclip_if_none(t)
return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])
def make_mask_frame(t):
sub = add_textclip_if_none(t)
return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])
self.frame_function = frame_function
hasmask = bool(self.make_textclip("T").mask)
self.mask = VideoClip(make_mask_frame, is_mask=True) if hasmask else None
def in_subclip(self, start_time=None, end_time=None):
"""Returns a sequence of [(t1,t2), text] covering all the given subclip
from start_time to end_time. The first and last times will be cropped so as
to be exactly start_time and end_time if possible.
"""
def is_in_subclip(t1, t2):
try:
return (start_time <= t1 < end_time) or (start_time < t2 <= end_time)
except Exception:
return False
def try_cropping(t1, t2):
try:
return max(t1, start_time), min(t2, end_time)
except Exception:
return t1, t2
return [
(try_cropping(t1, t2), txt)
for ((t1, t2), txt) in self.subtitles
if is_in_subclip(t1, t2)
]
def __iter__(self):
return iter(self.subtitles)
def __getitem__(self, k):
return self.subtitles[k]
def __str__(self):
def to_srt(sub_element):
(start_time, end_time), text = sub_element
formatted_start_time = convert_to_seconds(start_time)
formatted_end_time = convert_to_seconds(end_time)
return "%s - %s\n%s" % (formatted_start_time, formatted_end_time, text)
return "\n\n".join(to_srt(sub) for sub in self.subtitles)
def match_expr(self, expr):
"""Matches a regular expression against the subtitles of the clip."""
return SubtitlesClip(
[sub for sub in self.subtitles if re.findall(expr, sub[1]) != []]
)
def write_srt(self, filename):
"""Writes an ``.srt`` file with the content of the clip."""
with open(filename, "w+") as file:
file.write(str(self))
@convert_path_to_string("filename")
def file_to_subtitles(filename, encoding=None):
"""Converts a srt file into subtitles.
The returned list is of the form ``[((start_time,end_time),'some text'),...]``
and can be fed to SubtitlesClip.
Only works for '.srt' format for the moment.
"""
times_texts = []
current_times = None
current_text = ""
with open(filename, "r", encoding=encoding) as file:
for line in file:
times = re.findall("([0-9]*:[0-9]*:[0-9]*,[0-9]*)", line)
if times:
current_times = [convert_to_seconds(t) for t in times]
elif line.strip() == "":
times_texts.append((current_times, current_text.strip("\n")))
current_times, current_text = None, ""
elif current_times:
current_text += line
return times_texts
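A minimal end-to-end sketch, assuming a ``subtitles.srt`` file and a local font file (both paths are hypothetical): parse the file, build the subtitle track, and overlay it at the bottom of a video.

from moviepy import CompositeVideoClip, VideoFileClip
from moviepy.video.tools.subtitles import SubtitlesClip, file_to_subtitles

# Inspect the parsed [((start, end), text), ...] pairs first.
print(file_to_subtitles("subtitles.srt", encoding="utf-8")[:3])

subs = SubtitlesClip("subtitles.srt", font="./font.ttf", encoding="utf-8")
video = VideoFileClip("myvideo.mp4")
final = CompositeVideoClip([video, subs.with_position(("center", "bottom"))])
final.write_videofile("subtitled.mp4", fps=video.fps)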