generated from thinkode/modelRepository
initial commit and version 1.0
This commit is contained in:
1985
moviepy/video/VideoClip.py
Normal file
1985
moviepy/video/VideoClip.py
Normal file
File diff suppressed because it is too large
Load Diff
1
moviepy/video/__init__.py
Normal file
1
moviepy/video/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Everything about video manipulation."""
|
||||
378
moviepy/video/compositing/CompositeVideoClip.py
Normal file
378
moviepy/video/compositing/CompositeVideoClip.py
Normal file
@@ -0,0 +1,378 @@
|
||||
"""Main video composition interface of MoviePy."""
|
||||
|
||||
from functools import reduce
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
from moviepy.audio.AudioClip import CompositeAudioClip
|
||||
from moviepy.video.VideoClip import ColorClip, VideoClip
|
||||
|
||||
|
||||
class CompositeVideoClip(VideoClip):
    """
    A VideoClip made of other videoclips displayed together. This is the
    base class for most compositions.

    Parameters
    ----------

    size
      The size (width, height) of the final clip.

    clips
      A list of videoclips.

      Clips with a higher ``layer`` attribute will be displayed
      on top of other clips in a lower layer.
      If two or more clips share the same ``layer``,
      then the one appearing latest in ``clips`` will be displayed
      on top (i.e. it has the higher layer).

      For each clip:

      - The attribute ``pos`` determines where the clip is placed.
        See ``VideoClip.set_pos``
      - The mask of the clip determines which parts are visible.

      Finally, if all the clips in the list have their ``duration``
      attribute set, then the duration of the composite video clip
      is computed automatically

    bg_color
      Color for the unmasked and unfilled regions. Set to None for these
      regions to be transparent (will be slower).

    use_bgclip
      Set to True if the first clip in the list should be used as the
      'background' on which all other clips are blitted. That first clip must
      have the same size as the final clip. If it has no transparency, the final
      clip will have no mask.

    The clip with the highest FPS will be the FPS of the composite clip.

    """

    def __init__(
        self, clips, size=None, bg_color=None, use_bgclip=False, is_mask=False
    ):
        if size is None:
            size = clips[0].size

        # The composite needs a mask when the result can be transparent:
        # either no explicit bg_color, or a background clip without any
        # opacity information is used (fully opaque background).
        if use_bgclip and (clips[0].mask is None):
            transparent = False
        else:
            transparent = bg_color is None

        # If we must not use first clip as background and we dont have a color
        # we generate a black background if clip should not be transparent and
        # a transparent background if transparent
        if (not use_bgclip) and bg_color is None:
            if transparent:
                bg_color = 0.0 if is_mask else (0, 0, 0, 0)
            else:
                bg_color = 0.0 if is_mask else (0, 0, 0)

        # The composite runs at the highest FPS found among its clips.
        fpss = [clip.fps for clip in clips if getattr(clip, "fps", None)]
        self.fps = max(fpss) if fpss else None

        VideoClip.__init__(self)

        self.size = size
        self.is_mask = is_mask
        self.bg_color = bg_color

        # Use first clip as background if necessary, else use color
        # either set by user or previously generated
        if use_bgclip:
            self.bg = clips[0]
            self.clips = clips[1:]
            self.created_bg = False
        else:
            self.clips = clips
            self.bg = ColorClip(size, color=self.bg_color, is_mask=is_mask)
            self.created_bg = True

        # order self.clips by layer; stable sort keeps list order within
        # a same layer, so later clips end up on top
        self.clips = sorted(self.clips, key=lambda clip: clip.layer_index)

        # compute duration, only possible when every clip has an end.
        # The extra `ends` check avoids max([]) raising on an empty clip
        # list (e.g. use_bgclip=True with a single clip).
        ends = [clip.end for clip in self.clips]
        if ends and None not in ends:
            duration = max(ends)
            self.duration = duration
            self.end = duration

        # compute audio from every clip carrying an audio track
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if audioclips:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary: composite the clip masks, each
        # synchronized (position/start/end/layer) with its video clip
        if transparent:
            maskclips = [
                (clip.mask if (clip.mask is not None) else clip.with_mask().mask)
                .with_position(clip.pos)
                .with_end(clip.end)
                .with_start(clip.start, change_end=False)
                .with_layer_index(clip.layer_index)
                for clip in self.clips
            ]

            if use_bgclip and self.bg.mask:
                maskclips = [self.bg.mask] + maskclips

            self.mask = CompositeVideoClip(
                maskclips, self.size, is_mask=True, bg_color=0.0
            )

    def frame_function(self, t):
        """The clips playing at time `t` are blitted over one another."""
        # For the mask we recalculate the final transparency we'll need
        # to apply on the result image
        if self.is_mask:
            mask = np.zeros((self.size[1], self.size[0]), dtype=float)
            for clip in self.playing_clips(t):
                mask = clip.compose_mask(mask, t)

            return mask

        # Try doing clip merging with pillow, starting from the
        # background frame (times are relative to each clip's start)
        bg_t = t - self.bg.start
        bg_frame = self.bg.get_frame(bg_t).astype("uint8")
        bg_img = Image.fromarray(bg_frame)

        if self.bg.mask:
            bgm_t = t - self.bg.mask.start
            bg_mask = (self.bg.mask.get_frame(bgm_t) * 255).astype("uint8")
            bg_mask_img = Image.fromarray(bg_mask).convert("L")

            # Resize bg_mask_img to match bg_img, always use top left corner
            if bg_mask_img.size != bg_img.size:
                mask_width, mask_height = bg_mask_img.size
                img_width, img_height = bg_img.size

                if mask_width > img_width or mask_height > img_height:
                    bg_mask_img = bg_mask_img.crop((0, 0, img_width, img_height))
                else:
                    new_mask = Image.new("L", (img_width, img_height), 0)
                    new_mask.paste(bg_mask_img, (0, 0))
                    bg_mask_img = new_mask

            bg_img = bg_img.convert("RGBA")
            bg_img.putalpha(bg_mask_img)

        # For each clip apply on top of current img, in layer order
        current_img = bg_img
        for clip in self.playing_clips(t):
            current_img = clip.compose_on(current_img, t)

        # Turn Pillow image into a numpy array
        frame = np.array(current_img)

        # If frame have transparency, remove it
        # our mask will take care of it during rendering
        if frame.shape[2] == 4:
            return frame[:, :, :3]

        return frame

    def playing_clips(self, t=0):
        """Returns a list of the clips in the composite clips that are
        actually playing at the given time `t`.
        """
        return [clip for clip in self.clips if clip.is_playing(t)]

    def close(self):
        """Closes the instance, releasing all the resources."""
        if self.created_bg and self.bg:
            # Only close the background clip if it was locally created.
            # Otherwise, it remains the job of whoever created it.
            self.bg.close()
            self.bg = None
        if hasattr(self, "audio") and self.audio:
            self.audio.close()
            self.audio = None
|
||||
|
||||
|
||||
def clips_array(array, rows_widths=None, cols_heights=None, bg_color=None):
    """Given a matrix whose rows are clips, creates a CompositeVideoClip where
    all clips are placed side by side horizontally for each clip in each row
    and one row on top of the other for each row. So given next matrix of clips
    with same size:

    ```python
    clips_array([[clip1, clip2, clip3], [clip4, clip5, clip6]])
    ```

    the result will be a CompositeVideoClip with a layout displayed like:

    ```
    ┏━━━━━━━┳━━━━━━━┳━━━━━━━┓
    ┃       ┃       ┃       ┃
    ┃ clip1 ┃ clip2 ┃ clip3 ┃
    ┃       ┃       ┃       ┃
    ┣━━━━━━━╋━━━━━━━╋━━━━━━━┫
    ┃       ┃       ┃       ┃
    ┃ clip4 ┃ clip5 ┃ clip6 ┃
    ┃       ┃       ┃       ┃
    ┗━━━━━━━┻━━━━━━━┻━━━━━━━┛
    ```

    If some clips doesn't fulfill the space required by the rows or columns
    in which are placed, that space will be filled by the color defined in
    ``bg_color``.

    array
      Matrix of clips included in the returned composited video clip.

    rows_widths
      Widths of the different rows in pixels. If ``None``, is set automatically.

    cols_heights
      Heights of the different columns in pixels. If ``None``, is set
      automatically.

    bg_color
      Fill color for the masked and unfilled regions. Set to ``None`` for these
      regions to be transparent (processing will be slower).
    """
    grid = np.array(array)
    # For each cell, (width, height) of the clip it holds.
    dims = np.array([[clip.size for clip in row] for row in grid])

    # Derive per-row / per-column cell sizes when not supplied.
    # NOTE(review): rows_widths is filled from the height axis and
    # cols_heights from the width axis — names look historically swapped,
    # kept as-is for API compatibility.
    if rows_widths is None:
        rows_widths = dims[:, :, 1].max(axis=1)
    if cols_heights is None:
        cols_heights = dims[:, :, 0].max(axis=0)

    # Cell origins: cumulated "column heights" give X offsets and
    # cumulated "row widths" give Y offsets.
    xs = np.cumsum([0] + list(cols_heights))
    ys = np.cumsum([0] + list(rows_widths))

    for row_index, (y, cell_h) in enumerate(zip(ys[:-1], rows_widths)):
        for col_index, (x, cell_w) in enumerate(zip(xs[:-1], cols_heights)):
            cell_clip = grid[row_index, col_index]
            clip_w, clip_h = cell_clip.size
            # Pad an undersized clip by centering it on a background cell.
            if clip_w < cell_w or clip_h < cell_h:
                cell_clip = CompositeVideoClip(
                    [cell_clip.with_position("center")],
                    size=(cell_w, cell_h),
                    bg_color=bg_color,
                ).with_duration(cell_clip.duration)

            grid[row_index, col_index] = cell_clip.with_position((x, y))

    return CompositeVideoClip(
        grid.flatten(), size=(xs[-1], ys[-1]), bg_color=bg_color
    )
|
||||
|
||||
|
||||
def concatenate_videoclips(
    clips, method="chain", transition=None, bg_color=None, is_mask=False, padding=0
):
    """Concatenates several video clips.

    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).

    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the successive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same resolution, the final
      resolution will be such that no clip has to be resized.
      As a consequence the final clip has the height of the highest clip and the
      width of the widest clip of the list. All the clips with smaller dimensions
      will appear centered. The border will be transparent if mask=True, else it
      will be of the color specified by ``bg_color``.

    The clip with the highest FPS will be the FPS of the result clip.

    Parameters
    ----------
    clips
      A list of video clips which must all have their ``duration``
      attributes set.
    method
      "chain" or "compose": see above.
    transition
      A clip that will be played between each two clips of the list.

    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip

    padding
      Only for method='compose'. Duration during two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips who fade
      in on one another). A non-null padding automatically sets the method to
      `compose`.

    Raises
    ------
    ValueError
      If ``method`` is neither "chain" nor "compose".
    """
    if transition is not None:
        # Interleave the transition clip between each consecutive pair.
        clip_transition_pairs = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]]
        transition = None

    # timings[i] is the start time of clip i; timings[-1] the total duration.
    timings = np.cumsum([0] + [clip.duration for clip in clips])

    sizes = [clip.size for clip in clips]

    w = max(size[0] for size in sizes)
    h = max(size[1] for size in sizes)

    timings = np.maximum(0, timings + padding * np.arange(len(timings)))
    timings[-1] -= padding  # Last element is the duration of the whole

    if method == "chain":

        def frame_function(t):
            # Pick the last clip whose start time is <= t.
            i = max([i for i, e in enumerate(timings) if e <= t])
            return clips[i].get_frame(t - timings[i])

        def get_mask(clip):
            # Clips without a mask contribute a fully opaque one.
            mask = clip.mask or ColorClip(clip.size, color=1, is_mask=True)
            if mask.duration is None:
                mask.duration = clip.duration
            return mask

        result = VideoClip(is_mask=is_mask, frame_function=frame_function)
        if any(clip.mask is not None for clip in clips):
            masks = [get_mask(clip) for clip in clips]
            result.mask = concatenate_videoclips(masks, method="chain", is_mask=True)
        result.clips = clips
    elif method == "compose":
        result = CompositeVideoClip(
            [
                clip.with_start(t).with_position("center")
                for (clip, t) in zip(clips, timings)
            ],
            size=(w, h),
            bg_color=bg_color,
            is_mask=is_mask,
        )
    else:
        # ValueError is more precise than a bare Exception and remains
        # backward-compatible for callers catching Exception.
        raise ValueError(
            "MoviePy Error: The 'method' argument of "
            "concatenate_videoclips must be 'chain' or 'compose'"
        )

    result.timings = timings

    result.start_times = timings[:-1]
    result.start, result.duration, result.end = 0, timings[-1], timings[-1]

    audio_t = [
        (clip.audio, t) for clip, t in zip(clips, timings) if clip.audio is not None
    ]
    if audio_t:
        result.audio = CompositeAudioClip([a.with_start(t) for a, t in audio_t])

    fpss = [clip.fps for clip in clips if getattr(clip, "fps", None) is not None]
    result.fps = max(fpss) if fpss else None
    return result
|
||||
1
moviepy/video/compositing/__init__.py
Normal file
1
moviepy/video/compositing/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""All for compositing video clips."""
|
||||
83
moviepy/video/fx/AccelDecel.py
Normal file
83
moviepy/video/fx/AccelDecel.py
Normal file
@@ -0,0 +1,83 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class AccelDecel(Effect):
    """Accelerates and decelerates a clip, useful for GIF making.

    Parameters
    ----------

    new_duration : float
      Duration for the new transformed clip. If None, will be that of the
      current clip.

    abruptness : float
      Slope shape in the acceleration-deceleration function. It will depend
      on the value of the parameter:

      * ``-1 < abruptness < 0``: speed up, down, up.
      * ``abruptness == 0``: no effect.
      * ``abruptness > 0``: speed down, up, down.

    soonness : float
      For positive abruptness, determines how soon the transformation occurs.
      Should be a positive number.

    Raises
    ------

    ValueError
      When ``soonness`` argument is lower than 0.

    Examples
    --------

    The following graphs show functions generated by different combinations
    of arguments, where the value of the slopes represents the speed of the
    videos generated, being the linear function (in red) a combination that
    does not produce any transformation.

    .. image:: /_static/medias/accel_decel-fx-params.png
      :alt: acced_decel FX parameters combinations
    """

    new_duration: float = None
    abruptness: float = 1.0
    soonness: float = 1.0

    def _f_accel_decel(
        self, t, old_duration, new_duration, abruptness=1.0, soonness=1.0
    ):
        """Map a time ``t`` of the new clip onto the old clip's timeline."""
        a = 1.0 + abruptness

        def _f(t):
            # Symmetric ease curve on [0, 1]: f1 shapes the first half,
            # f2 mirrors it over the second half.
            def f1(t):
                return (0.5) ** (1 - a) * (t**a)

            def f2(t):
                return 1 - f1(1 - t)

            return (t < 0.5) * f1(t) + (t >= 0.5) * f2(t)

        return old_duration * _f((t / new_duration) ** soonness)

    def apply(self, clip):
        """Apply the effect to the clip."""
        if self.new_duration is None:
            self.new_duration = clip.duration

        if self.soonness < 0:
            # Fixed: message previously misspelled the parameter as 'sooness'.
            raise ValueError("'soonness' should be a positive number")

        return clip.time_transform(
            lambda t: self._f_accel_decel(
                t=t,
                old_duration=clip.duration,
                new_duration=self.new_duration,
                abruptness=self.abruptness,
                soonness=self.soonness,
            )
        ).with_duration(self.new_duration)
|
||||
38
moviepy/video/fx/BlackAndWhite.py
Normal file
38
moviepy/video/fx/BlackAndWhite.py
Normal file
@@ -0,0 +1,38 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class BlackAndWhite(Effect):
    """Desaturates the picture, makes it black and white.

    Parameter RGB allows to set weights for the different color
    channels.
    If RGB is 'CRT_phosphor' a special set of values is used.
    preserve_luminosity maintains the sum of RGB to 1.
    """

    RGB: str = None
    preserve_luminosity: bool = True

    def apply(self, clip):
        """Apply the effect to the clip."""
        if self.RGB is None:
            self.RGB = [1, 1, 1]

        if self.RGB == "CRT_phosphor":
            self.RGB = [0.2125, 0.7154, 0.0721]

        # Normalize the weights so their sum is 1 when luminosity
        # preservation is requested.
        total = sum(self.RGB) if self.preserve_luminosity else 1
        weight_r, weight_g, weight_b = 1.0 * np.array(self.RGB) / total

        def desaturate(frame):
            gray = (
                weight_r * frame[:, :, 0]
                + weight_g * frame[:, :, 1]
                + weight_b * frame[:, :, 2]
            )
            # Replicate the gray plane on all three channels.
            return np.dstack(3 * [gray]).astype("uint8")

        return clip.image_transform(desaturate)
|
||||
27
moviepy/video/fx/Blink.py
Normal file
27
moviepy/video/fx/Blink.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class Blink(Effect):
    """
    Makes the clip blink. At each blink it will be displayed ``duration_on``
    seconds and disappear ``duration_off`` seconds. Will only work in
    composite clips.
    """

    duration_on: float
    duration_off: float

    def apply(self, clip):
        """Apply the effect to the clip."""
        if clip.mask is None:
            clip = clip.with_mask()

        cycle = self.duration_on + self.duration_off
        visible_for = self.duration_on

        def blink_filter(get_frame, t):
            # Zero out the mask during the "off" part of each cycle.
            return get_frame(t) * ((t % cycle) < visible_for)

        clip.mask = clip.mask.transform(blink_filter)

        return clip
|
||||
80
moviepy/video/fx/Crop.py
Normal file
80
moviepy/video/fx/Crop.py
Normal file
@@ -0,0 +1,80 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class Crop(Effect):
    """Effect to crop a clip to get a new clip in which just a rectangular
    subregion of the original clip is conserved. `x1,y1` indicates the top left
    corner and `x2,y2` is the lower right corner of the cropped region. All
    coordinates are in pixels. Float numbers are accepted.

    To crop an arbitrary rectangle:

    >>> Crop(x1=50, y1=60, x2=460, y2=275)

    Only remove the part above y=30:

    >>> Crop(y1=30)

    Crop a rectangle that starts 10 pixels left and is 200px wide

    >>> Crop(x1=10, width=200)

    Crop a rectangle centered in x,y=(300,400), width=50, height=150 :

    >>> Crop(x_center=300, y_center=400, width=50, height=150)

    Any combination of the above should work, like for this rectangle
    centered in x=300, with explicit y-boundaries:

    >>> Crop(x_center=300, width=400, y1=100, y2=600)

    """

    x1: int = None
    y1: int = None
    x2: int = None
    y2: int = None
    width: int = None
    height: int = None
    x_center: int = None
    y_center: int = None

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # Resolve the missing edge from width/height when one edge is given.
        if self.width and self.x1 is not None:
            self.x2 = self.x1 + self.width
        elif self.width and self.x2 is not None:
            self.x1 = self.x2 - self.width

        if self.height and self.y1 is not None:
            self.y2 = self.y1 + self.height
        elif self.height and self.y2 is not None:
            self.y1 = self.y2 - self.height

        # Use explicit None checks so a center coordinate of 0 (at the
        # left/top edge) is honored instead of being ignored as falsy.
        if self.x_center is not None:
            self.x1, self.x2 = (
                self.x_center - self.width / 2,
                self.x_center + self.width / 2,
            )

        if self.y_center is not None:
            self.y1, self.y2 = (
                self.y_center - self.height / 2,
                self.y_center + self.height / 2,
            )

        # Default any still-missing edge to the full frame.
        self.x1 = self.x1 or 0
        self.y1 = self.y1 or 0
        self.x2 = self.x2 or clip.size[0]
        self.y2 = self.y2 or clip.size[1]

        return clip.image_transform(
            lambda frame: frame[
                int(self.y1) : int(self.y2), int(self.x1) : int(self.x2)
            ],
            apply_to=["mask"],
        )
|
||||
27
moviepy/video/fx/CrossFadeIn.py
Normal file
27
moviepy/video/fx/CrossFadeIn.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.fx.FadeIn import FadeIn
|
||||
|
||||
|
||||
@dataclass
class CrossFadeIn(Effect):
    """Makes the clip appear progressively, over ``duration`` seconds.
    Only works when the clip is included in a CompositeVideoClip.
    """

    duration: float

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        # The fade acts on transparency only, so a mask is required.
        faded = clip if clip.mask is not None else clip.with_mask()
        faded.mask.duration = faded.duration
        faded.mask = faded.mask.with_effects([FadeIn(self.duration)])
        return faded
|
||||
27
moviepy/video/fx/CrossFadeOut.py
Normal file
27
moviepy/video/fx/CrossFadeOut.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.fx.FadeOut import FadeOut
|
||||
|
||||
|
||||
@dataclass
class CrossFadeOut(Effect):
    """Makes the clip disappear progressively, over ``duration`` seconds.
    Only works when the clip is included in a CompositeVideoClip.
    """

    duration: float

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        # The fade acts on transparency only, so a mask is required.
        faded = clip if clip.mask is not None else clip.with_mask()
        faded.mask.duration = faded.duration
        faded.mask = faded.mask.with_effects([FadeOut(self.duration)])
        return faded
|
||||
34
moviepy/video/fx/EvenSize.py
Normal file
34
moviepy/video/fx/EvenSize.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class EvenSize(Effect):
    """Crops the clip to make dimensions even."""

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        width, height = clip.size
        odd_w = width % 2 == 1
        odd_h = height % 2 == 1

        # Nothing to do when both dimensions are already even.
        if not odd_w and not odd_h:
            return clip

        # Drop one pixel from each odd dimension (last column / last row).
        if odd_w and odd_h:

            def image_filter(frame):
                return frame[:-1, :-1, :]

        elif odd_w:

            def image_filter(frame):
                return frame[:, :-1, :]

        else:

            def image_filter(frame):
                return frame[:-1, :, :]

        return clip.image_transform(image_filter, apply_to=["mask"])
|
||||
36
moviepy/video/fx/FadeIn.py
Normal file
36
moviepy/video/fx/FadeIn.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class FadeIn(Effect):
    """Makes the clip progressively appear from some color (black by default),
    over ``duration`` seconds at the beginning of the clip. Can be used for
    masks too, where the initial color must be a number between 0 and 1.

    For cross-fading (progressive appearance or disappearance of a clip
    over another clip, see ``CrossFadeIn``
    """

    duration: float
    initial_color: list = None

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if self.initial_color is None:
            # Masks fade from 0; regular clips fade from black.
            self.initial_color = 0 if clip.is_mask else [0, 0, 0]

        self.initial_color = np.array(self.initial_color)

        def fade_filter(get_frame, t):
            if t >= self.duration:
                return get_frame(t)
            # Linear blend between the initial color and the real frame.
            progress = 1.0 * t / self.duration
            return progress * get_frame(t) + (1 - progress) * self.initial_color

        return clip.transform(fade_filter)
|
||||
39
moviepy/video/fx/FadeOut.py
Normal file
39
moviepy/video/fx/FadeOut.py
Normal file
@@ -0,0 +1,39 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class FadeOut(Effect):
    """Makes the clip progressively fade to some color (black by default),
    over ``duration`` seconds at the end of the clip. Can be used for masks too,
    where the final color must be a number between 0 and 1.

    For cross-fading (progressive appearance or disappearance of a clip over another
    clip), see ``CrossFadeOut``
    """

    duration: float
    final_color: list = None

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        if self.final_color is None:
            # Masks fade to 0; regular clips fade to black.
            self.final_color = 0 if clip.is_mask else [0, 0, 0]

        self.final_color = np.array(self.final_color)

        def fade_filter(get_frame, t):
            remaining = clip.duration - t
            if remaining >= self.duration:
                return get_frame(t)
            # Linear blend between the real frame and the final color.
            progress = 1.0 * remaining / self.duration
            return progress * get_frame(t) + (1 - progress) * self.final_color

        return clip.transform(fade_filter)
|
||||
43
moviepy/video/fx/Freeze.py
Normal file
43
moviepy/video/fx/Freeze.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.compositing.CompositeVideoClip import concatenate_videoclips
|
||||
|
||||
|
||||
@dataclass
class Freeze(Effect):
    """Momentarily freeze the clip at time t.

    Set `t='end'` to freeze the clip at the end (actually it will freeze on the
    frame at time clip.duration - padding_end seconds - 1 / clip_fps).
    With ``duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of
    the clip and the freeze (i.e. the duration of the freeze is
    automatically computed). One of them must be provided.
    """

    t: float = 0
    freeze_duration: float = None
    total_duration: float = None
    padding_end: float = 0

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        # 'end' means the last renderable frame before padding_end.
        if self.t == "end":
            self.t = clip.duration - self.padding_end - 1 / clip.fps

        if self.freeze_duration is None:
            if self.total_duration is None:
                raise ValueError(
                    "You must provide either 'freeze_duration' or 'total_duration'"
                )
            self.freeze_duration = self.total_duration - clip.duration

        # Assemble: part before the freeze, the frozen frame, part after.
        segments = []
        if self.t != 0:
            segments.append(clip[: self.t])
        segments.append(
            clip.to_ImageClip(self.t).with_duration(self.freeze_duration)
        )
        if self.t != clip.duration:
            segments.append(clip[self.t :])

        return concatenate_videoclips(segments)
|
||||
68
moviepy/video/fx/FreezeRegion.py
Normal file
68
moviepy/video/fx/FreezeRegion.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
|
||||
from moviepy.video.fx.Crop import Crop
|
||||
|
||||
|
||||
@dataclass
class FreezeRegion(Effect):
    """Freezes one region of the clip while the rest remains animated.

    You can choose one of three methods by providing either `region`,
    `outside_region`, or `mask`.

    Parameters
    ----------

    t
      Time at which to freeze the freezed region.

    region
      A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
      which will be freezed. You can provide outside_region or mask instead.

    outside_region
      A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
      which will be the only non-freezed region.

    mask
      If not None, will overlay a freezed version of the clip on the current clip,
      with the provided mask. In other words, the "visible" pixels in the mask
      indicate the freezed region in the final picture.

    Raises
    ------

    ValueError
      If none of ``region``, ``outside_region`` or ``mask`` is provided.
    """

    t: float = 0
    region: tuple = None
    outside_region: tuple = None
    mask: Clip = None

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if self.region is not None:
            # Freeze the cropped region and overlay it on the moving clip.
            x1, y1, _x2, _y2 = self.region
            freeze = (
                clip.with_effects([Crop(*self.region)])
                .to_ImageClip(t=self.t)
                .with_duration(clip.duration)
                .with_position((x1, y1))
            )
            return CompositeVideoClip([clip, freeze])

        elif self.outside_region is not None:
            # Freeze the whole frame and overlay the still-animated region.
            x1, y1, x2, y2 = self.outside_region
            animated_region = clip.with_effects(
                [Crop(*self.outside_region)]
            ).with_position((x1, y1))
            freeze = clip.to_ImageClip(t=self.t).with_duration(clip.duration)
            return CompositeVideoClip([freeze, animated_region])

        elif self.mask is not None:
            # Overlay a frozen copy, masked so only the frozen region shows.
            freeze = (
                clip.to_ImageClip(t=self.t)
                .with_duration(clip.duration)
                .with_mask(self.mask)
            )
            return CompositeVideoClip([clip, freeze])

        # Previously this silently returned None; fail loudly instead.
        raise ValueError(
            "You must provide either 'region', 'outside_region', or 'mask'"
        )
|
||||
20
moviepy/video/fx/GammaCorrection.py
Normal file
20
moviepy/video/fx/GammaCorrection.py
Normal file
@@ -0,0 +1,20 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class GammaCorrection(Effect):
    """Gamma-correction of a video clip."""

    gamma: float

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        exponent = self.gamma

        def gamma_filter(frame):
            # Normalize to [0, 1], apply the gamma curve, rescale to 8 bits.
            normalized = 1.0 * frame / 255
            return (255 * normalized**exponent).astype("uint8")

        return clip.image_transform(gamma_filter)
|
||||
45
moviepy/video/fx/HeadBlur.py
Normal file
45
moviepy/video/fx/HeadBlur.py
Normal file
@@ -0,0 +1,45 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image, ImageDraw, ImageFilter
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class HeadBlur(Effect):
    """Returns a filter that will blur a moving part (a head ?) of the frames.

    The position of the blur at time t is defined by (fx(t), fy(t)), the radius
    of the blurring by ``radius`` and the intensity of the blurring by ``intensity``.
    """

    fx: callable  # x-coordinate (pixels) of the blur center, as a function of t
    fy: callable  # y-coordinate (pixels) of the blur center, as a function of t
    radius: float  # radius of the blurred ellipse, in pixels
    intensity: float = None  # Gaussian blur radius; defaults to 2 * radius / 3

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # NOTE(review): this mutates the effect instance — the default
        # intensity is computed once and kept if the effect is reused.
        if self.intensity is None:
            self.intensity = int(2 * self.radius / 3)

        def filter(gf, t):
            im = gf(t).copy()
            h, w, d = im.shape
            # Bounding box of the ellipse, clamped to the frame borders.
            x, y = int(self.fx(t)), int(self.fy(t))
            x1, x2 = max(0, x - self.radius), min(x + self.radius, w)
            y1, y2 = max(0, y - self.radius), min(y + self.radius, h)

            image = Image.fromarray(im)
            # White ellipse on a black background marks the region to blur.
            mask = Image.new("RGB", image.size)
            draw = ImageDraw.Draw(mask)
            draw.ellipse([x1, y1, x2, y2], fill=(255, 255, 255))

            # Blur the whole frame once, then keep blurred pixels only
            # where the mask is set.
            blurred = image.filter(ImageFilter.GaussianBlur(radius=self.intensity))

            res = np.where(np.array(mask) > 0, np.array(blurred), np.array(image))
            return res

        return clip.transform(filter)
|
||||
18
moviepy/video/fx/InvertColors.py
Normal file
18
moviepy/video/fx/InvertColors.py
Normal file
@@ -0,0 +1,18 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class InvertColors(Effect):
    """Returns the color-inversed clip.

    Every pixel value ``v`` becomes ``255 - v`` (or ``1 - v`` for masks):
    black becomes white, green becomes purple, etc.
    """

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # Masks hold floats in [0, 1]; regular frames hold uint8 in [0, 255].
        if clip.is_mask:
            ceiling = 1.0
        else:
            ceiling = 255

        def invert(frame):
            return ceiling - frame

        return clip.image_transform(invert)
|
||||
43
moviepy/video/fx/Loop.py
Normal file
43
moviepy/video/fx/Loop.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class Loop(Effect):
    """
    Returns a clip that plays the current clip in an infinite loop.
    Ideal for clips coming from GIFs.

    Parameters
    ----------

    n
      Number of times the clip should be played. If `None` the
      the clip will loop indefinitely (i.e. with no set duration).

    duration
      Total duration of the clip. Can be specified instead of n.
    """

    n: int = None
    duration: float = None

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        original_duration = clip.duration
        # Wrap the timeline so any time past the end replays from the start.
        looped = clip.time_transform(
            lambda t: t % original_duration, apply_to=["mask", "audio"]
        )

        # A finite repeat count translates into a total duration.
        if self.n:
            self.duration = self.n * original_duration

        if self.duration:
            looped = looped.with_duration(self.duration)

        return looped
|
||||
27
moviepy/video/fx/LumContrast.py
Normal file
27
moviepy/video/fx/LumContrast.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class LumContrast(Effect):
    """Luminosity-contrast correction of a clip."""

    lum: float = 0  # additive luminosity offset
    contrast: float = 0  # contrast gain around the threshold
    contrast_threshold: float = 127  # pivot value for the contrast term

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        luminosity = self.lum
        gain = self.contrast
        pivot = float(self.contrast_threshold)

        def adjust(frame):
            # Work in floats: intermediate values may leave [0, 255].
            frame = frame * 1.0
            corrected = frame + luminosity + gain * (frame - pivot)
            # Clamp back into the valid 8-bit range.
            corrected[corrected < 0] = 0
            corrected[corrected > 255] = 255
            return corrected.astype("uint8")

        return clip.image_transform(adjust)
|
||||
30
moviepy/video/fx/MakeLoopable.py
Normal file
30
moviepy/video/fx/MakeLoopable.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
|
||||
from moviepy.video.fx.CrossFadeIn import CrossFadeIn
|
||||
|
||||
|
||||
@dataclass
class MakeLoopable(Effect):
    """Makes the clip fade in progressively at its own end, this way it can be
    looped indefinitely.

    Parameters
    ----------

    overlap_duration : float
      Duration of the fade-in (in seconds).
    """

    overlap_duration: float

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # Overlay a cross-fading copy of the clip, started so that it fades
        # in exactly over the clip's last `overlap_duration` seconds.
        clip2 = clip.with_effects([CrossFadeIn(self.overlap_duration)]).with_start(
            clip.duration - self.overlap_duration
        )
        # Drop the first `overlap_duration` seconds: the trimmed start now
        # matches the faded end, so the result loops seamlessly.
        return CompositeVideoClip([clip, clip2]).subclipped(
            self.overlap_duration, clip.duration
        )
|
||||
90
moviepy/video/fx/Margin.py
Normal file
90
moviepy/video/fx/Margin.py
Normal file
@@ -0,0 +1,90 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.VideoClip import ImageClip
|
||||
|
||||
|
||||
@dataclass
class Margin(Effect):
    """Draws an external margin all around the frame.

    Parameters
    ----------

    margin_size : int, optional
      If not ``None``, then the new clip has a margin size of
      size ``margin_size`` in pixels on the left, right, top, and bottom.

    left : int, optional
      If ``margin_size=None``, margin size for the new clip in left direction.

    right : int, optional
      If ``margin_size=None``, margin size for the new clip in right direction.

    top : int, optional
      If ``margin_size=None``, margin size for the new clip in top direction.

    bottom : int, optional
      If ``margin_size=None``, margin size for the new clip in bottom direction.

    color : tuple, optional
      Color of the margin.

    opacity : float, optional
      Opacity of the margin. Setting this value to 0 yields transparent margins.
    """

    margin_size: int = None
    left: int = 0
    right: int = 0
    top: int = 0
    bottom: int = 0
    color: tuple = (0, 0, 0)
    opacity: float = 1.0

    def add_margin(self, clip: Clip):
        """Add margins to the clip."""
        # A semi-transparent margin requires a mask on the clip to blend with.
        if (self.opacity != 1.0) and (clip.mask is None) and not (clip.is_mask):
            clip = clip.with_mask()

        # NOTE(review): mutates the effect instance — margin_size overwrites
        # the four per-side sizes the first time apply() runs.
        if self.margin_size is not None:
            self.left = self.right = self.top = self.bottom = self.margin_size

        def make_bg(w, h):
            # Enlarged background: an opacity plane for masks, a solid
            # color plane for regular frames.
            new_w, new_h = w + self.left + self.right, h + self.top + self.bottom
            if clip.is_mask:
                shape = (new_h, new_w)
                bg = np.tile(self.opacity, (new_h, new_w)).astype(float).reshape(shape)
            else:
                shape = (new_h, new_w, 3)
                bg = np.tile(self.color, (new_h, new_w)).reshape(shape)
            return bg

        if isinstance(clip, ImageClip):
            # Static image: composite once and serve the same frame forever.
            im = make_bg(clip.w, clip.h)
            im[self.top : self.top + clip.h, self.left : self.left + clip.w] = clip.img
            return clip.image_transform(lambda pic: im)

        else:

            def filter(get_frame, t):
                # Paste each frame into a freshly built background.
                pic = get_frame(t)
                h, w = pic.shape[:2]
                im = make_bg(w, h)
                im[self.top : self.top + h, self.left : self.left + w] = pic
                return im

            return clip.transform(filter)

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # We apply once on clip and once on mask if we have one
        clip = self.add_margin(clip=clip)

        if clip.mask:
            clip.mask = self.add_margin(clip=clip.mask)

        return clip
|
||||
45
moviepy/video/fx/MaskColor.py
Normal file
45
moviepy/video/fx/MaskColor.py
Normal file
@@ -0,0 +1,45 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class MaskColor(Effect):
    """Returns a new clip with a mask for transparency where the original
    clip is of the given color.

    You can also have a "progressive" mask by specifying a non-null distance
    threshold ``threshold``. In this case, if the distance between a pixel and
    the given color is d, the transparency will be

    d**stiffness / (threshold**stiffness + d**stiffness)

    which is 1 when d>>threshold and 0 for d<<threshold, the stiffness of the
    effect being parametrized by ``stiffness``
    """

    color: tuple = (0, 0, 0)
    threshold: float = 0
    stiffness: float = 1

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        target = np.array(self.color)
        threshold = self.threshold
        stiffness = self.stiffness

        def transparency(distance):
            # Progressive (Hill-function) mask when a threshold is given,
            # binary mask otherwise.
            if threshold:
                powered = distance**stiffness
                return powered / (threshold**stiffness + powered)
            return 1.0 * (distance != 0)

        def mask_frame(frame):
            # Euclidean distance of each pixel to the target color.
            distance = np.sqrt(((frame - target) ** 2).sum(axis=2))
            return transparency(distance)

        mask = clip.image_transform(mask_frame)
        mask.is_mask = True
        return clip.with_mask(mask)
|
||||
52
moviepy/video/fx/MasksAnd.py
Normal file
52
moviepy/video/fx/MasksAnd.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.VideoClip import ImageClip
|
||||
|
||||
|
||||
@dataclass
class MasksAnd(Effect):
    """Returns the logical 'and' (minimum pixel color values) between two masks.

    The result has the duration of the clip to which has been applied, if it has any.

    Parameters
    ----------

    other_clip ImageClip or np.ndarray
      Clip used to mask the original clip.

    Examples
    --------

    .. code:: python

        clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red
        mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green
        masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black
        masked_clip.get_frame(0)
        [[[0 0 0]]]
    """

    other_clip: Union[Clip, np.ndarray]

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # Reduce an ImageClip to its raw frame so that the 'and' of two
        # ImageClips is itself an ImageClip.
        if isinstance(self.other_clip, ImageClip):
            self.other_clip = self.other_clip.img

        if isinstance(self.other_clip, np.ndarray):
            # Static operand: a per-frame image transform suffices.
            def and_with_image(frame):
                return np.minimum(frame, self.other_clip)

            return clip.image_transform(and_with_image)

        # Animated operand: sample both clips at each time t.
        def and_with_clip(get_frame, t):
            return np.minimum(get_frame(t), self.other_clip.get_frame(t))

        return clip.transform(and_with_clip)
|
||||
52
moviepy/video/fx/MasksOr.py
Normal file
52
moviepy/video/fx/MasksOr.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
from moviepy.video.VideoClip import ImageClip
|
||||
|
||||
|
||||
@dataclass
class MasksOr(Effect):
    """Returns the logical 'or' (maximum pixel color values) between two masks.

    The result has the duration of the clip to which has been applied, if it has any.

    Parameters
    ----------

    other_clip ImageClip or np.ndarray
      Clip used to mask the original clip.

    Examples
    --------

    .. code:: python

        clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red
        mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green
        masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow
        masked_clip.get_frame(0)
        [[[255 255 0]]]
    """

    other_clip: Union[Clip, np.ndarray]

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # Reduce an ImageClip to its raw frame so that the 'or' of two
        # ImageClips is itself an ImageClip.
        if isinstance(self.other_clip, ImageClip):
            self.other_clip = self.other_clip.img

        if isinstance(self.other_clip, np.ndarray):
            # Static operand: a per-frame image transform suffices.
            def or_with_image(frame):
                return np.maximum(frame, self.other_clip)

            return clip.image_transform(or_with_image)

        # Animated operand: sample both clips at each time t.
        def or_with_clip(get_frame, t):
            return np.maximum(get_frame(t), self.other_clip.get_frame(t))

        return clip.transform(or_with_clip)
|
||||
16
moviepy/video/fx/MirrorX.py
Normal file
16
moviepy/video/fx/MirrorX.py
Normal file
@@ -0,0 +1,16 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Union
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class MirrorX(Effect):
    """Flips the clip horizontally (and its mask too, by default)."""

    apply_to: Union[List, str] = "mask"

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""

        def flip_horizontally(frame):
            # Reverse the column (second) axis of each frame.
            return frame[:, ::-1]

        return clip.image_transform(flip_horizontally, apply_to=self.apply_to)
|
||||
16
moviepy/video/fx/MirrorY.py
Normal file
16
moviepy/video/fx/MirrorY.py
Normal file
@@ -0,0 +1,16 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Union
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class MirrorY(Effect):
    """Flips the clip vertically (and its mask too, by default)."""

    apply_to: Union[List, str] = "mask"

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""

        def flip_vertically(frame):
            # Reverse the row (first) axis of each frame.
            return frame[::-1]

        return clip.image_transform(flip_vertically, apply_to=self.apply_to)
|
||||
23
moviepy/video/fx/MultiplyColor.py
Normal file
23
moviepy/video/fx/MultiplyColor.py
Normal file
@@ -0,0 +1,23 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class MultiplyColor(Effect):
    """
    Multiplies the clip's colors by the given factor, can be used
    to decrease or increase the clip's brightness (is that the
    right word ?)
    """

    factor: float

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""

        def scale_colors(frame):
            # Scale every channel, clamping at the 8-bit ceiling.
            return np.minimum(255, (self.factor * frame)).astype("uint8")

        return clip.image_transform(scale_colors)
|
||||
31
moviepy/video/fx/MultiplySpeed.py
Normal file
31
moviepy/video/fx/MultiplySpeed.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class MultiplySpeed(Effect):
    """Returns a clip playing the current clip but at a speed multiplied by ``factor``.

    Instead of factor one can indicate the desired ``final_duration`` of the clip, and
    the factor will be automatically computed. The same effect is applied to the clip's
    audio and mask if any.
    """

    factor: float = None  # speed multiplier; > 1 speeds up, < 1 slows down
    final_duration: float = None  # target duration; overrides ``factor`` if set

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip.

        Raises
        ------
        ValueError
            If neither ``factor`` nor ``final_duration`` is provided, or if
            ``final_duration`` is used on a clip with no ``duration`` set.
        """
        if self.final_duration:
            # Derive the factor needed to reach the requested duration.
            if clip.duration is None:
                raise ValueError(
                    "Cannot use 'final_duration' on a clip with no 'duration' set"
                )
            self.factor = 1.0 * clip.duration / self.final_duration

        if self.factor is None:
            # Fail with a clear message instead of the opaque TypeError the
            # multiplication below would raise with factor=None.
            raise ValueError("You must provide either 'factor' or 'final_duration'")

        # Remap the timeline: the frame at time t comes from time factor * t.
        new_clip = clip.time_transform(
            lambda t: self.factor * t, apply_to=["mask", "audio"]
        )

        if clip.duration is not None:
            new_clip = new_clip.with_duration(1.0 * clip.duration / self.factor)

        return new_clip
|
||||
63
moviepy/video/fx/Painting.py
Normal file
63
moviepy/video/fx/Painting.py
Normal file
@@ -0,0 +1,63 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image, ImageFilter
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class Painting(Effect):
    """Transforms any photo into some kind of painting.

    Transforms any photo into some kind of painting. Saturation
    tells at which point the colors of the result should be
    flashy. ``black`` gives the amount of black lines wanted.
    """

    saturation: float = 1.4
    black: float = 0.006

    def to_painting(self, np_image, saturation=1.4, black=0.006):
        """Transform a numpy image into some kind of painting.

        ``saturation`` controls how flashy the colors of the result are;
        ``black`` controls how strongly the detected edges darken it.

        np_image : a numpy image
        """
        # Sharpen first so the edge detector picks up more detail.
        enhanced = Image.fromarray(np_image).filter(ImageFilter.EDGE_ENHANCE_MORE)

        # Detect edges on a grayscale copy of the sharpened image.
        edges = np.array(enhanced.convert("L").filter(ImageFilter.FIND_EDGES))

        # Stack the single-channel edges into RGB and scale them into a
        # per-pixel darkening term.
        darkening = black * (255 * np.dstack(3 * [edges]))

        # Boost saturation, subtract the dark lines, clamp to [0, 255].
        painting = saturation * np.array(enhanced) - darkening
        painting = np.maximum(0, np.minimum(255, painting))

        return painting.astype("uint8")

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""

        def paint(frame):
            return self.to_painting(frame, self.saturation, self.black)

        return clip.image_transform(paint)
|
||||
158
moviepy/video/fx/Resize.py
Normal file
158
moviepy/video/fx/Resize.py
Normal file
@@ -0,0 +1,158 @@
|
||||
import numbers
|
||||
from dataclasses import dataclass
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class Resize(Effect):
    """Effect returning a video clip that is a resized version of the clip.

    Parameters
    ----------

    new_size : tuple or float or function, optional
      Can be either
        - ``(width, height)`` in pixels or a float representing
        - A scaling factor, like ``0.5``.
        - A function of time returning one of these.

    height : int, optional
      Height of the new clip in pixels. The width is then computed so
      that the width/height ratio is conserved.

    width : int, optional
      Width of the new clip in pixels. The height is then computed so
      that the width/height ratio is conserved.

    Examples
    --------

    .. code:: python

        clip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720)
        clip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6
        clip.with_effects([vfx.Resize(width=800)]) # height computed automatically.
        clip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling
    """

    new_size: Union[tuple, float, callable] = None
    height: int = None
    width: int = None
    apply_to_mask: bool = True  # resize the clip's mask with the same effect

    def resizer(self, pic, new_size):
        """Resize the image using PIL."""
        new_size = list(map(int, new_size))
        pil_img = Image.fromarray(pic)
        resized_pil = pil_img.resize(new_size, Image.Resampling.LANCZOS)
        return np.array(resized_pil)

    def apply(self, clip):
        """Apply the effect to the clip."""
        w, h = clip.size

        if self.new_size is not None:

            def translate_new_size(new_size_):
                """Returns a [w, h] pair from `new_size_`. If `new_size_` is a
                scalar, then work out the correct pair using the clip's size.
                Otherwise just return `new_size_`
                """
                if isinstance(new_size_, numbers.Number):
                    return [new_size_ * w, new_size_ * h]
                else:
                    return new_size_

            if hasattr(self.new_size, "__call__"):
                # The resizing is a function of time

                def get_new_size(t):
                    return translate_new_size(self.new_size(t))

                if clip.is_mask:
                    # Masks hold floats in [0, 1]: scale to uint8 for PIL,
                    # then back to [0, 1] after resizing.

                    def filter(get_frame, t):
                        return (
                            self.resizer(
                                (255 * get_frame(t)).astype("uint8"), get_new_size(t)
                            )
                            / 255.0
                        )

                else:

                    def filter(get_frame, t):
                        return self.resizer(
                            get_frame(t).astype("uint8"), get_new_size(t)
                        )

                newclip = clip.transform(
                    filter,
                    keep_duration=True,
                    apply_to=(["mask"] if self.apply_to_mask else []),
                )
                # Re-run the effect on the mask (with apply_to_mask=False
                # to avoid recursing into the mask's own mask).
                if self.apply_to_mask and clip.mask is not None:
                    newclip.mask = clip.mask.with_effects(
                        [Resize(self.new_size, apply_to_mask=False)]
                    )

                return newclip

            else:
                self.new_size = translate_new_size(self.new_size)

        elif self.height is not None:
            if hasattr(self.height, "__call__"):
                # Time-varying height: re-express as a scaling factor of time.

                def func(t):
                    return 1.0 * int(self.height(t)) / h

                return clip.with_effects([Resize(func)])

            else:
                # Preserve the aspect ratio from the requested height.
                self.new_size = [w * self.height / h, self.height]

        elif self.width is not None:
            if hasattr(self.width, "__call__"):
                # Time-varying width: re-express as a scaling factor of time.

                def func(t):
                    return 1.0 * self.width(t) / w

                return clip.with_effects([Resize(func)])

            else:
                # Preserve the aspect ratio from the requested width.
                self.new_size = [self.width, h * self.width / w]
        else:
            raise ValueError(
                "You must provide either 'new_size' or 'height' or 'width'"
            )

        # From here, the resizing is constant (not a function of time), size=newsize

        if clip.is_mask:

            def image_filter(pic):
                return (
                    1.0
                    * self.resizer((255 * pic).astype("uint8"), self.new_size)
                    / 255.0
                )

        else:

            def image_filter(pic):
                return self.resizer(pic.astype("uint8"), self.new_size)

        new_clip = clip.image_transform(image_filter)

        if self.apply_to_mask and clip.mask is not None:
            new_clip.mask = clip.mask.with_effects(
                [Resize(self.new_size, apply_to_mask=False)]
            )

        return new_clip
|
||||
128
moviepy/video/fx/Rotate.py
Normal file
128
moviepy/video/fx/Rotate.py
Normal file
@@ -0,0 +1,128 @@
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class Rotate(Effect):
    """
    Rotates the specified clip by ``angle`` degrees (or radians) anticlockwise
    If the angle is not a multiple of 90 (degrees) or ``center``, ``translate``,
    and ``bg_color`` are not ``None``, there will be black borders.
    You can make them transparent with:

    >>> new_clip = clip.with_mask().rotate(72)

    Parameters
    ----------

    clip : VideoClip
      A video clip.

    angle : float
      Either a value or a function angle(t) representing the angle of rotation.

    unit : str, optional
      Unit of parameter `angle` (either "deg" for degrees or "rad" for radians).

    resample : str, optional
      An optional resampling filter. One of "nearest", "bilinear", or "bicubic".

    expand : bool, optional
      If true, expands the output image to make it large enough to hold the
      entire rotated image. If false or omitted, make the output image the same
      size as the input image.

    translate : tuple, optional
      An optional post-rotate translation (a 2-tuple).

    center : tuple, optional
      Optional center of rotation (a 2-tuple). Origin is the upper left corner.

    bg_color : tuple, optional
      An optional color for area outside the rotated image. Only has effect if
      ``expand`` is true.
    """

    angle: float
    unit: str = "deg"
    resample: str = "bicubic"
    expand: bool = True
    center: tuple = None
    translate: tuple = None
    bg_color: tuple = None

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip.

        Raises
        ------
        ValueError
            If ``resample`` is not one of the supported filter names.
        """
        # Map the resample name to the PIL constant up front so an invalid
        # name fails immediately, not at render time.
        try:
            resample = {
                "bilinear": Image.BILINEAR,
                "nearest": Image.NEAREST,
                "bicubic": Image.BICUBIC,
            }[self.resample]
        except KeyError:
            raise ValueError(
                "'resample' argument must be either 'bilinear', 'nearest' or 'bicubic'"
            )

        # Accept either a constant angle or a function of time.
        if hasattr(self.angle, "__call__"):
            get_angle = self.angle
        else:
            get_angle = lambda t: self.angle

        def filter(get_frame, t):
            angle = get_angle(t)
            im = get_frame(t)

            if self.unit == "rad":
                angle = math.degrees(angle)

            angle %= 360
            # Fast paths: multiples of 90 degrees (with default center/
            # translate/bg_color) are pure numpy transpositions/flips —
            # no PIL round-trip, no interpolation loss.
            if not self.center and not self.translate and not self.bg_color:
                if (angle == 0) and self.expand:
                    return im
                if (angle == 90) and self.expand:
                    transpose = [1, 0] if len(im.shape) == 2 else [1, 0, 2]
                    return np.transpose(im, axes=transpose)[::-1]
                elif (angle == 270) and self.expand:
                    transpose = [1, 0] if len(im.shape) == 2 else [1, 0, 2]
                    return np.transpose(im, axes=transpose)[:, ::-1]
                elif (angle == 180) and self.expand:
                    return im[::-1, ::-1]

            # Forward only the options the caller actually set, so PIL's
            # own defaults apply otherwise.
            pillow_kwargs = {}

            if self.bg_color is not None:
                pillow_kwargs["fillcolor"] = self.bg_color

            if self.center is not None:
                pillow_kwargs["center"] = self.center

            if self.translate is not None:
                pillow_kwargs["translate"] = self.translate

            # PIL expects uint8 type data. However a mask image has values in the
            # range [0, 1] and is of float type. To handle this we scale it up by
            # a factor 'a' for use with PIL and then back again by 'a' afterwards.
            if im.dtype == "float64":
                # this is a mask image
                a = 255.0
            else:
                a = 1

            # call PIL.rotate
            return (
                np.array(
                    Image.fromarray(np.array(a * im).astype(np.uint8)).rotate(
                        angle, expand=self.expand, resample=resample, **pillow_kwargs
                    )
                )
                / a
            )

        return clip.transform(filter, apply_to=["mask"])
|
||||
57
moviepy/video/fx/Scroll.py
Normal file
57
moviepy/video/fx/Scroll.py
Normal file
@@ -0,0 +1,57 @@
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
class Scroll(Effect):
    """Effect that scrolls horizontally or vertically a clip, e.g. to make end credits

    Parameters
    ----------
    w, h
      The width and height of the final clip. Default to clip.w and clip.h

    x_speed, y_speed
      The speed of the scroll in the x and y directions.

    x_start, y_start
      The starting position of the scroll in the x and y directions.


    apply_to
      Whether to apply the effect to the mask too.
    """

    def __init__(
        self,
        w=None,
        h=None,
        x_speed=0,
        y_speed=0,
        x_start=0,
        y_start=0,
        apply_to="mask",
    ):
        self.w = w
        self.h = h
        self.x_speed = x_speed
        self.y_speed = y_speed
        self.x_start = x_start
        self.y_start = y_start
        self.apply_to = apply_to

    def apply(self, clip):
        """Apply the effect to the clip."""
        # Missing output dimensions default to the clip's own size.
        if self.h is None:
            self.h = clip.h
        if self.w is None:
            self.w = clip.w

        # Furthest offsets that still keep the window inside the frame.
        x_max = self.w - 1
        y_max = self.h - 1

        def scrolled_frame(get_frame, t):
            # Window origin at time t, clamped into [0, max].
            x_offset = int(max(0, min(x_max, self.x_start + round(self.x_speed * t))))
            y_offset = int(max(0, min(y_max, self.y_start + round(self.y_speed * t))))
            return get_frame(t)[
                y_offset : y_offset + self.h, x_offset : x_offset + self.w
            ]

        return clip.transform(scrolled_frame, apply_to=self.apply_to)
|
||||
60
moviepy/video/fx/SlideIn.py
Normal file
60
moviepy/video/fx/SlideIn.py
Normal file
@@ -0,0 +1,60 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class SlideIn(Effect):
    """Makes the clip arrive from one side of the screen.

    Only works when the clip is included in a CompositeVideoClip,
    and if the clip has the same size as the whole composition.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      A video clip.

    duration : float
      Time taken for the clip to be fully visible

    side : str
      Side of the screen where the clip comes from. One of
      'top', 'bottom', 'left' or 'right'.

    Examples
    --------

    .. code:: python

        from moviepy import *

        clips = [... make a list of clips]
        slided_clips = [
            CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])])
            for clip in clips
        ]
        final_clip = concatenate_videoclips(slided_clips, padding=-1)

        clip = ColorClip(
            color=(255, 0, 0), duration=1, size=(300, 300)
        ).with_fps(60)
        final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])])
    """

    duration: float
    side: str

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        w, h = clip.size
        d = self.duration

        # One position-vs-time function per entry side; each converges to
        # the composition's origin as t reaches `duration`.
        def from_left(t):
            return (min(0, w * (t / d - 1)), "center")

        def from_right(t):
            return (max(0, w * (1 - t / d)), "center")

        def from_top(t):
            return ("center", min(0, h * (t / d - 1)))

        def from_bottom(t):
            return ("center", max(0, h * (1 - t / d)))

        position_for_side = {
            "left": from_left,
            "right": from_right,
            "top": from_top,
            "bottom": from_bottom,
        }

        return clip.with_position(position_for_side[self.side])
|
||||
64
moviepy/video/fx/SlideOut.py
Normal file
64
moviepy/video/fx/SlideOut.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class SlideOut(Effect):
    """Makes the clip goes away by one side of the screen.

    Only works when the clip is included in a CompositeVideoClip,
    and if the clip has the same size as the whole composition.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      A video clip.

    duration : float
      Time taken for the clip to fully disappear off-screen.

    side : str
      Side of the screen where the clip goes. One of
      'top', 'bottom', 'left' or 'right'.

    Examples
    --------

    .. code:: python

        from moviepy import *

        clips = [... make a list of clips]
        slided_clips = [
            CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])])
            for clip in clips
        ]
        final_clip = concatenate_videoclips(slided_clips, padding=-1)

        clip = ColorClip(
            color=(255, 0, 0), duration=1, size=(300, 300)
        ).with_fps(60)
        final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])])
    """

    duration: float
    side: str

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip.

        Raises
        ------
        ValueError
            If the clip has no ``duration`` set (needed to know when the
            slide-out should start).
        """
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        w, h = clip.size
        ts = clip.duration - self.duration  # start time of the effect.
        # Position stays at the origin until ts, then moves linearly
        # off-screen toward the chosen side over `duration` seconds.
        pos_dict = {
            "left": lambda t: (min(0, w * (-(t - ts) / self.duration)), "center"),
            "right": lambda t: (max(0, w * ((t - ts) / self.duration)), "center"),
            "top": lambda t: ("center", min(0, h * (-(t - ts) / self.duration))),
            "bottom": lambda t: ("center", max(0, h * ((t - ts) / self.duration))),
        }

        return clip.with_position(pos_dict[self.side])
|
||||
29
moviepy/video/fx/SuperSample.py
Normal file
29
moviepy/video/fx/SuperSample.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class SuperSample(Effect):
    """Replaces each frame at time t by the mean of `n_frames` equally spaced frames
    taken in the interval [t-d, t+d]. This results in motion blur.

    Parameters
    ----------

    d : float
      Half-width (in seconds) of the sampling window around each time t.

    n_frames : int
      Number of frames averaged to produce each output frame.
    """

    d: float
    n_frames: int

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""

        # Renamed from ``filter`` to avoid shadowing the Python builtin.
        def average_frames(get_frame, t):
            # Sample n_frames timestamps, equally spaced on [t - d, t + d].
            timings = np.linspace(t - self.d, t + self.d, self.n_frames)
            # The 1.0 multiplication promotes to float before averaging so
            # uint8 frame values cannot overflow when summed.
            frame_average = np.mean(
                1.0 * np.array([get_frame(t_) for t_ in timings], dtype="uint16"),
                axis=0,
            )
            return frame_average.astype("uint8")

        return clip.transform(average_frames)
|
||||
20
moviepy/video/fx/TimeMirror.py
Normal file
20
moviepy/video/fx/TimeMirror.py
Normal file
@@ -0,0 +1,20 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class TimeMirror(Effect):
    """Play the current clip backwards.

    The clip must have its ``duration`` attribute set. The reversal is
    also applied to the clip's audio and mask, if any.
    """

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip."""
        # The duration is required to compute reversed frame times.
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        # A negative-step slice on a Clip yields the time-reversed clip.
        return clip[::-1]
|
||||
22
moviepy/video/fx/TimeSymmetrize.py
Normal file
22
moviepy/video/fx/TimeSymmetrize.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from moviepy.Clip import Clip
|
||||
from moviepy.Effect import Effect
|
||||
|
||||
|
||||
@dataclass
class TimeSymmetrize(Effect):
    """
    Returns a clip that plays the current clip once forwards and
    then once backwards. This is very practical to make videos that
    loop well, e.g. to create animated GIFs.
    This effect is automatically applied to the clip's mask and audio
    if they exist.
    """

    def apply(self, clip: Clip) -> Clip:
        """Apply the effect to the clip.

        Raises
        ------
        ValueError
            If the clip has no ``duration`` set (needed to reverse it).
        """
        if clip.duration is None:
            raise ValueError("Attribute 'duration' not set")

        # Concatenate the clip with its time-reversed self (negative-step
        # slicing on a Clip plays it backwards).
        return clip + clip[::-1]
|
||||
76
moviepy/video/fx/__init__.py
Normal file
76
moviepy/video/fx/__init__.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""All the visual effects that can be applied to VideoClip."""
|
||||
|
||||
# import every video fx function
|
||||
|
||||
from moviepy.video.fx.AccelDecel import AccelDecel
|
||||
from moviepy.video.fx.BlackAndWhite import BlackAndWhite
|
||||
from moviepy.video.fx.Blink import Blink
|
||||
from moviepy.video.fx.Crop import Crop
|
||||
from moviepy.video.fx.CrossFadeIn import CrossFadeIn
|
||||
from moviepy.video.fx.CrossFadeOut import CrossFadeOut
|
||||
from moviepy.video.fx.EvenSize import EvenSize
|
||||
from moviepy.video.fx.FadeIn import FadeIn
|
||||
from moviepy.video.fx.FadeOut import FadeOut
|
||||
from moviepy.video.fx.Freeze import Freeze
|
||||
from moviepy.video.fx.FreezeRegion import FreezeRegion
|
||||
from moviepy.video.fx.GammaCorrection import GammaCorrection
|
||||
from moviepy.video.fx.HeadBlur import HeadBlur
|
||||
from moviepy.video.fx.InvertColors import InvertColors
|
||||
from moviepy.video.fx.Loop import Loop
|
||||
from moviepy.video.fx.LumContrast import LumContrast
|
||||
from moviepy.video.fx.MakeLoopable import MakeLoopable
|
||||
from moviepy.video.fx.Margin import Margin
|
||||
from moviepy.video.fx.MaskColor import MaskColor
|
||||
from moviepy.video.fx.MasksAnd import MasksAnd
|
||||
from moviepy.video.fx.MasksOr import MasksOr
|
||||
from moviepy.video.fx.MirrorX import MirrorX
|
||||
from moviepy.video.fx.MirrorY import MirrorY
|
||||
from moviepy.video.fx.MultiplyColor import MultiplyColor
|
||||
from moviepy.video.fx.MultiplySpeed import MultiplySpeed
|
||||
from moviepy.video.fx.Painting import Painting
|
||||
from moviepy.video.fx.Resize import Resize
|
||||
from moviepy.video.fx.Rotate import Rotate
|
||||
from moviepy.video.fx.Scroll import Scroll
|
||||
from moviepy.video.fx.SlideIn import SlideIn
|
||||
from moviepy.video.fx.SlideOut import SlideOut
|
||||
from moviepy.video.fx.SuperSample import SuperSample
|
||||
from moviepy.video.fx.TimeMirror import TimeMirror
|
||||
from moviepy.video.fx.TimeSymmetrize import TimeSymmetrize
|
||||
|
||||
|
||||
# Explicit public API of ``moviepy.video.fx``: one entry per effect class
# imported above.
# NOTE(review): entries look intended to be alphabetical, but "MaskColor" is
# placed between "MasksAnd" and "MasksOr"; ordering has no runtime effect.
__all__ = (
    "AccelDecel",
    "BlackAndWhite",
    "Blink",
    "Crop",
    "CrossFadeIn",
    "CrossFadeOut",
    "EvenSize",
    "FadeIn",
    "FadeOut",
    "Freeze",
    "FreezeRegion",
    "GammaCorrection",
    "HeadBlur",
    "InvertColors",
    "Loop",
    "LumContrast",
    "MakeLoopable",
    "Margin",
    "MasksAnd",
    "MaskColor",
    "MasksOr",
    "MirrorX",
    "MirrorY",
    "MultiplyColor",
    "MultiplySpeed",
    "Painting",
    "Resize",
    "Rotate",
    "Scroll",
    "SlideIn",
    "SlideOut",
    "SuperSample",
    "TimeMirror",
    "TimeSymmetrize",
)
|
||||
167
moviepy/video/io/ImageSequenceClip.py
Normal file
167
moviepy/video/io/ImageSequenceClip.py
Normal file
@@ -0,0 +1,167 @@
|
||||
"""Implements ImageSequenceClip, a class to create a video clip from a set
|
||||
of image files.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
from imageio.v2 import imread
|
||||
|
||||
from moviepy.video.VideoClip import VideoClip
|
||||
|
||||
|
||||
class ImageSequenceClip(VideoClip):
    """A VideoClip made from a series of images.

    Parameters
    ----------

    sequence
      Can be one of these:

      - The name of a folder (containing only pictures). The pictures
        will be considered in alphanumerical order.
      - A list of names of image files. In this case you can choose to
        load the pictures in memory with ``load_images=True``.
      - A list of Numpy arrays representing images. In this last case,
        masks are not supported currently.

    fps
      Number of picture frames to read per second. Instead, you can provide
      the duration of each image with durations (see below)

    durations
      List of the duration of each picture.

    with_mask
      Should the alpha layer of PNG images be considered as a mask?

    is_mask
      Will this sequence of pictures be used as an animated mask.

    load_images
      Specify that all images should be loaded into the RAM. This is only
      interesting if you have a small number of images that will be used
      more than once.

    Raises
    ------
    ValueError
        If neither ``fps`` nor ``durations`` is given, or if the images do
        not all have the same size.
    """

    def __init__(
        self,
        sequence,
        fps=None,
        durations=None,
        with_mask=True,
        is_mask=False,
        load_images=False,
    ):
        # One of fps/durations is required to place each image on the timeline.
        if (fps is None) and (durations is None):
            raise ValueError("Please provide either 'fps' or 'durations'.")
        VideoClip.__init__(self, is_mask=is_mask)

        # ``fromfiles`` records whether frames must be decoded from disk at
        # render time (True) or are already numpy arrays in memory (False).
        if isinstance(sequence, list):
            if isinstance(sequence[0], str):
                if load_images:
                    sequence = [imread(file) for file in sequence]
                    fromfiles = False
                else:
                    fromfiles = True
            else:
                # sequence is already a list of numpy arrays
                fromfiles = False
        else:
            # sequence is a folder name, make it a list of files:
            fromfiles = True
            sequence = sorted(
                os.path.join(sequence, file) for file in os.listdir(sequence)
            )

        # Check that all the images are of the same size. The full shape is
        # compared, so mixing RGB and RGBA images is also rejected.
        if isinstance(sequence[0], str):
            size = imread(sequence[0]).shape
        else:
            size = sequence[0].shape

        for image in sequence:
            image1 = image
            if isinstance(image, str):
                image1 = imread(image)
            if size != image1.shape:
                # ValueError instead of bare Exception; still caught by any
                # existing ``except Exception`` handlers.
                raise ValueError(
                    "MoviePy: ImageSequenceClip requires all images to be the same size"
                )

        self.fps = fps
        if fps is not None:
            durations = [1.0 / fps] * len(sequence)
            # Each start time is nudged just below the exact frame boundary so
            # that a query at exactly t = i/fps still selects frame i despite
            # float rounding.
            self.images_starts = [
                1.0 * i / fps - np.finfo(np.float32).eps for i in range(len(sequence))
            ]
        else:
            self.images_starts = [0] + list(np.cumsum(durations))
        self.durations = durations
        self.duration = sum(durations)
        self.end = self.duration
        self.sequence = sequence

        if fps is None:
            # Average frame rate: number of frames divided by total duration.
            # BUGFIX: this ratio was previously inverted
            # (``self.duration / len(sequence)``), which is seconds-per-frame,
            # not an fps.
            self.fps = len(sequence) / self.duration

        def find_image_index(t):
            # Index of the last image whose start time is <= t.
            return max(
                i for i in range(len(self.sequence)) if self.images_starts[i] <= t
            )

        if fromfiles:
            # Cache the last decoded frame: sequential queries of the same
            # frame (common while rendering) avoid re-decoding the file.
            self.last_index = None
            self.last_image = None

            def frame_function(t):
                index = find_image_index(t)

                if index != self.last_index:
                    self.last_image = imread(self.sequence[index])[:, :, :3]
                    self.last_index = index

                return self.last_image

            if with_mask and (imread(self.sequence[0]).shape[2] == 4):
                # The alpha channel becomes a separate mask clip in [0, 1].
                self.mask = VideoClip(is_mask=True)
                self.mask.last_index = None
                self.mask.last_image = None

                def mask_frame_function(t):
                    index = find_image_index(t)
                    if index != self.mask.last_index:
                        frame = imread(self.sequence[index])[:, :, 3]
                        self.mask.last_image = frame.astype(float) / 255
                        self.mask.last_index = index

                    return self.mask.last_image

                self.mask.frame_function = mask_frame_function
                self.mask.size = mask_frame_function(0).shape[:2][::-1]

        else:

            def frame_function(t):
                index = find_image_index(t)
                return self.sequence[index][:, :, :3]

            if with_mask and (self.sequence[0].shape[2] == 4):
                self.mask = VideoClip(is_mask=True)

                def mask_frame_function(t):
                    index = find_image_index(t)
                    return 1.0 * self.sequence[index][:, :, 3] / 255

                self.mask.frame_function = mask_frame_function
                self.mask.size = mask_frame_function(0).shape[:2][::-1]

        self.frame_function = frame_function
        self.size = frame_function(0).shape[:2][::-1]
|
||||
175
moviepy/video/io/VideoFileClip.py
Normal file
175
moviepy/video/io/VideoFileClip.py
Normal file
@@ -0,0 +1,175 @@
|
||||
"""Implements VideoFileClip, a class for video clips creation using video files."""
|
||||
|
||||
from moviepy.audio.io.AudioFileClip import AudioFileClip
|
||||
from moviepy.decorators import convert_path_to_string
|
||||
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
|
||||
from moviepy.video.VideoClip import VideoClip
|
||||
|
||||
|
||||
class VideoFileClip(VideoClip):
    """A video clip originating from a movie file. For instance:

    .. code:: python

        clip = VideoFileClip("myHolidays.mp4")
        clip.close()
        with VideoFileClip("myMaskVideo.avi") as clip2:
            pass  # Implicit close called by context manager.


    Parameters
    ----------

    filename:
      The name of the video file, as a string or a path-like object.
      It can have any extension supported by ffmpeg:
      .ogv, .mp4, .mpeg, .avi, .mov etc.

    has_mask:
      Set this to 'True' if there is a mask included in the videofile.
      Video files rarely contain masks, but some video codecs enable
      that. For instance if you have a MoviePy VideoClip with a mask you
      can save it to a videofile with a mask. (see also
      ``VideoClip.write_videofile`` for more details).

    audio:
      Set to `False` if the clip doesn't have any audio or if you do not
      wish to read the audio.

    target_resolution:
      Set to (desired_width, desired_height) to have ffmpeg resize the frames
      before returning them. This is much faster than streaming in high-res
      and then resizing. If either dimension is None, the frames are resized
      by keeping the existing aspect ratio.

    resize_algorithm:
      The algorithm used for resizing. Default: "bicubic", other popular
      options include "bilinear" and "fast_bilinear". For more information, see
      https://ffmpeg.org/ffmpeg-scaler.html

    fps_source:
      The fps value to collect from the metadata. Set by default to 'fps', but
      can be set to 'tbr', which may be helpful if you are finding that it is reading
      the incorrect fps from the file.

    pixel_format
      Optional: Pixel format for the video to read. If is not specified
      'rgb24' will be used as the default format unless ``has_mask`` is set
      as ``True``, then 'rgba' will be used.

    is_mask
      `True` if the clip is going to be used as a mask.


    Attributes
    ----------

    filename:
      Name of the original video file.

    fps:
      Frames per second in the original file.


    Read docs for Clip() and VideoClip() for other, more generic, attributes.

    Lifetime
    --------

    Note that this creates subprocesses and locks files. If you construct one
    of these instances, you must call close() afterwards, or the subresources
    will not be cleaned up until the process ends.

    If copies are made, and close() is called on one, it may cause methods on
    the other copies to fail.
    """

    @convert_path_to_string("filename")
    def __init__(
        self,
        filename,
        decode_file=False,
        has_mask=False,
        audio=True,
        audio_buffersize=200000,
        target_resolution=None,
        resize_algorithm="bicubic",
        audio_fps=44100,
        audio_nbytes=2,
        fps_source="fps",
        pixel_format=None,
        is_mask=False,
    ):
        VideoClip.__init__(self, is_mask=is_mask)

        # Make a reader.
        # 'rgba' keeps the alpha channel so it can be split off as the mask
        # below; otherwise plain 'rgb24' frames are enough.
        if not pixel_format:
            pixel_format = "rgba" if has_mask else "rgb24"

        self.reader = FFMPEG_VideoReader(
            filename,
            decode_file=decode_file,
            pixel_format=pixel_format,
            target_resolution=target_resolution,
            resize_algo=resize_algorithm,
            fps_source=fps_source,
        )

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = filename

        if has_mask:
            # RGB channels feed the main clip; the alpha channel becomes a
            # separate mask clip with values scaled to [0, 1].
            self.frame_function = lambda t: self.reader.get_frame(t)[:, :, :3]

            def mask_frame_function(t):
                return self.reader.get_frame(t)[:, :, 3] / 255.0

            self.mask = VideoClip(
                is_mask=True, frame_function=mask_frame_function
            ).with_duration(self.duration)
            self.mask.fps = self.fps

        else:
            self.frame_function = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos["audio_found"]:
            self.audio = AudioFileClip(
                filename,
                buffersize=audio_buffersize,
                fps=audio_fps,
                nbytes=audio_nbytes,
            )

    def __deepcopy__(self, memo):
        """Implements ``copy.deepcopy(clip)`` behaviour as ``copy.copy(clip)``.

        VideoFileClip class instances can't be deeply copied because the locked Thread
        of ``proc`` isn't pickleable. Without this override, calls to
        ``copy.deepcopy(clip)`` would raise a ``TypeError``:

        ```
        TypeError: cannot pickle '_thread.lock' object
        ```
        """
        return self.__copy__()

    def close(self):
        """Close the internal reader."""
        if self.reader:
            self.reader.close()
            self.reader = None

        try:
            # ``self.audio`` may never have been created (no audio stream, or
            # audio=False was passed), hence the AttributeError guard.
            if self.audio:
                self.audio.close()
                self.audio = None
        except AttributeError:  # pragma: no cover
            pass
|
||||
1
moviepy/video/io/__init__.py
Normal file
1
moviepy/video/io/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Classes and methods for reading, writing and previewing video files."""
|
||||
284
moviepy/video/io/display_in_notebook.py
Normal file
284
moviepy/video/io/display_in_notebook.py
Normal file
@@ -0,0 +1,284 @@
|
||||
"""Implements ``display_in_notebook``, a function to embed images/videos/audio in the
|
||||
Jupyter Notebook.
|
||||
"""
|
||||
|
||||
# Notes:
|
||||
# All media are physically embedded in the Jupyter Notebook
|
||||
# (instead of simple links to the original files)
|
||||
# That is because most browsers use a cache system and they won't
|
||||
# properly refresh the media when the original files are changed.
|
||||
|
||||
import inspect
|
||||
import os
|
||||
from base64 import b64encode
|
||||
|
||||
from moviepy.audio.AudioClip import AudioClip
|
||||
from moviepy.tools import extensions_dict
|
||||
from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
|
||||
from moviepy.video.VideoClip import ImageClip, VideoClip
|
||||
|
||||
|
||||
# Use IPython's HTML display object when available; HTML2 extends it so two
# HTML snippets can be concatenated with ``+``.
try:  # pragma: no cover
    from IPython.display import HTML

    ipython_available = True

    class HTML2(HTML):  # noqa D101
        def __add__(self, other):
            # Concatenate the underlying HTML source of both objects.
            return HTML2(self.data + other.data)

except ImportError:
    # Fallback when IPython is not installed: HTML2 degrades to identity so
    # this module still imports; display_in_notebook checks ipython_available
    # and raises before HTML2 is ever needed.
    def HTML2(content):  # noqa D103
        return content

    ipython_available = False
|
||||
|
||||
|
||||
# Message shown inside the tag when the browser cannot play HTML5 media.
sorry = "Sorry, seems like your browser doesn't support HTML5 audio/video"
# HTML templates, one per media kind, filled with %(options)s (extra tag
# attributes), %(ext)s (file extension) and %(data)s (base64-encoded bytes).
templates = {
    "audio": (
        "<audio controls>"
        "<source %(options)s src='data:audio/%(ext)s;base64,%(data)s'>"
        + sorry
        + "</audio>"
    ),
    "image": "<img %(options)s src='data:image/%(ext)s;base64,%(data)s'>",
    "video": (
        # BUGFIX: a space is required after %(options)s — the two implicitly
        # concatenated literals previously produced e.g.
        # "<video width='360'src='...'", a malformed attribute list whenever
        # options were supplied.
        "<video %(options)s "
        "src='data:video/%(ext)s;base64,%(data)s' controls>" + sorry + "</video>"
    ),
}
|
||||
|
||||
|
||||
def html_embed(
    clip, filetype=None, maxduration=60, rd_kwargs=None, center=True, **html_kwargs
):
    """Returns HTML5 code embedding the clip.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      Either a file name, or a clip to preview.
      Either an image, a sound or a video. Clips will actually be
      written to a file and embedded as if a filename was provided.

    filetype : str, optional
      One of 'video','image','audio'. If None is given, it is determined
      based on the extension of ``filename``, but this can bug.

    maxduration : float, optional
      An error will be raised if the clip's duration is more than the indicated
      value (in seconds), to avoid spoiling the browser's cache and the RAM.

    rd_kwargs : dict, optional
      Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
      Allow you to give some options to the render process. You can, for
      example, disable the logger bar passing ``dict(logger=None)``.

    center : bool, optional
      If true (default), the content will be wrapped in a
      ``<div align=middle>`` HTML container, so the content will be displayed
      at the center.

    html_kwargs
      Allow you to give some options, like ``width=260``, ``autoplay=True``,
      ``loop=1`` etc.

    Raises
    ------
    ValueError
        If the clip class is unknown, the file type cannot be determined,
        the extension cannot be shown in a notebook, or the media exceeds
        ``maxduration``.

    Examples
    --------
    .. code:: python

        from moviepy import *
        # later ...
        html_embed(clip, width=360)
        html_embed(clip.audio)

        clip.write_gif("test.gif")
        html_embed('test.gif')

        clip.save_frame("first_frame.jpeg")
        html_embed("first_frame.jpeg")
    """
    if rd_kwargs is None:  # pragma: no cover
        rd_kwargs = {}

    if "Clip" in str(clip.__class__):
        # Render the clip to a temporary file, then embed that file by
        # recursing with the filename.
        # NOTE(review): the temporary file is left on disk after embedding.
        TEMP_PREFIX = "__temp__"
        if isinstance(clip, ImageClip):
            filename = TEMP_PREFIX + ".png"
            kwargs = {"filename": filename, "with_mask": True}
            # Forward only the rendering kwargs that save_frame understands.
            argnames = inspect.getfullargspec(clip.save_frame).args
            kwargs.update(
                {key: value for key, value in rd_kwargs.items() if key in argnames}
            )
            clip.save_frame(**kwargs)
        elif isinstance(clip, VideoClip):
            filename = TEMP_PREFIX + ".mp4"
            kwargs = {"filename": filename, "preset": "ultrafast"}
            kwargs.update(rd_kwargs)
            clip.write_videofile(**kwargs)
        elif isinstance(clip, AudioClip):
            filename = TEMP_PREFIX + ".mp3"
            kwargs = {"filename": filename}
            kwargs.update(rd_kwargs)
            clip.write_audiofile(**kwargs)
        else:
            raise ValueError("Unknown class for the clip. Cannot embed and preview.")

        # ``filetype`` is intentionally not forwarded: the recursion derives
        # it from the just-written file's extension.
        return html_embed(
            filename,
            maxduration=maxduration,
            rd_kwargs=rd_kwargs,
            center=center,
            **html_kwargs,
        )

    filename = clip
    # Render extra tag attributes, e.g. width=260 -> "width='260'".
    options = " ".join(["%s='%s'" % (str(k), str(v)) for k, v in html_kwargs.items()])
    _, ext = os.path.splitext(filename)
    ext = ext[1:]

    if filetype is None:
        ext = filename.split(".")[-1].lower()
        if ext == "gif":
            filetype = "image"
        elif ext in extensions_dict:
            filetype = extensions_dict[ext]["type"]
        else:
            raise ValueError(
                "No file type is known for the provided file. Please provide "
                "argument `filetype` (one of 'image', 'video', 'sound') to the "
                "display_in_notebook function."
            )

    if filetype == "video":
        # The next lines set the HTML5-compatible extension and check that the
        # extension is HTML5-valid
        exts_htmltype = {"mp4": "mp4", "webm": "webm", "ogv": "ogg"}
        allowed_exts = " ".join(exts_htmltype.keys())
        try:
            ext = exts_htmltype[ext]
        except KeyError:
            # Narrowed from a bare ``except Exception``: only the dict
            # lookup above can fail here.
            raise ValueError(
                "This video extension cannot be displayed in the "
                "Jupyter Notebook. Allowed extensions: " + allowed_exts
            )

    if filetype in ["audio", "video"]:
        duration = ffmpeg_parse_infos(filename, decode_file=True)["duration"]
        if duration > maxduration:
            raise ValueError(
                (
                    "The duration of video %s (%.1f) exceeds the 'maxduration'"
                    " attribute. You can increase 'maxduration', by passing"
                    " 'maxduration' parameter to display_in_notebook function."
                    " But note that embedding large videos may take all the memory"
                    " away!"
                )
                % (filename, duration)
            )

    # Physically embed the media as base64 so the notebook does not depend on
    # an external file (browser caches would not refresh a changed file).
    with open(filename, "rb") as file:
        data = b64encode(file.read()).decode("utf-8")

    template = templates[filetype]

    result = template % {"data": data, "options": options, "ext": ext}
    if center:
        result = r"<div align=middle>%s</div>" % result

    return result
|
||||
|
||||
|
||||
def display_in_notebook(
    clip,
    filetype=None,
    maxduration=60,
    t=None,
    fps=None,
    rd_kwargs=None,
    center=True,
    **html_kwargs,
):
    """Embed and display clip content inside a Jupyter Notebook.

    Remarks: If your browser doesn't support HTML5, this should warn you.
    If nothing is displayed, maybe your file or filename is wrong.
    Important: The media will be physically embedded in the notebook.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      Either the name of a file, or a clip to preview. The clip will actually
      be written to a file and embedded as if a filename was provided.

    filetype : str, optional
      One of ``"video"``, ``"image"`` or ``"audio"``. If None is given, it is
      determined based on the extension of ``filename``, but this can bug.

    maxduration : float, optional
      An error will be raised if the clip's duration is more than the indicated
      value (in seconds), to avoid spoiling the browser's cache and the RAM.

    t : float, optional
      If not None, only the frame at time t will be displayed in the notebook,
      instead of a video of the clip.

    fps : int, optional
      Enables to specify an fps, as required for clips whose fps is unknown.

    rd_kwargs : dict, optional
      Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
      Allow you to give some options to the render process. You can, for
      example, disable the logger bar passing ``dict(logger=None)``.

    center : bool, optional
      If true (default), the content will be wrapped in a
      ``<div align=middle>`` HTML container, so the content will be displayed
      at the center.

    kwargs
      Allow you to give some options, like ``width=260``, etc. When editing
      looping gifs, a good choice is ``loop=1, autoplay=1``.

    Raises
    ------
    ImportError
        If IPython is not installed (i.e. not running in a notebook).

    Examples
    --------

    .. code:: python

        from moviepy import *
        # later ...
        clip.display_in_notebook(width=360)
        clip.audio.display_in_notebook()

        clip.write_gif("test.gif")
        display_in_notebook('test.gif')

        clip.save_frame("first_frame.jpeg")
        display_in_notebook("first_frame.jpeg")
    """
    # Bail out early when IPython's display machinery is unavailable.
    if not ipython_available:
        raise ImportError("Only works inside an Jupyter Notebook")

    if rd_kwargs is None:
        rd_kwargs = {}
    if fps is not None:
        rd_kwargs["fps"] = fps

    # A non-None ``t`` means: display one still frame instead of the clip.
    if t is not None:
        clip = clip.to_ImageClip(t)

    embed_code = html_embed(
        clip,
        filetype=filetype,
        maxduration=maxduration,
        center=center,
        rd_kwargs=rd_kwargs,
        **html_kwargs,
    )
    return HTML2(embed_code)
|
||||
882
moviepy/video/io/ffmpeg_reader.py
Normal file
882
moviepy/video/io/ffmpeg_reader.py
Normal file
@@ -0,0 +1,882 @@
|
||||
"""Implements all the functions to read a video or a picture using ffmpeg."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess as sp
|
||||
import warnings
|
||||
from log import log_step
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.config import FFMPEG_BINARY # ffmpeg, ffmpeg.exe, etc...
|
||||
from moviepy.tools import (
|
||||
convert_to_seconds,
|
||||
cross_platform_popen_params,
|
||||
ffmpeg_escape_filename,
|
||||
)
|
||||
|
||||
|
||||
class FFMPEG_VideoReader:
|
||||
"""Class for video byte-level reading with ffmpeg."""
|
||||
|
||||
    def __init__(
        self,
        filename,
        decode_file=True,
        print_infos=False,
        bufsize=None,
        pixel_format="rgb24",
        check_duration=True,
        target_resolution=None,
        resize_algo="bicubic",
        fps_source="fps",
    ):
        """Probe ``filename`` with ffmpeg and open a raw-frame reading pipe.

        Parameters
        ----------
        filename : str
            Path of the video file to read.
        decode_file : bool, optional
            Forwarded to ``ffmpeg_parse_infos`` when probing the file.
        print_infos : bool, optional
            Forwarded to ``ffmpeg_parse_infos``.
        bufsize : int, optional
            Pipe buffer size in bytes; defaults to one full frame plus slack.
        pixel_format : str, optional
            Pixel format requested from ffmpeg ('rgb24' or 'rgba').
        check_duration : bool, optional
            Forwarded to ``ffmpeg_parse_infos``.
        target_resolution : tuple, optional
            (width, height) ffmpeg should scale frames to; a None dimension
            is computed from the other to preserve aspect ratio.
        resize_algo : str, optional
            ffmpeg scaler algorithm (see https://ffmpeg.org/ffmpeg-scaler.html).
        fps_source : str, optional
            Metadata field the fps is taken from ('fps' or 'tbr').
        """
        self.filename = filename
        # The ffmpeg subprocess; created by initialize() below.
        self.proc = None
        # Probe the file once for duration, size, fps, codec, audio, etc.
        infos = ffmpeg_parse_infos(
            filename,
            check_duration=check_duration,
            fps_source=fps_source,
            decode_file=decode_file,
            print_infos=print_infos,
        )
        # If framerate is unavailable, assume 1.0 FPS to avoid divide-by-zero errors.
        self.fps = infos.get("video_fps", 1.0)
        # If frame size is unavailable, set 1x1 to avoid divide-by-zero errors.
        self.size = infos.get("video_size", (1, 1))

        # ffmpeg automatically rotates videos if rotation information is
        # available, so exchange width and height
        self.rotation = abs(infos.get("video_rotation", 0))
        if self.rotation in [90, 270]:
            self.size = [self.size[1], self.size[0]]

        if target_resolution:
            if None in target_resolution:
                # Only one dimension was given: derive the scale ratio from it
                # so the other dimension keeps the original aspect ratio.
                ratio = 1
                for idx, target in enumerate(target_resolution):
                    if target:
                        ratio = target / self.size[idx]
                self.size = (int(self.size[0] * ratio), int(self.size[1] * ratio))
            else:
                self.size = target_resolution
        self.resize_algo = resize_algo

        # Expose probed metadata (0 / 0.0 fallbacks when a field is missing).
        self.duration = infos.get("video_duration", 0.0)
        self.ffmpeg_duration = infos.get("duration", 0.0)
        self.n_frames = infos.get("video_n_frames", 0)
        self.bitrate = infos.get("video_bitrate", 0)

        self.infos = infos

        self.pixel_format = pixel_format
        self.depth = 4 if pixel_format[-1] == "a" else 3
        # 'a' represents 'alpha' which means that each pixel has 4 values instead of 3.
        # See https://github.com/Zulko/moviepy/issues/1070#issuecomment-644457274

        if bufsize is None:
            # Default pipe buffer: one full frame plus a little slack.
            w, h = self.size
            bufsize = self.depth * w * h + 100

        self.bufsize = bufsize
        # Start the ffmpeg subprocess and pre-read the first frame.
        self.initialize()
|
||||
|
||||
def initialize(self, start_time=0):
    """
    Opens the file, creates the pipe.

    Sets self.pos to the appropriate value (1 if start_time == 0 because
    it pre-reads the first frame).

    Parameters
    ----------
    start_time : float, optional
      Time (in seconds) at which the ffmpeg process should start decoding.
    """
    # Terminate any previously opened subprocess, but keep the cached frame.
    self.close(delete_lastread=False)  # if any

    if start_time != 0:
        # Split the seek in two: a coarse `-ss` *before* `-i` (fast,
        # keyframe-based) and an accurate `-ss` *after* `-i` for the
        # remaining offset (at most 1 second of precise decoding).
        offset = min(1, start_time)
        i_arg = [
            "-ss",
            "%.06f" % (start_time - offset),
            "-i",
            ffmpeg_escape_filename(self.filename),
            "-ss",
            "%.06f" % offset,
        ]
    else:
        i_arg = ["-i", ffmpeg_escape_filename(self.filename)]

    # For webm video (vp8 and vp9) with transparent layer, force libvpx/libvpx-vp9
    # as ffmpeg native webm decoder dont decode alpha layer
    # (see
    # https://www.reddit.com/r/ffmpeg/comments/fgpyfb/help_with_webm_with_alpha_channel/
    # )
    if self.depth == 4:
        codec_name = self.infos.get("video_codec_name")
        if codec_name == "vp9":
            i_arg = ["-c:v", "libvpx-vp9"] + i_arg
        elif codec_name == "vp8":
            i_arg = ["-c:v", "libvpx"] + i_arg

    # print(self.infos)
    # NOTE(review): `log_step` is not defined anywhere in this view and does
    # not exist in upstream moviepy — confirm it is imported at file top;
    # it looks like leftover instrumentation.
    log_step("init", 100, self.infos)

    # Decode to raw RGB(A) frames streamed over stdout (`image2pipe` +
    # rawvideo), scaled to self.size with the requested resize algorithm.
    cmd = (
        [FFMPEG_BINARY]
        + i_arg
        + [
            "-loglevel",
            "error",
            "-f",
            "image2pipe",
            "-vf",
            "scale=%d:%d" % tuple(self.size),
            "-sws_flags",
            self.resize_algo,
            "-pix_fmt",
            self.pixel_format,
            "-vcodec",
            "rawvideo",
            "-",
        ]
    )

    # print(" ".join(cmd))

    popen_params = cross_platform_popen_params(
        {
            "bufsize": self.bufsize,
            "stdout": sp.PIPE,
            "stderr": sp.PIPE,
            "stdin": sp.DEVNULL,
        }
    )
    self.proc = sp.Popen(cmd, **popen_params)

    # self.pos represents the (0-indexed) index of the frame that is next in line
    # to be read by self.read_frame().
    # Eg when self.pos is 1, the 2nd frame will be read next.
    self.pos = self.get_frame_number(start_time)
    self.last_read = self.read_frame()
|
||||
|
||||
def skip_frames(self, n=1):
    """Read and discard ``n`` frames from the ffmpeg pipe.

    Advances ``self.pos`` by ``n`` without decoding the pixel data into
    numpy arrays; the bytes are simply drained from stdout.
    """
    width, height = self.size
    frame_nbytes = self.depth * width * height
    read = self.proc.stdout.read
    for _ in range(n):
        read(frame_nbytes)
    self.pos += n
|
||||
|
||||
def read_frame(self):
    """
    Return the next frame of the file as an (h, w, depth) uint8 array.

    Note that upon (re)initialization, the first frame will already have
    been read and stored in ``self.last_read``. On a truncated read the
    last valid frame is re-served (with a warning); if no frame was ever
    read, an IOError is raised instead.
    """
    w, h = self.size
    expected = self.depth * w * h

    raw = self.proc.stdout.read(expected)

    if len(raw) == expected:
        if hasattr(np, "frombuffer"):
            frame = np.frombuffer(raw, dtype="uint8")
        else:
            frame = np.fromstring(raw, dtype="uint8")
        frame.shape = (h, w, len(raw) // (w * h))
        self.last_read = frame
    else:
        warnings.warn(
            (
                "In file %s, %d bytes wanted but %d bytes read at frame index"
                " %d (out of a total %d frames), at time %.02f/%.02f sec."
                " Using the last valid frame instead."
            )
            % (
                self.filename,
                expected,
                len(raw),
                self.pos,
                self.n_frames,
                1.0 * self.pos / self.fps,
                self.duration,
            ),
            UserWarning,
        )
        if not hasattr(self, "last_read"):
            raise IOError(
                (
                    "MoviePy error: failed to read the first frame of "
                    f"video file {self.filename}. That might mean that the file is "
                    "corrupted. That may also mean that you are using "
                    "a deprecated version of FFMPEG. On Ubuntu/Debian "
                    "for instance the version in the repos is deprecated. "
                    "Please update to a recent version from the website."
                )
            )
        frame = self.last_read

    # `self.pos` appears in the warning message above, so it is only
    # advanced once the read attempt is fully handled.
    self.pos += 1
    return frame
|
||||
|
||||
def get_frame(self, t):
    """Return the video frame shown at time ``t`` (in seconds).

    Note for coders: getting an arbitrary frame in the video with ffmpeg
    can be painfully slow if some decoding has to be done, so this method
    walks between adjacent frames whenever possible and only re-opens the
    pipe (a seek) when the target lies behind the cursor or far ahead.
    """
    # +1 so that `target` is the cursor position *after* the target frame
    # has been read, which makes the comparisons with self.pos direct.
    target = self.get_frame_number(t) + 1

    # Pipe not open (e.g. after close()): restart ffmpeg at time t.
    if not self.proc:
        # raise Exception("Proc not detected")
        self.initialize(t)
        return self.last_read

    if target == self.pos:
        # The requested frame is exactly the one already buffered.
        return self.last_read

    if target < self.pos or target > self.pos + 100:
        # Backwards, or too far forwards to decode frame-by-frame:
        # re-seeking is cheaper than skipping.
        self.initialize(t)
        return self.last_read

    # Nearby and ahead: drain the intermediate frames, then decode the
    # target. When target == self.pos + 1 the skip is a no-op.
    self.skip_frames(target - self.pos - 1)
    return self.read_frame()
|
||||
|
||||
@property
def lastread(self):
    """Deprecated spelling of ``last_read``, kept so MoviePy 1.x callers
    keep working."""
    return getattr(self, "last_read")
|
||||
|
||||
def get_frame_number(self, t):
    """Return the index of the frame displayed at time ``t`` (seconds).

    A tiny epsilon is added before truncating: floating-point round-off
    can turn e.g. 3.0 into 2.99999999..., which would make ``int()`` land
    on the previous frame when callers request ``get_frame(n / fps)``.
    """
    return int(t * self.fps + 0.00001)
|
||||
|
||||
def close(self, delete_lastread=True):
    """Shut down the reader, terminating the ffmpeg process if still open.

    Parameters
    ----------
    delete_lastread
      Also drop the cached ``last_read`` frame to release its memory.
    """
    if self.proc:
        # Only terminate when the subprocess has not already exited.
        if self.proc.poll() is None:
            self.proc.terminate()
            self.proc.stdout.close()
            self.proc.stderr.close()
            self.proc.wait()
        self.proc = None
    if delete_lastread and hasattr(self, "last_read"):
        del self.last_read
|
||||
|
||||
def __del__(self):
    # Release the ffmpeg subprocess and its pipes when the reader is
    # garbage-collected; close() is a no-op if already closed.
    self.close()
|
||||
|
||||
|
||||
def ffmpeg_read_image(filename, with_mask=True, pixel_format=None):
    """Read an image file (PNG, BMP, JPEG...).

    Wraps FFMPEG_Videoreader to read just one image and returns it as a
    numpy uint8 array of shape (h, w, 3), or (h, w, 4) when the alpha
    layer is kept.

    This function is not meant to be used directly in MoviePy.
    Use ImageClip instead to make clips out of image files.

    Parameters
    ----------

    filename
      Name of the image file. Can be of any format supported by ffmpeg.

    with_mask
      If the image has a transparency layer, ``with_mask=true`` will keep
      this layer as the fourth channel of the returned array.

    pixel_format
      Optional: Pixel format for the image to read. If is not specified
      'rgb24' will be used as the default format unless ``with_mask`` is set
      as ``True``, then 'rgba' will be used.

    """
    if not pixel_format:
        pixel_format = "rgba" if with_mask else "rgb24"
    reader = FFMPEG_VideoReader(
        filename, pixel_format=pixel_format, check_duration=False
    )
    im = reader.last_read
    # Close explicitly rather than relying on `del` + refcounting to trigger
    # __del__: this terminates the ffmpeg subprocess deterministically on any
    # Python implementation. The already-read frame `im` is kept alive by our
    # own reference, so dropping the reader's cache is safe.
    reader.close(delete_lastread=False)
    return im
|
||||
|
||||
|
||||
class FFmpegInfosParser:
    """Finite state ffmpeg `-i` command option file information parser.
    Is designed to parse the output fast, in one loop. Iterates line by
    line of the `ffmpeg -i <filename> [-f null -]` command output changing
    the internal state of the parser.

    Parameters
    ----------

    filename
      Name of the file parsed, only used to raise accurate error messages.

    infos
      Information returned by FFmpeg.

    fps_source
      Indicates what source data will be preferably used to retrieve fps data.

    check_duration
      Enable or disable the parsing of the duration of the file. Useful to
      skip the duration check, for example, for images.

    decode_file
      Indicates if the whole file has been decoded. The duration parsing strategy
      will differ depending on this argument.
    """

    def __init__(
        self,
        infos,
        filename,
        fps_source="fps",
        check_duration=True,
        decode_file=False,
    ):
        self.infos = infos
        self.filename = filename
        self.check_duration = check_duration
        self.fps_source = fps_source
        # When the whole file was decoded (`ffmpeg ... -f null -`), the
        # authoritative duration appears on progress lines tagged "time=";
        # otherwise it is read from the header's "Duration: " line.
        self.duration_tag_separator = "time=" if decode_file else "Duration: "

        self._reset_state()

    def _reset_state(self):
        """Reinitializes the state of the parser. Used internally at
        initialization and at the end of the parsing process.
        """
        # could be 2 possible types of metadata:
        # - file_metadata: Metadata of the container. Here are the tags set
        #   by the user using `-metadata` ffmpeg option
        # - stream_metadata: Metadata for each stream of the container.
        self._inside_file_metadata = False

        # this state is needed if `duration_tag_separator == "time="` because
        # execution of ffmpeg decoding the whole file using `-f null -` appends
        # to the output the blocks "Stream mapping:" and "Output:", which
        # should be ignored
        self._inside_output = False

        # flag which indicates that a default stream has not been found yet
        self._default_stream_found = False

        # current input file, stream and chapter, which will be built at runtime
        self._current_input_file = {"streams": []}
        self._current_stream = None
        self._current_chapter = None

        # resulting data of the parsing process
        self.result = {
            "video_found": False,
            "audio_found": False,
            "metadata": {},
            "inputs": [],
        }

        # keep the value of latest metadata value parsed so we can build
        # at next lines a multiline metadata value
        self._last_metadata_field_added = None

    def parse(self):
        """Parses the information returned by FFmpeg in stderr executing their binary
        for a file with ``-i`` option and returns a dictionary with all data needed
        by MoviePy.
        """
        # chapters by input file
        input_chapters = []

        # NOTE(review): the string literals below ("  Metadata:",
        # "  Duration:", "    Chapter") rely on ffmpeg's exact output
        # indentation — confirm against upstream output before reformatting.
        for line in self.infos.splitlines()[1:]:
            if (
                self.duration_tag_separator == "time="
                and self.check_duration
                and "time=" in line
            ):
                # parse duration using file decodification
                self.result["duration"] = self.parse_duration(line)
            elif self._inside_output or line[0] != " ":
                if self.duration_tag_separator == "time=" and not self._inside_output:
                    self._inside_output = True
                # skip lines like "At least one output file must be specified"
            elif not self._inside_file_metadata and line.startswith("  Metadata:"):
                # enter "  Metadata:" group
                self._inside_file_metadata = True
            elif line.startswith("  Duration:"):
                # exit "  Metadata:" group
                self._inside_file_metadata = False
                if self.check_duration and self.duration_tag_separator == "Duration: ":
                    self.result["duration"] = self.parse_duration(line)

                # parse global bitrate (in kb/s)
                bitrate_match = re.search(r"bitrate: (\d+) kb/s", line)
                self.result["bitrate"] = (
                    int(bitrate_match.group(1)) if bitrate_match else None
                )

                # parse start time (in seconds)
                start_match = re.search(r"start: (\d+\.?\d+)", line)
                self.result["start"] = (
                    float(start_match.group(1)) if start_match else None
                )
            elif self._inside_file_metadata:
                # file metadata line
                field, value = self.parse_metadata_field_value(line)

                # multiline metadata value parsing
                if field == "":
                    field = self._last_metadata_field_added
                    value = self.result["metadata"][field] + "\n" + value
                else:
                    self._last_metadata_field_added = field
                self.result["metadata"][field] = value
            elif line.lstrip().startswith("Stream "):
                # exit stream "    Metadata:"
                if self._current_stream:
                    self._current_input_file["streams"].append(self._current_stream)

                # get input number, stream number, language and type
                main_info_match = re.search(
                    r"^Stream\s#(\d+):(\d+)(?:\[\w+\])?\(?(\w+)?\)?:\s(\w+):",
                    line.lstrip(),
                )
                (
                    input_number,
                    stream_number,
                    language,
                    stream_type,
                ) = main_info_match.groups()
                input_number = int(input_number)
                stream_number = int(stream_number)
                stream_type_lower = stream_type.lower()

                # "und" is ffmpeg's marker for an undetermined language
                if language == "und":
                    language = None

                # start building the current stream
                self._current_stream = {
                    "input_number": input_number,
                    "stream_number": stream_number,
                    "stream_type": stream_type_lower,
                    "language": language,
                    "default": not self._default_stream_found
                    or line.endswith("(default)"),
                }
                self._default_stream_found = True

                # for default streams, set their numbers globally, so it's
                # easy to get without iterating all
                if self._current_stream["default"]:
                    self.result[
                        f"default_{stream_type_lower}_input_number"
                    ] = input_number
                    self.result[
                        f"default_{stream_type_lower}_stream_number"
                    ] = stream_number

                # exit chapter
                if self._current_chapter:
                    input_chapters[input_number].append(self._current_chapter)
                    self._current_chapter = None

                if "input_number" not in self._current_input_file:
                    # first input file
                    self._current_input_file["input_number"] = input_number
                elif self._current_input_file["input_number"] != input_number:
                    # new input file

                    # include their chapters if there are for this input file
                    if len(input_chapters) >= input_number + 1:
                        self._current_input_file["chapters"] = input_chapters[
                            input_number
                        ]

                    # add new input file to self.result
                    self.result["inputs"].append(self._current_input_file)
                    self._current_input_file = {"input_number": input_number}

                # parse relevant data by stream type
                try:
                    global_data, stream_data = self.parse_data_by_stream_type(
                        stream_type, line
                    )
                except NotImplementedError as exc:
                    warnings.warn(
                        f"{str(exc)}\nffmpeg output:\n\n{self.infos}", UserWarning
                    )
                else:
                    self.result.update(global_data)
                    self._current_stream.update(stream_data)
            elif line.startswith("    Metadata:"):
                # enter group "    Metadata:"
                continue
            elif self._current_stream:
                # stream metadata line
                if "metadata" not in self._current_stream:
                    self._current_stream["metadata"] = {}

                field, value = self.parse_metadata_field_value(line)

                if self._current_stream["stream_type"] == "video":
                    field, value = self.video_metadata_type_casting(field, value)
                    if field == "rotate":
                        self.result["video_rotation"] = value

                # multiline metadata value parsing
                if field == "":
                    field = self._last_metadata_field_added
                    value = self._current_stream["metadata"][field] + "\n" + value
                else:
                    self._last_metadata_field_added = field
                self._current_stream["metadata"][field] = value
            elif line.startswith("    Chapter"):
                # Chapter data line
                if self._current_chapter:
                    # flush the previous chapter into the per-input matrix
                    if len(input_chapters) < self._current_chapter["input_number"] + 1:
                        input_chapters.append([])
                    # include in the chapters by input matrix
                    input_chapters[self._current_chapter["input_number"]].append(
                        self._current_chapter
                    )

                # extract chapter data
                chapter_data_match = re.search(
                    r"^    Chapter #(\d+):(\d+): start (\d+\.?\d+?), end (\d+\.?\d+?)",
                    line,
                )
                input_number, chapter_number, start, end = chapter_data_match.groups()

                # start building the chapter
                self._current_chapter = {
                    "input_number": int(input_number),
                    "chapter_number": int(chapter_number),
                    "start": float(start),
                    "end": float(end),
                }
            elif self._current_chapter:
                # inside chapter metadata
                if "metadata" not in self._current_chapter:
                    self._current_chapter["metadata"] = {}
                field, value = self.parse_metadata_field_value(line)

                # multiline metadata value parsing
                if field == "":
                    field = self._last_metadata_field_added
                    value = self._current_chapter["metadata"][field] + "\n" + value
                else:
                    self._last_metadata_field_added = field
                self._current_chapter["metadata"][field] = value

        # last input file, must be included in self.result
        if self._current_input_file:
            self._current_input_file["streams"].append(self._current_stream)
            # include their chapters, if there are any
            if (
                "input_number" in self._current_input_file
                and len(input_chapters) == self._current_input_file["input_number"] + 1
            ):
                self._current_input_file["chapters"] = input_chapters[
                    self._current_input_file["input_number"]
                ]
            self.result["inputs"].append(self._current_input_file)

        # some video duration utilities
        if self.result["video_found"] and self.check_duration:
            self.result["video_duration"] = self.result["duration"]
            self.result["video_n_frames"] = int(
                self.result["duration"] * self.result.get("video_fps", 0)
            )
        else:
            self.result["video_n_frames"] = 0
            self.result["video_duration"] = 0.0
        # We could have also recomputed duration from the number of frames, as follows:
        # >>> result['video_duration'] = result['video_n_frames'] / result['video_fps']

        # no default audio bitrate found: fall back to the first audio
        # stream that carries one
        if self.result["audio_found"] and not self.result.get("audio_bitrate"):
            self.result["audio_bitrate"] = None
            for streams_input in self.result["inputs"]:
                for stream in streams_input["streams"]:
                    if stream["stream_type"] == "audio" and stream.get("bitrate"):
                        self.result["audio_bitrate"] = stream["bitrate"]
                        break

                if self.result["audio_bitrate"] is not None:
                    break

        result = self.result

        # reset state of the parser
        self._reset_state()

        return result

    def parse_data_by_stream_type(self, stream_type, line):
        """Parses data from "Stream ... {stream_type}" line.

        Dispatches to the per-type parser; raises NotImplementedError for
        stream types MoviePy does not handle (caller downgrades it to a
        warning).
        """
        try:
            return {
                "Audio": self.parse_audio_stream_data,
                "Video": self.parse_video_stream_data,
                "Data": lambda _line: ({}, {}),
            }[stream_type](line)
        except KeyError:
            raise NotImplementedError(
                f"{stream_type} stream parsing is not supported by moviepy and"
                " will be ignored"
            )

    def parse_audio_stream_data(self, line):
        """Parses data from "Stream ... Audio" line.

        Returns a ``(global_data, stream_data)`` tuple; global keys are only
        filled when the stream is the default one.
        """
        global_data, stream_data = ({"audio_found": True}, {})
        try:
            stream_data["fps"] = int(re.search(r" (\d+) Hz", line).group(1))
        except (AttributeError, ValueError):
            # AttributeError: 'NoneType' object has no attribute 'group'
            # ValueError: invalid literal for int() with base 10: '<string>'
            stream_data["fps"] = "unknown"
        match_audio_bitrate = re.search(r"(\d+) kb/s", line)
        stream_data["bitrate"] = (
            int(match_audio_bitrate.group(1)) if match_audio_bitrate else None
        )
        if self._current_stream["default"]:
            global_data["audio_fps"] = stream_data["fps"]
            global_data["audio_bitrate"] = stream_data["bitrate"]
        return (global_data, stream_data)

    def parse_video_stream_data(self, line):
        """Parses data from "Stream ... Video" line.

        Returns a ``(global_data, stream_data)`` tuple with size, bitrate,
        fps, codec name and profile.
        """
        global_data, stream_data = ({"video_found": True}, {})

        try:
            match_video_size = re.search(r" (\d+)x(\d+)[,\s]", line)
            if match_video_size:
                # size, of the form 460x320 (w x h)
                stream_data["size"] = [int(num) for num in match_video_size.groups()]
        except Exception:
            raise IOError(
                (
                    "MoviePy error: failed to read video dimensions in"
                    " file '%s'.\nHere are the file infos returned by"
                    "ffmpeg:\n\n%s"
                )
                % (self.filename, self.infos)
            )

        match_bitrate = re.search(r"(\d+) kb/s", line)
        stream_data["bitrate"] = int(match_bitrate.group(1)) if match_bitrate else None

        # Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
        # tbc, and sometimes tbc/2...
        # Current policy: Trust fps first, then tbr unless fps_source is
        # specified as 'tbr' in which case try tbr then fps

        # If result is near from x*1000/1001 where x is 23,24,25,50,
        # replace by x*1000/1001 (very common case for the fps).

        if self.fps_source == "fps":
            try:
                fps = self.parse_fps(line)
            except (AttributeError, ValueError):
                fps = self.parse_tbr(line)
        elif self.fps_source == "tbr":
            try:
                fps = self.parse_tbr(line)
            except (AttributeError, ValueError):
                fps = self.parse_fps(line)
        else:
            raise ValueError(
                ("fps source '%s' not supported parsing the video '%s'")
                % (self.fps_source, self.filename)
            )

        # It is known that a fps of 24 is often written as 24000/1001
        # but then ffmpeg nicely rounds it to 23.98, which we hate.
        coef = 1000.0 / 1001.0
        for x in [23, 24, 25, 30, 50]:
            if (fps != x) and abs(fps - x * coef) < 0.01:
                fps = x * coef
        stream_data["fps"] = fps

        # Try to extract video codec and profile
        main_info_match = re.search(
            r"Video:\s(\w+)?\s?(\([^)]+\))?",
            line.lstrip(),
        )
        if main_info_match is not None:
            (codec_name, profile) = main_info_match.groups()
            stream_data["codec_name"] = codec_name
            stream_data["profile"] = profile

        # the global video_* keys track the default stream, or — failing
        # that — the first video stream encountered
        if self._current_stream["default"] or "video_codec_name" not in self.result:
            global_data["video_codec_name"] = stream_data.get("codec_name", None)

        if self._current_stream["default"] or "video_profile" not in self.result:
            global_data["video_profile"] = stream_data.get("profile", None)

        if self._current_stream["default"] or "video_size" not in self.result:
            global_data["video_size"] = stream_data.get("size", None)
        if self._current_stream["default"] or "video_bitrate" not in self.result:
            global_data["video_bitrate"] = stream_data.get("bitrate", None)
        if self._current_stream["default"] or "video_fps" not in self.result:
            global_data["video_fps"] = stream_data["fps"]

        return (global_data, stream_data)

    def parse_fps(self, line):
        """Parses number of FPS from a line of the ``ffmpeg -i`` command output."""
        # NOTE(review): the dot in `\d+.?\d*` is unescaped, so it also matches
        # any single character between the digit groups — harmless for
        # ffmpeg's "NN.NN fps" output but worth confirming upstream.
        return float(re.search(r" (\d+.?\d*) fps", line).group(1))

    def parse_tbr(self, line):
        """Parses number of TBR from a line of the ``ffmpeg -i`` command output."""
        s_tbr = re.search(r" (\d+.?\d*k?) tbr", line).group(1)

        # Sometimes comes as e.g. 12k. We need to replace that with 12000.
        if s_tbr[-1] == "k":
            tbr = float(s_tbr[:-1]) * 1000
        else:
            tbr = float(s_tbr)
        return tbr

    def parse_duration(self, line):
        """Parse the duration from the line that outputs the duration of
        the container.
        """
        try:
            # keep only the text after the separator ("Duration: " header
            # line or a "time=" progress line)
            time_raw_string = line.split(self.duration_tag_separator)[-1]
            match_duration = re.search(
                r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])",
                time_raw_string,
            )
            return convert_to_seconds(match_duration.group(1))
        except Exception:
            raise IOError(
                (
                    "MoviePy error: failed to read the duration of file '%s'.\n"
                    "Here are the file infos returned by ffmpeg:\n\n%s"
                )
                % (self.filename, self.infos)
            )

    def parse_metadata_field_value(
        self,
        line,
    ):
        """Returns a tuple with a metadata field-value pair given a ffmpeg `-i`
        command output line.
        """
        raw_field, raw_value = line.split(":", 1)
        return (raw_field.strip(" "), raw_value.strip(" "))

    def video_metadata_type_casting(self, field, value):
        """Cast needed video metadata fields to other types than the default str."""
        if field == "rotate":
            return (field, float(value))
        return (field, value)
|
||||
|
||||
|
||||
def ffmpeg_parse_infos(
    filename,
    check_duration=True,
    fps_source="fps",
    decode_file=False,
    print_infos=False,
):
    """Get the information of a file using ffmpeg.

    Returns a dictionary with next fields:

    - ``"duration"``
    - ``"metadata"``
    - ``"inputs"``
    - ``"video_found"``
    - ``"video_fps"``
    - ``"video_n_frames"``
    - ``"video_duration"``
    - ``"video_bitrate"``
    - ``"video_metadata"``
    - ``"audio_found"``
    - ``"audio_fps"``
    - ``"audio_bitrate"``
    - ``"audio_metadata"``
    - ``"video_codec_name"``
    - ``"video_profile"``

    Note that "video_duration" is slightly smaller than "duration" to avoid
    fetching the incomplete frames at the end, which raises an error.

    Parameters
    ----------

    filename
      Name of the file parsed, only used to raise accurate error messages.

    check_duration
      Enable or disable the parsing of the duration of the file. Useful to
      skip the duration check, for example, for images.

    fps_source
      Indicates what source data will be preferably used to retrieve fps data.

    decode_file
      Indicates if the whole file must be read to retrieve their duration.
      This is needed for some files in order to get the correct duration (see
      https://github.com/Zulko/moviepy/pull/1222).

    print_infos
      If True, print the raw ffmpeg output before parsing it.
    """
    # Open the file in a pipe, read output. ffmpeg writes the file
    # information to stderr, so that is the channel we parse.
    cmd = [FFMPEG_BINARY, "-hide_banner", "-i", ffmpeg_escape_filename(filename)]
    if decode_file:
        # Decode the whole file to a null muxer: the final "time=" progress
        # line then carries the true duration.
        cmd.extend(["-f", "null", "-"])

    popen_params = cross_platform_popen_params(
        {
            "bufsize": 10**5,
            "stdout": sp.PIPE,
            "stderr": sp.PIPE,
            "stdin": sp.DEVNULL,
        }
    )

    proc = sp.Popen(cmd, **popen_params)
    (output, error) = proc.communicate()
    infos = error.decode("utf8", errors="ignore")

    proc.terminate()
    del proc

    if print_infos:
        # print the whole info text returned by FFMPEG
        print(infos)

    try:
        return FFmpegInfosParser(
            infos,
            filename,
            fps_source=fps_source,
            check_duration=check_duration,
            decode_file=decode_file,
        ).parse()
    except Exception as exc:
        # Diagnose the two most common user errors precisely before
        # falling back to a generic parsing failure.
        # (Bug fix: these messages previously interpolated nothing and
        # printed a literal "(unknown)" instead of the file name.)
        if os.path.isdir(filename):
            raise IsADirectoryError(f"'{filename}' is a directory")
        elif not os.path.exists(filename):
            raise FileNotFoundError(f"'{filename}' not found")
        raise IOError(f"Error parsing `ffmpeg -i` command output:\n\n{infos}") from exc
|
||||
209
moviepy/video/io/ffmpeg_tools.py
Normal file
209
moviepy/video/io/ffmpeg_tools.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""Miscellaneous bindings to ffmpeg."""
|
||||
|
||||
import os
|
||||
|
||||
from moviepy.config import FFMPEG_BINARY
|
||||
from moviepy.decorators import convert_parameter_to_seconds, convert_path_to_string
|
||||
from moviepy.tools import ffmpeg_escape_filename, subprocess_call
|
||||
|
||||
|
||||
@convert_path_to_string(("inputfile", "outputfile"))
@convert_parameter_to_seconds(("start_time", "end_time"))
def ffmpeg_extract_subclip(
    inputfile, start_time, end_time, outputfile=None, logger="bar"
):
    """Makes a new video file playing video file between two times.

    Parameters
    ----------

    inputfile : str
      Path to the file from which the subclip will be extracted.

    start_time : float
      Moment of the input clip that marks the start of the produced subclip.

    end_time : float
      Moment of the input clip that marks the end of the produced subclip.

    outputfile : str, optional
      Path to the output file. Defaults to
      ``<inputfile_name>SUB<start_time>_<end_time><ext>``.
    """
    if not outputfile:
        # Derive "<name>SUB<start_ms>_<end_ms><ext>" next to the input file.
        name, ext = os.path.splitext(inputfile)
        start_ms, end_ms = (int(1000 * t) for t in (start_time, end_time))
        outputfile = "%sSUB%d_%d%s" % (name, start_ms, end_ms, ext)

    # Stream-copy both audio and video (no re-encoding), keeping the
    # original timestamps.
    cmd = [FFMPEG_BINARY, "-y"]
    cmd += ["-ss", "%0.2f" % start_time]
    cmd += ["-i", ffmpeg_escape_filename(inputfile)]
    cmd += ["-t", "%0.2f" % (end_time - start_time)]
    cmd += ["-map", "0"]
    cmd += ["-vcodec", "copy"]
    cmd += ["-acodec", "copy"]
    cmd += ["-copyts"]
    cmd.append(ffmpeg_escape_filename(outputfile))
    subprocess_call(cmd, logger=logger)
|
||||
|
||||
|
||||
@convert_path_to_string(("videofile", "audiofile", "outputfile"))
def ffmpeg_merge_video_audio(
    videofile,
    audiofile,
    outputfile,
    video_codec="copy",
    audio_codec="copy",
    logger="bar",
):
    """Merges video file and audio file into one movie file.

    Parameters
    ----------

    videofile : str
      Path to the video file used in the merge.

    audiofile : str
      Path to the audio file used in the merge.

    outputfile : str
      Path to the output file.

    video_codec : str, optional
      Video codec used by FFmpeg in the merge.

    audio_codec : str, optional
      Audio codec used by FFmpeg in the merge.
    """
    # Both inputs are fed to ffmpeg; the default codecs ("copy") mux the
    # streams without re-encoding.
    cmd = [FFMPEG_BINARY, "-y"]
    cmd += ["-i", ffmpeg_escape_filename(audiofile)]
    cmd += ["-i", ffmpeg_escape_filename(videofile)]
    cmd += ["-vcodec", video_codec]
    cmd += ["-acodec", audio_codec]
    cmd.append(ffmpeg_escape_filename(outputfile))

    subprocess_call(cmd, logger=logger)
|
||||
|
||||
|
||||
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger="bar"):
    """Extract the sound from a video file and save it in ``outputfile``.

    Parameters
    ----------

    inputfile : str
      The path to the file from which the audio will be extracted.

    outputfile : str
      The path to the file to which the audio will be stored.

    bitrate : int, optional
      Bitrate for the new audio file.

    fps : int, optional
      Frame rate for the new audio file.
    """
    # `-ab` sets the audio bitrate (in kb/s), `-ar` the sample rate.
    audio_opts = ["-ab", "%dk" % bitrate, "-ar", "%d" % fps]
    cmd = (
        [FFMPEG_BINARY, "-y", "-i", ffmpeg_escape_filename(inputfile)]
        + audio_opts
        + [ffmpeg_escape_filename(outputfile)]
    )
    subprocess_call(cmd, logger=logger)
|
||||
|
||||
|
||||
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_resize(inputfile, outputfile, size, logger="bar"):
    """Resizes a file to new size and write the result in another.

    Parameters
    ----------

    inputfile : str
      Path to the file to be resized.

    outputfile : str
      Path to the output file.

    size : list or tuple
      New size in format ``[width, height]`` for the output file.
    """
    width, height = size[0], size[1]
    cmd = [
        FFMPEG_BINARY,
        "-i",
        ffmpeg_escape_filename(inputfile),
        "-vf",
        "scale=%d:%d" % (width, height),
        ffmpeg_escape_filename(outputfile),
    ]

    subprocess_call(cmd, logger=logger)
|
||||
|
||||
|
||||
@convert_path_to_string(("inputfile", "outputfile", "output_dir"))
def ffmpeg_stabilize_video(
    inputfile, outputfile=None, output_dir="", overwrite_file=True, logger="bar"
):
    """Stabilize ``inputfile`` with FFmpeg's ``deshake`` filter and write the result.

    Parameters
    ----------

    inputfile : str
        The name of the shaky video.

    outputfile : str, optional
        The name of new stabilized video. Defaults to appending '_stabilized' to
        the input file name.

    output_dir : str, optional
        The directory to place the output video in. Defaults to the current
        working directory.

    overwrite_file : bool, optional
        If ``outputfile`` already exists in ``output_dir``, then overwrite
        ``outputfile`` Defaults to True.
    """
    # Derive a default output name from the input's basename when none is given.
    if not outputfile:
        name, ext = os.path.splitext(os.path.basename(inputfile))
        outputfile = f"{name}_stabilized{ext}"

    outputfile = os.path.join(output_dir, outputfile)

    cmd = [
        FFMPEG_BINARY,
        "-i",
        ffmpeg_escape_filename(inputfile),
        "-vf",
        "deshake",
        ffmpeg_escape_filename(outputfile),
    ]
    if overwrite_file:
        cmd.append("-y")

    subprocess_call(cmd, logger=logger)
|
||||
344
moviepy/video/io/ffmpeg_writer.py
Normal file
344
moviepy/video/io/ffmpeg_writer.py
Normal file
@@ -0,0 +1,344 @@
|
||||
"""
|
||||
On the long term this will implement several methods to make videos
|
||||
out of VideoClips
|
||||
"""
|
||||
|
||||
import subprocess as sp
|
||||
|
||||
import numpy as np
|
||||
from proglog import proglog
|
||||
|
||||
from moviepy.config import FFMPEG_BINARY
|
||||
from moviepy.tools import cross_platform_popen_params, ffmpeg_escape_filename
|
||||
|
||||
|
||||
class FFMPEG_VideoWriter:
    """A class for FFMPEG-based video writing.

    Parameters
    ----------

    filename : str
        Any filename like ``"video.mp4"`` etc. but if you want to avoid
        complications it is recommended to use the generic extension ``".avi"``
        for all your videos.

    size : tuple or list
        Size of the output video in pixels (width, height).

    fps : int
        Frames per second in the output video file.

    codec : str, optional
        FFMPEG codec. It seems that in terms of quality the hierarchy is
        'rawvideo' = 'png' > 'mpeg4' > 'libx264'
        'png' manages the same lossless quality as 'rawvideo' but yields
        smaller files. Type ``ffmpeg -codecs`` in a terminal to get a list
        of accepted codecs.

        Note for default 'libx264': by default the pixel format yuv420p
        is used. If the video dimensions are not both even (e.g. 720x405)
        another pixel format is used, and this can cause problem in some
        video readers.

    audiofile : str, optional
        The name of an audio file that will be incorporated to the video.

    preset : str, optional
        Sets the time that FFMPEG will take to compress the video. The slower,
        the better the compression rate. Possibilities are: ``"ultrafast"``,
        ``"superfast"``, ``"veryfast"``, ``"faster"``, ``"fast"``, ``"medium"``
        (default), ``"slow"``, ``"slower"``, ``"veryslow"``, ``"placebo"``.

    bitrate : str, optional
        Only relevant for codecs which accept a bitrate. "5000k" offers
        nice results in general.

    with_mask : bool, optional
        Set to ``True`` if there is a mask in the video to be encoded.

    pixel_format : str, optional
        Optional: Pixel format for the output video file. If is not specified
        ``"rgb24"`` will be used as the default format unless ``with_mask`` is
        set as ``True``, then ``"rgba"`` will be used.

    logfile : int, optional
        File descriptor for logging output. If not defined, ``subprocess.PIPE``
        will be used. Defined using another value, the log level of the ffmpeg
        command will be "info", otherwise "error".

    threads : int, optional
        Number of threads used to write the output with ffmpeg.

    ffmpeg_params : list, optional
        Additional parameters passed to ffmpeg command.
    """

    def __init__(
        self,
        filename,
        size,
        fps,
        codec="libx264",
        audiofile=None,
        preset="medium",
        bitrate=None,
        with_mask=False,
        logfile=None,
        threads=None,
        ffmpeg_params=None,
        pixel_format=None,
    ):
        if logfile is None:
            logfile = sp.PIPE
        self.logfile = logfile
        self.filename = filename
        self.codec = codec
        self.ext = self.filename.split(".")[-1]

        # BUG FIX: only fall back to the documented defaults when the caller
        # did not explicitly request a pixel format. The previous code
        # unconditionally overwrote the ``pixel_format`` argument.
        if pixel_format is None:
            pixel_format = "rgba" if with_mask else "rgb24"

        # order is important
        cmd = [
            FFMPEG_BINARY,
            "-y",
            "-loglevel",
            "error" if logfile == sp.PIPE else "info",
            "-f",
            "rawvideo",
            "-vcodec",
            "rawvideo",
            "-s",
            "%dx%d" % (size[0], size[1]),
            "-pix_fmt",
            pixel_format,
            "-r",
            "%.02f" % fps,
            "-an",  # the raw stdin stream carries no audio
            "-i",
            "-",  # read the raw frames from stdin
        ]
        if audiofile is not None:
            cmd.extend(["-i", audiofile, "-acodec", "copy"])

        cmd.extend(["-vcodec", codec, "-preset", preset])

        if ffmpeg_params is not None:
            cmd.extend(ffmpeg_params)

        if bitrate is not None:
            cmd.extend(["-b", bitrate])

        if threads is not None:
            cmd.extend(["-threads", str(threads)])

        # Disable auto alt ref for transparent webm and set pix format to yuva420p
        if codec == "libvpx" and with_mask:
            cmd.extend(["-pix_fmt", "yuva420p"])
            cmd.extend(["-auto-alt-ref", "0"])
        elif (codec == "libx264") and (size[0] % 2 == 0) and (size[1] % 2 == 0):
            # BUG FIX: use yuv420p (no alpha) here, as the class docstring
            # states. ``yuva420p`` carries an alpha plane and is not widely
            # supported by players for plain H.264 output.
            cmd.extend(["-pix_fmt", "yuv420p"])

        cmd.extend([ffmpeg_escape_filename(filename)])

        popen_params = cross_platform_popen_params(
            {"stdout": sp.DEVNULL, "stderr": logfile, "stdin": sp.PIPE}
        )

        self.proc = sp.Popen(cmd, **popen_params)

    def write_frame(self, img_array):
        """Writes one frame in the file."""
        try:
            self.proc.stdin.write(img_array.tobytes())
        except IOError as err:
            _, ffmpeg_error = self.proc.communicate()
            if ffmpeg_error is not None:
                ffmpeg_error = ffmpeg_error.decode()
            else:
                # The error was redirected to a logfile with `write_logfile=True`,
                # so read the error from that file instead
                self.logfile.seek(0)
                ffmpeg_error = self.logfile.read()

            error = (
                f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
                f"writing file {self.filename}:\n\n {ffmpeg_error}"
            )

            # Enrich the error with a likely diagnosis based on ffmpeg's output.
            if "Unknown encoder" in ffmpeg_error:
                error += (
                    "\n\nThe video export failed because FFMPEG didn't find the "
                    f"specified codec for video encoding {self.codec}. "
                    "Please install this codec or change the codec when calling "
                    "write_videofile.\nFor instance:\n"
                    "  >>> clip.write_videofile('myvid.webm', codec='libvpx')"
                )

            elif "incorrect codec parameters ?" in ffmpeg_error:
                error += (
                    "\n\nThe video export failed, possibly because the codec "
                    f"specified for the video {self.codec} is not compatible with "
                    f"the given extension {self.ext}.\n"
                    "Please specify a valid 'codec' argument in write_videofile.\n"
                    "This would be 'libx264' or 'mpeg4' for mp4, "
                    "'libtheora' for ogv, 'libvpx for webm.\n"
                    "Another possible reason is that the audio codec was not "
                    "compatible with the video codec. For instance, the video "
                    "extensions 'ogv' and 'webm' only allow 'libvorbis' (default) as a"
                    "video codec."
                )

            elif "bitrate not specified" in ffmpeg_error:
                error += (
                    "\n\nThe video export failed, possibly because the bitrate "
                    "specified was too high or too low for the video codec."
                )

            elif "Invalid encoder type" in ffmpeg_error:
                error += (
                    "\n\nThe video export failed because the codec "
                    "or file extension you provided is not suitable for video"
                )

            raise IOError(error)

    def close(self):
        """Closes the writer, terminating the subprocess if is still alive."""
        if self.proc:
            self.proc.stdin.close()
            if self.proc.stderr is not None:
                self.proc.stderr.close()
            self.proc.wait()

        self.proc = None

    # Support the Context Manager protocol, to ensure that resources are cleaned up.

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
|
||||
|
||||
|
||||
def ffmpeg_write_video(
    clip,
    filename,
    fps,
    codec="libx264",
    bitrate=None,
    preset="medium",
    write_logfile=False,
    audiofile=None,
    threads=None,
    ffmpeg_params=None,
    logger="bar",
    pixel_format=None,
):
    """Write the clip to a videofile. See VideoClip.write_videofile for details
    on the parameters.
    """
    logger = proglog.default_bar_logger(logger)

    # Optionally keep ffmpeg's stderr in a sidecar ".log" file for debugging.
    logfile = open(filename + ".log", "w+") if write_logfile else None

    logger(message="MoviePy - Writing video %s\n" % filename)

    has_mask = clip.mask is not None

    with FFMPEG_VideoWriter(
        filename,
        clip.size,
        fps,
        codec=codec,
        preset=preset,
        bitrate=bitrate,
        with_mask=has_mask,
        logfile=logfile,
        audiofile=audiofile,
        threads=threads,
        ffmpeg_params=ffmpeg_params,
        pixel_format=pixel_format,
    ) as writer:
        frame_iterator = clip.iter_frames(
            logger=logger, with_times=True, fps=fps, dtype="uint8"
        )
        for t, frame in frame_iterator:
            if has_mask:
                # Append the mask as a uint8 alpha channel behind the RGB data.
                mask = 255 * clip.mask.get_frame(t)
                if mask.dtype != "uint8":
                    mask = mask.astype("uint8")
                frame = np.dstack([frame, mask])

            writer.write_frame(frame)

    if write_logfile:
        logfile.close()
    logger(message="MoviePy - Done !")
|
||||
|
||||
|
||||
def ffmpeg_write_image(filename, image, logfile=False, pixel_format=None):
    """Writes an image (HxWx3 or HxWx4 numpy array) to a file, using ffmpeg.

    Parameters
    ----------

    filename : str
        Path to the output file.

    image : np.ndarray
        Numpy array with the image data.

    logfile : bool, optional
        Writes the ffmpeg output inside a logging file (``True``) or not
        (``False``).

    pixel_format : str, optional
        Pixel format for ffmpeg. If not defined, it will be discovered checking
        if the image data contains an alpha channel (``"rgba"``) or not
        (``"rgb24"``).
    """
    # ffmpeg consumes raw bytes, so normalize the array to 8-bit first.
    if image.dtype != "uint8":
        image = image.astype("uint8")

    if not pixel_format:
        pixel_format = "rgba" if (image.shape[2] == 4) else "rgb24"

    cmd = [
        FFMPEG_BINARY,
        "-y",
        "-s",
        "%dx%d" % (image.shape[:2][::-1]),  # numpy is (h, w); ffmpeg wants WxH
        "-f",
        "rawvideo",
        "-pix_fmt",
        pixel_format,
        "-i",
        "-",
        ffmpeg_escape_filename(filename),
    ]

    if logfile:
        log_file = open(filename + ".log", "w+")
    else:
        log_file = sp.PIPE

    popen_params = cross_platform_popen_params(
        {"stdout": sp.DEVNULL, "stderr": log_file, "stdin": sp.PIPE}
    )

    proc = sp.Popen(cmd, **popen_params)
    out, err = proc.communicate(image.tobytes())

    if proc.returncode:
        # BUG FIX: report the actual output filename instead of "(unknown)",
        # and don't crash on ``err.decode()`` when stderr went to the logfile
        # (``communicate`` returns None for a non-PIPE stream).
        ffmpeg_error = err.decode() if err is not None else "(stderr written to logfile)"
        error = (
            f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
            f"writing file {filename} with command {cmd}:\n\n {ffmpeg_error}"
        )

        raise IOError(error)

    del proc
|
||||
137
moviepy/video/io/ffplay_previewer.py
Normal file
137
moviepy/video/io/ffplay_previewer.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""
|
||||
On the long term this will implement several methods to make videos
|
||||
out of VideoClips
|
||||
"""
|
||||
|
||||
import subprocess as sp
|
||||
|
||||
from moviepy.config import FFPLAY_BINARY
|
||||
from moviepy.tools import cross_platform_popen_params
|
||||
|
||||
|
||||
class FFPLAY_VideoPreviewer:
    """A class for FFPLAY-based video preview.

    Parameters
    ----------

    size : tuple or list
        Size of the output video in pixels (width, height).

    fps : int
        Frames per second in the output video file.

    pixel_format : str
        Pixel format for the output video file, ``rgb24`` for normal video, ``rgba``
        if video with mask.
    """

    def __init__(
        self,
        size,
        fps,
        pixel_format,
    ):
        width, height = size[0], size[1]

        # Argument order matters to ffplay: all input options come before "-".
        command = [
            FFPLAY_BINARY,
            "-autoexit",  # If you don't precise, ffplay won't stop at end
            "-f",
            "rawvideo",
            "-pixel_format",
            pixel_format,
            "-video_size",
            "%dx%d" % (width, height),
            "-framerate",
            "%.02f" % fps,
            "-",
        ]

        params = cross_platform_popen_params(
            {"stdout": sp.DEVNULL, "stderr": sp.STDOUT, "stdin": sp.PIPE}
        )

        self.proc = sp.Popen(command, **params)

    def show_frame(self, img_array):
        """Writes one frame in the file."""
        try:
            self.proc.stdin.write(img_array.tobytes())
        except IOError as err:
            # The subprocess died; collect its output to build a useful error.
            _, ffplay_error = self.proc.communicate()
            if ffplay_error is not None:
                ffplay_error = ffplay_error.decode()

            error = (
                f"{err}\n\nMoviePy error: FFPLAY encountered the following error while "
                f"previewing clip :\n\n {ffplay_error}"
            )

            raise IOError(error)

    def close(self):
        """Closes the writer, terminating the subprocess if is still alive."""
        if self.proc:
            self.proc.stdin.close()
            if self.proc.stderr is not None:
                self.proc.stderr.close()
            self.proc.wait()

        self.proc = None

    # Support the Context Manager protocol, to ensure that resources are cleaned up.

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
|
||||
|
||||
|
||||
def ffplay_preview_video(
    clip, fps, pixel_format="rgb24", audio_flag=None, video_flag=None
):
    """Preview the clip using ffplay. See VideoClip.preview for details
    on the parameters.

    Parameters
    ----------

    clip : VideoClip
        The clip to preview

    fps : int
        Number of frames per seconds in the displayed video.

    pixel_format : str, optional
        Warning: This is not used anywhere in the code and should probably
        be removed.
        It is believed pixel format rgb24 does not work properly for now because
        it requires applying a mask on CompositeVideoClip and that is believed to
        not be working.

        Pixel format for the output video file, ``rgb24`` for normal video, ``rgba``
        if video with mask

    audio_flag : Thread.Event, optional
        A thread event that video will wait for. If not provided we ignore audio

    video_flag : Thread.Event, optional
        A thread event that video will set after first frame has been shown. If not
        provided, we simply ignore
    """
    with FFPLAY_VideoPreviewer(clip.size, fps, pixel_format) as previewer:
        awaiting_sync = True
        for t, frame in clip.iter_frames(with_times=True, fps=fps, dtype="uint8"):
            previewer.show_frame(frame)

            # Once the first frame is on screen, signal the audio thread (if
            # any) that video is ready, then wait until the audio is too.
            if awaiting_sync:
                awaiting_sync = False

                if video_flag:
                    video_flag.set()  # say to the audio: video is ready

                if audio_flag:
                    audio_flag.wait()  # wait for the audio to be ready
|
||||
20
moviepy/video/io/gif_writers.py
Normal file
20
moviepy/video/io/gif_writers.py
Normal file
@@ -0,0 +1,20 @@
|
||||
"""MoviePy video GIFs writing."""
|
||||
|
||||
import imageio.v3 as iio
|
||||
import proglog
|
||||
|
||||
from moviepy.decorators import requires_duration, use_clip_fps_by_default
|
||||
|
||||
|
||||
@requires_duration
@use_clip_fps_by_default
def write_gif_with_imageio(clip, filename, fps=None, loop=0, logger="bar"):
    """Writes the gif with the Python library ImageIO (calls FreeImage)."""
    logger = proglog.default_bar_logger(logger)

    # imageio/Pillow express the per-frame duration in milliseconds, not seconds.
    frame_duration_ms = 1000 / fps

    with iio.imopen(filename, "w", plugin="pillow") as writer:
        logger(message="MoviePy - Building file %s with imageio." % filename)
        for frame in clip.iter_frames(fps=fps, logger=logger, dtype="uint8"):
            writer.write(frame, duration=frame_duration_ms, loop=loop)
|
||||
0
moviepy/video/tools/__init__.py
Normal file
0
moviepy/video/tools/__init__.py
Normal file
142
moviepy/video/tools/credits.py
Normal file
142
moviepy/video/tools/credits.py
Normal file
@@ -0,0 +1,142 @@
|
||||
"""Contains different functions to make end and opening credits, even though it is
|
||||
difficult to fill everyone needs in this matter.
|
||||
"""
|
||||
|
||||
from moviepy.decorators import convert_path_to_string
|
||||
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
|
||||
from moviepy.video.fx.Resize import Resize
|
||||
from moviepy.video.VideoClip import ImageClip, TextClip
|
||||
|
||||
|
||||
class CreditsClip(TextClip):
    """Credits clip.

    Parameters
    ----------

    creditfile
        A string or path like object pointing to a text file
        whose content must be as follows:

        ..code:: python

            # This is a comment
            # The next line says : leave 4 blank lines
            .blank 4

            ..Executive Story Editor
            MARCEL DURAND

            ..Associate Producers
            MARTIN MARCEL
            DIDIER MARTIN

            ..Music Supervisor
            JEAN DIDIER

    width
        Total width of the credits text in pixels

    gap
        Horizontal gap in pixels between the jobs and the names

    color
        Color of the text. See ``TextClip.list('color')``
        for a list of acceptable names.

    font
        Name of the font to use. See ``TextClip.list('font')`` for
        the list of fonts you can use on your computer.

    font_size
        Size of font to use

    stroke_color
        Color of the stroke (=contour line) of the text. If ``None``,
        there will be no stroke.

    stroke_width
        Width of the stroke, in pixels. Can be a float, like 1.5.

    bg_color
        Color of the background. If ``None``, the background will be transparent.

    Returns
    -------

    image
        An ImageClip instance that looks like this and can be scrolled
        to make some credits: ::

            Executive Story Editor    MARCEL DURAND
               Associate Producers    MARTIN MARCEL
                                      DIDIER MARTIN
                  Music Supervisor    JEAN DIDIER

    """

    @convert_path_to_string("creditfile")
    def __init__(
        self,
        creditfile,
        width,
        color="white",
        stroke_color="black",
        stroke_width=2,
        font="Impact-Normal",
        font_size=60,
        bg_color=None,
        gap=0,
    ):
        # Parse the .txt file.
        # ``texts`` accumulates [left-column text, right-column text] pairs,
        # one per source line, so the two columns can later be joined
        # independently while staying vertically aligned.
        texts = []
        one_line = True

        with open(creditfile) as file:
            for line in file:
                if line.startswith(("\n", "#")):
                    # exclude blank lines or comments
                    continue
                elif line.startswith(".blank"):
                    # ..blank n
                    # Insert n empty rows in BOTH columns to keep them aligned.
                    for i in range(int(line.split(" ")[1])):
                        texts.append(["\n", "\n"])
                elif line.startswith(".."):
                    # A "..Job Title" line starts a new entry in the left column.
                    texts.append([line[2:], ""])
                    one_line = True
                elif one_line:
                    # First name under a job title: same visual row as the title.
                    texts.append(["", line])
                    one_line = False
                else:
                    # Additional names: new row on the right, blank row on the left.
                    texts.append(["\n", line])

        left, right = ("".join(line) for line in zip(*texts))

        # Make two columns for the credits
        left, right = [
            TextClip(
                text=txt,
                color=color,
                stroke_color=stroke_color,
                stroke_width=stroke_width,
                font=font,
                font_size=font_size,
                text_align=align,
            )
            for txt, align in [(left, "left"), (right, "right")]
        ]

        # Place the right column ``gap`` pixels after the left one.
        both_columns = CompositeVideoClip(
            [left, right.with_position((left.w + gap, 0))],
            size=(left.w + right.w + gap, right.h),
            bg_color=bg_color,
        )

        # Scale to the required size
        scaled = both_columns.with_effects([Resize(width=width)])

        # Transform the CompositeVideoClip into an ImageClip

        # Calls ImageClip.__init__()
        # NOTE: ``super(TextClip, self)`` deliberately skips TextClip.__init__
        # and initializes the ImageClip base directly from the rendered frame.
        super(TextClip, self).__init__(scaled.get_frame(0))
        self.mask = ImageClip(scaled.mask.get_frame(0), is_mask=True)
|
||||
522
moviepy/video/tools/cuts.py
Normal file
522
moviepy/video/tools/cuts.py
Normal file
@@ -0,0 +1,522 @@
|
||||
"""Contains everything that can help automate the cuts in MoviePy."""
|
||||
|
||||
from collections import defaultdict
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.decorators import convert_parameter_to_seconds, use_clip_fps_by_default
|
||||
|
||||
|
||||
@use_clip_fps_by_default
@convert_parameter_to_seconds(["start_time"])
def find_video_period(clip, fps=None, start_time=0.3):
    """Find the period of a video based on frames correlation.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
        Clip for which the video period will be computed.

    fps : int, optional
        Number of frames per second used computing the period. Higher values will
        produce more accurate periods, but the execution time will be longer.

    start_time : float, optional
        First timeframe used to calculate the period of the clip.

    Examples
    --------

    .. code:: python

        from moviepy import *
        from moviepy.video.tools.cuts import find_video_period

        clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2)
        round(videotools.find_video_period(clip, fps=80), 6)
        1
    """

    def flat_frame(t):
        return clip.get_frame(t).flatten()

    # Sample candidate times and keep the one whose frame correlates best
    # with the reference frame at t=0.
    sample_times = np.arange(start_time, clip.duration, 1 / fps)[1:]
    reference = flat_frame(0)
    correlations = [
        np.corrcoef(reference, flat_frame(t))[0, 1] for t in sample_times
    ]
    return sample_times[np.argmax(correlations)]
|
||||
|
||||
|
||||
class FramesMatch:
    """Frames match inside a set of frames.

    Parameters
    ----------

    start_time : float
        Starting time.

    end_time : float
        End time.

    min_distance : float
        Lower bound on the distance between the first and last frames

    max_distance : float
        Upper bound on the distance between the first and last frames
    """

    def __init__(self, start_time, end_time, min_distance, max_distance):
        self.start_time = start_time
        self.end_time = end_time
        self.min_distance = min_distance
        self.max_distance = max_distance
        # Convenience attribute: duration of the matching segment.
        self.time_span = end_time - start_time

    def _as_tuple(self):
        # Canonical tuple form shared by __str__, __iter__ and __eq__.
        return (self.start_time, self.end_time, self.min_distance, self.max_distance)

    def __str__(self):  # pragma: no cover
        return "(%.04f, %.04f, %.04f, %.04f)" % self._as_tuple()

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __iter__(self):  # pragma: no cover
        return iter(self._as_tuple())

    def __eq__(self, other):
        return self._as_tuple() == (
            other.start_time,
            other.end_time,
            other.min_distance,
            other.max_distance,
        )
|
||||
|
||||
|
||||
class FramesMatches(list):
|
||||
"""Frames matches inside a set of frames.
|
||||
|
||||
You can instantiate it passing a list of FramesMatch objects or
|
||||
using the class methods ``load`` and ``from_clip``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
lst : list
|
||||
Iterable of FramesMatch objects.
|
||||
"""
|
||||
|
||||
def __init__(self, lst):
    """Store the given matches sorted by increasing ``max_distance``."""
    ordered = sorted(lst, key=lambda match: match.max_distance)
    list.__init__(self, ordered)
|
||||
|
||||
def best(self, n=1, percent=None):
    """Returns a new instance of FramesMatches object or a FramesMatch
    from the current class instance given different conditions.

    By default returns the first FramesMatch that the current instance
    stores.

    Parameters
    ----------

    n : int, optional
        Number of matches to retrieve from the current FramesMatches object.
        Only has effect when ``percent=None``.

    percent : float, optional
        Percent of the current match to retrieve.

    Returns
    -------

    FramesMatch or FramesMatches : If the number of matches to retrieve is
        greater than 1 returns a FramesMatches object, otherwise a
        FramesMatch.

    """
    # ``percent`` overrides ``n``: take that fraction of the stored matches.
    if percent is not None:
        n = len(self) * percent / 100
    if n == 1:
        return self[0]
    return FramesMatches(self[: int(n)])
|
||||
|
||||
def filter(self, condition):
    """Return a FramesMatches object obtained by filtering out the
    FramesMatch which do not satistify a condition.

    Parameters
    ----------

    condition : func
        Function which takes a FrameMatch object as parameter and returns a
        bool.

    Examples
    --------
    .. code:: python

        # Only keep the matches corresponding to (> 1 second) sequences.
        new_matches = matches.filter(lambda match: match.time_span > 1)
    """
    kept = [match for match in self if condition(match)]
    return FramesMatches(kept)
|
||||
|
||||
def save(self, filename):
    """Save a FramesMatches object to a file.

    Parameters
    ----------

    filename : str
        Path to the file in which will be dumped the FramesMatches object data.
    """
    # One row per match: start, end, min_distance, max_distance.
    rows = np.array([np.array(list(match)) for match in self])
    np.savetxt(filename, rows, fmt="%.03f", delimiter="\t")
|
||||
|
||||
@staticmethod
def load(filename):
    """Load a FramesMatches object from a file.

    Parameters
    ----------

    filename : str
        Path to the file to use loading a FramesMatches object.

    Examples
    --------
    >>> matching_frames = FramesMatches.load("somefile")
    """
    # Each saved row unpacks directly into a FramesMatch constructor call.
    data = np.loadtxt(filename)
    return FramesMatches(FramesMatch(*row) for row in data)
|
||||
|
||||
@staticmethod
def from_clip(clip, distance_threshold, max_duration, fps=None, logger="bar"):
    """Finds all the frames that look alike in a clip, for instance to make
    a looping GIF.

    Parameters
    ----------

    clip : moviepy.video.VideoClip.VideoClip
        A MoviePy video clip.

    distance_threshold : float
        Distance above which a match is rejected.

    max_duration : float
        Maximal duration (in seconds) between two matching frames.

    fps : int, optional
        Frames per second (default will be ``clip.fps``).

    logger : str, optional
        Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.

    Returns
    -------

    FramesMatches
        All pairs of frames with ``end_time - start_time < max_duration``
        and whose distance is under ``distance_threshold``.

    Examples
    --------

    We find all matching frames in a given video and turn the best match
    with a duration of 1.5 seconds or more into a GIF:

    .. code:: python

        from moviepy import VideoFileClip
        from moviepy.video.tools.cuts import FramesMatches

        clip = VideoFileClip("foo.mp4").resize(width=200)
        matches = FramesMatches.from_clip(
            clip, distance_threshold=10, max_duration=3, # will take time
        )
        best = matches.filter(lambda m: m.time_span > 1.5).best()
        clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif")
    """
    # Normalization factor for the dot products (3 channels per pixel;
    # assumes RGB frames — TODO confirm for mask clips).
    N_pixels = clip.w * clip.h * 3

    def dot_product(F1, F2):
        # Mean per-channel product of two flattened frames.
        return (F1 * F2).sum() / N_pixels

    frame_dict = {}  # will store the frames and their mutual distances

    def distance(t1, t2):
        # Euclidean-style distance via ||u - v||^2 = |u|^2 + |v|^2 - 2<u, v>,
        # using the cached squared norms and flattened frames.
        uv = dot_product(frame_dict[t1]["frame"], frame_dict[t2]["frame"])
        u, v = frame_dict[t1]["|F|sq"], frame_dict[t2]["|F|sq"]
        return np.sqrt(u + v - 2 * uv)

    matching_frames = []  # the final result.

    for t, frame in clip.iter_frames(with_times=True, logger=logger):
        # Work in float to avoid uint8 overflow in the dot products.
        flat_frame = 1.0 * frame.flatten()
        F_norm_sq = dot_product(flat_frame, flat_frame)
        F_norm = np.sqrt(F_norm_sq)

        for t2 in list(frame_dict.keys()):
            # forget old frames, add 't' to the others frames
            # check for early rejections based on differing norms
            if (t - t2) > max_duration:
                frame_dict.pop(t2)
            else:
                # Triangle-inequality bounds on d(F(t2), F(t)) computed from
                # the norms alone, before any expensive exact distance.
                frame_dict[t2][t] = {
                    "min": abs(frame_dict[t2]["|F|"] - F_norm),
                    "max": frame_dict[t2]["|F|"] + F_norm,
                }
                frame_dict[t2][t]["rejected"] = (
                    frame_dict[t2][t]["min"] > distance_threshold
                )

        t_F = sorted(frame_dict.keys())

        # Cache the current frame AFTER bounding it against the others.
        frame_dict[t] = {"frame": flat_frame, "|F|sq": F_norm_sq, "|F|": F_norm}

        for i, t2 in enumerate(t_F):
            # Compare F(t) to all the previous frames

            if frame_dict[t2][t]["rejected"]:
                continue

            dist = distance(t, t2)
            # Exact distance computed: collapse the bounds onto it.
            frame_dict[t2][t]["min"] = frame_dict[t2][t]["max"] = dist
            frame_dict[t2][t]["rejected"] = dist >= distance_threshold

            for t3 in t_F[i + 1 :]:
                # For all the next times t3, use d(F(t), F(end_time)) to
                # update the bounds on d(F(t), F(t3)). See if you can
                # conclude on whether F(t) and F(t3) match.
                t3t, t2t3 = frame_dict[t3][t], frame_dict[t2][t3]
                t3t["max"] = min(t3t["max"], dist + t2t3["max"])
                t3t["min"] = max(t3t["min"], dist - t2t3["max"], t2t3["min"] - dist)

                if t3t["min"] > distance_threshold:
                    t3t["rejected"] = True

        # Store all the good matches (end_time,t)
        matching_frames += [
            (t1, t, frame_dict[t1][t]["min"], frame_dict[t1][t]["max"])
            for t1 in frame_dict
            if (t1 != t) and not frame_dict[t1][t]["rejected"]
        ]

    return FramesMatches([FramesMatch(*e) for e in matching_frames])
|
||||
|
||||
def select_scenes(
    self, match_threshold, min_time_span, nomatch_threshold=None, time_distance=0
):
    """Select the scenes at which a video clip can be reproduced in the
    smoothest possible way, mainly oriented to the creation of GIF images.

    Parameters
    ----------

    match_threshold : float
        Maximum distance possible between frames. The smaller, the
        better-looping the GIFs are.

    min_time_span : float
        Minimum duration for a scene. Only matches spanning longer than
        this value are extracted.

    nomatch_threshold : float, optional
        Minimum distance possible between frames. Defaults to
        ``match_threshold`` when ``None``.

    time_distance : float, optional
        Minimum time offset possible between matches.

    Returns
    -------

    FramesMatches : New instance of the class with the selected scenes.
    """
    if nomatch_threshold is None:
        nomatch_threshold = match_threshold

    # Group every match by its starting time: start -> [end, dmin, dmax].
    by_start = defaultdict(list)
    for start, end, dmin, dmax in self:
        by_start[start].append([end, dmin, dmax])

    selected = []
    next_allowed_start = 0
    for start, candidates in sorted(by_start.items(), key=lambda item: item[0]):
        # Enforce the minimum time offset between successive selections.
        if start < next_allowed_start:
            continue

        # Matches that are both close enough and long enough.
        good = [cand for cand in candidates if cand[2] < match_threshold]
        good_long = [cand for cand in good if (cand[0] - start) > min_time_span]
        if not good_long:
            continue  # no GIF can be made starting at this time

        # Require at least one nearby end that is a clear non-match,
        # so the scene actually moves before it loops.
        bad_ends = {end for end, dmin, dmax in candidates if dmin > nomatch_threshold}
        near_ends = {end for end, dmin, dmax in candidates if (end - start) <= 0.6}
        if not (bad_ends & near_ends):
            continue

        # Keep the longest of the good matches (first one on ties).
        end, dmin, dmax = max(good_long, key=lambda cand: cand[0])
        selected.append(FramesMatch(start, end, dmin, dmax))
        next_allowed_start = start + time_distance

    return FramesMatches(selected)
|
||||
|
||||
def write_gifs(self, clip, gifs_dir, **kwargs):
    """Extract the matching scenes represented by the instance from a clip
    and write each one as a GIF file in a directory.

    Parameters
    ----------

    clip : video.VideoClip.VideoClip
        A video clip whose frame scenes you want to obtain as GIF images.

    gifs_dir : str
        Directory in which the GIF images will be written.

    kwargs
        Passed as ``clip.write_gif`` optional arguments.
    """
    for (start, end, _, _) in self:
        # File name encodes start/end times in hundredths of a second.
        gif_name = "%s/%08d_%08d.gif" % (gifs_dir, 100 * start, 100 * end)
        scene = clip.subclipped(start, end)
        scene.write_gif(gif_name, **kwargs)
|
||||
|
||||
|
||||
@use_clip_fps_by_default
def detect_scenes(
    clip=None, luminosities=None, luminosity_threshold=10, logger="bar", fps=None
):
    """Detects scenes of a clip based on luminosity changes.

    Note that for large clips this may take some time.

    Parameters
    ----------

    clip : video.VideoClip.VideoClip, optional
        A video clip. Can be None if a list of luminosities is provided
        instead. If provided, the luminosity of each frame of the clip
        will be computed. If the clip has no 'fps' attribute, you must
        provide it.

    luminosities : list, optional
        A list of luminosities, e.g. returned by ``detect_scenes`` in a
        previous run.

    luminosity_threshold : float, optional
        A scene change is defined as a luminosity jump between two
        consecutive frames larger than (avg * threshold), where avg is
        the average absolute change between consecutive frames.

    logger : str, optional
        Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.

    fps : int, optional
        Frames per second value. Must be provided if you provide no clip
        or a clip without fps attribute.

    Returns
    -------

    tuple : cuts, luminosities
        cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)];
        luminosities are the luminosities computed for each frame.
    """
    if luminosities is None:
        luminosities = [
            frame.sum()
            for frame in clip.iter_frames(fps=fps, dtype="uint32", logger=logger)
        ]

    luminosities = np.array(luminosities, dtype=float)
    end = clip.duration if clip is not None else len(luminosities) * (1.0 / fps)

    diffs = abs(np.diff(luminosities))
    jump_level = luminosity_threshold * diffs.mean()
    # Indices are shifted by one because diff[i] compares frames i and i+1.
    jump_indices = 1 + np.array(np.nonzero(diffs > jump_level))[0]
    timings = [0] + list((1.0 / fps) * jump_indices) + [end]
    cuts = list(zip(timings, timings[1:]))
    return cuts, luminosities
|
||||
319
moviepy/video/tools/drawing.py
Normal file
319
moviepy/video/tools/drawing.py
Normal file
@@ -0,0 +1,319 @@
|
||||
"""Deals with making images (np arrays). It provides drawing
|
||||
methods that are difficult to do with the existing Python libraries.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def color_gradient(
    size,
    p1,
    p2=None,
    vector=None,
    radius=None,
    color_1=0.0,
    color_2=1.0,
    shape="linear",
    offset=0,
):
    """Draw a linear, bilinear, or radial gradient.

    The result is a picture of size ``size``, whose color varies
    gradually from color ``color_1`` in position ``p1`` to color
    ``color_2`` in position ``p2``.

    If it is a RGB picture the result must be transformed into
    a 'uint8' array to be displayed normally.

    Parameters
    ----------

    size : tuple or list
        Size (width, height) in pixels of the final image array.

    p1 : tuple or list
        Position (x, y) in pixels of the gradient's first reference
        point. The color 'before' ``p1`` is ``color_1`` and it gradually
        changes in the direction of ``p2`` until it is ``color_2`` when
        it reaches ``p2``.

    p2 : tuple or list, optional
        Position (x, y) in pixels of the limit point for ``color_1`` and
        ``color_2``.

    vector : tuple or list, optional
        A vector (x, y) in pixels that can be provided instead of ``p2``.
        ``p2`` is then defined as (p1 + vector).

    radius : float, optional
        If ``shape="radial"``, the radius of the gradient, in pixels.

    color_1 : float or tuple or list, optional
        Starting color for the gradient. As default, black. Either floats
        between 0 and 1 (for gradients used in masks) or [R, G, B] arrays
        (for colored gradients).

    color_2 : float or tuple or list, optional
        Color for the second point in the gradient. As default, white.
        Same formats as ``color_1``.

    shape : str, optional
        Shape of the gradient. Can be either ``"linear"``, ``"bilinear"``
        or ``"radial"``. In a linear gradient the color varies in one
        direction, from point ``p1`` to point ``p2``. In a bilinear
        gradient it also varies symmetrically from ``p1`` in the other
        direction. In a radial gradient it goes from ``color_1`` to
        ``color_2`` in all directions.

    offset : float, optional
        Real number between 0 and 1 indicating the fraction of the vector
        (or radius) at which the gradient actually starts; everything
        before that point is pure ``color_1``. For instance, an offset of
        0.9 in a radial gradient produces a blurry disc whose gradient
        only occurs between 90% and 100% of the radius.

    Returns
    -------

    image : numpy.ndarray
        Array of shape (height, width) — or (height, width, n_colors) for
        RGB colors — of type float, representing the gradient image.

    Raises
    ------

    ValueError
        If ``shape`` is not one of ``"linear"``, ``"bilinear"``,
        ``"radial"``, or if neither ``p2`` nor ``vector`` is provided for
        a (bi)linear gradient.

    Examples
    --------

    .. code:: python

        color_gradient((10, 1), (0, 0), p2=(10, 0))  # from white to black
        # [[1.  0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]
    """
    w, h = size

    color_1 = np.array(color_1).astype(float)
    color_2 = np.array(color_2).astype(float)

    if shape == "bilinear":
        if vector is None:
            if p2 is None:
                raise ValueError("You must provide either 'p2' or 'vector'")
            vector = np.array(p2) - np.array(p1)

        # A bilinear gradient is the pointwise maximum of two opposite
        # linear mask gradients emanating from p1.
        m1, m2 = [
            color_gradient(
                size,
                p1,
                vector=v,
                color_1=1.0,
                color_2=0.0,
                shape="linear",
                offset=offset,
            )
            for v in [vector, [-component for component in vector]]
        ]

        arr = np.maximum(m1, m2)
        if color_1.size > 1:
            arr = np.dstack(3 * [arr])
        return arr * color_1 + (1 - arr) * color_2

    # Switch (x, y) coordinates to (y, x) to match the row-major array layout.
    p1 = np.array(p1[::-1]).astype(float)

    # M[j, i] = (j, i): per-pixel coordinates in (row, col) order.
    M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)

    if shape == "linear":
        if vector is None:
            if p2 is not None:
                vector = np.array(p2[::-1]) - p1
            else:
                raise ValueError("You must provide either 'p2' or 'vector'")
        else:
            vector = np.array(vector[::-1])

        norm = np.linalg.norm(vector)
        # Projecting on vector/norm**2 maps p1 -> 0 and p1+vector -> 1.
        n_vec = vector / norm**2

        p1 = p1 + offset * vector
        arr = (M - p1).dot(n_vec) / (1 - offset)
        arr = np.minimum(1, np.maximum(0, arr))
        if color_1.size > 1:
            arr = np.dstack(3 * [arr])
        return arr * color_1 + (1 - arr) * color_2

    elif shape == "radial":
        if (radius or 0) == 0:
            # Degenerate radius: the whole image is color_2.
            arr = np.ones((h, w))
        else:
            arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
            arr = arr / ((1 - offset) * radius)
            arr = np.minimum(1.0, np.maximum(0, arr))

        if color_1.size > 1:
            arr = np.dstack(3 * [arr])
        return (1 - arr) * color_1 + arr * color_2
    raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
|
||||
|
||||
|
||||
def color_split(
    size,
    x=None,
    y=None,
    p1=None,
    p2=None,
    vector=None,
    color_1=0,
    color_2=1.0,
    gradient_width=0,
):
    """Make an image split in 2 colored regions.

    Returns an array of size ``size`` divided in two regions called 1 and
    2 in what follows, which will have colors ``color_1`` and ``color_2``
    respectively.

    Parameters
    ----------

    size : tuple or list
        Size (width, height) in pixels of the final image array.

    x : int, optional
        If provided, the image is split horizontally in x, the left
        region being region 1.

    y : int, optional
        If provided, the image is split vertically in y, the top region
        being region 1.

    p1, p2 : tuple or list, optional
        Positions (x1, y1), (x2, y2) in pixels, where the numbers can be
        floats. Region 1 is defined as the whole region on the left when
        going from ``p1`` to ``p2``.

    p1, vector : tuple or list, optional
        ``p1`` is (x1, y1) and vector (v1, v2), where the numbers can be
        floats. Region 1 is then the region on the left when starting in
        position ``p1`` and going in the direction given by ``vector``.

    color_1, color_2 : float or tuple or list, optional
        Colors of regions 1 and 2: floats (for masks) or [R, G, B].

    gradient_width : float, optional
        If not zero, the split is not sharp, but gradual over a region of
        width ``gradient_width`` (in pixels). This is preferable in many
        situations (for instance for antialiasing).

    Examples
    --------

    .. code:: python

        size = [200, 200]

        # an image with all pixels with x<50 =0, the others =1
        color_split(size, x=50, color_1=0, color_2=1)

        # an image with all pixels with y<50 red, the others green
        color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0])

        # An image split along an arbitrary line (see below)
        color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1)
    """
    if gradient_width or ((x is None) and (y is None)):
        # Gradual split: delegate to a thin linear gradient perpendicular
        # to the dividing line.
        if p2 is not None:
            vector = np.array(p2) - np.array(p1)
        elif x is not None:
            vector = np.array([0, -1.0])
            p1 = np.array([x, 0])
        elif y is not None:
            vector = np.array([1.0, 0.0])
            p1 = np.array([0, y])

        x, y = vector
        # Rotate the direction by -90 degrees: the gradient runs across
        # the dividing line, not along it.
        vector = np.array([y, -x]).astype("float")
        norm = np.linalg.norm(vector)
        vector = max(0.1, gradient_width) * vector / norm
        return color_gradient(
            size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear"
        )
    else:
        w, h = size
        shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))
        arr = np.zeros(shape)
        # Use explicit None checks: x=0 or y=0 are valid split positions
        # (the whole image is then region 2). Truthiness tests would
        # silently return an all-zeros array for them.
        if x is not None:
            arr[:, :x] = color_1
            arr[:, x:] = color_2
        elif y is not None:
            arr[:y] = color_1
            arr[y:] = color_2
        return arr
|
||||
|
||||
|
||||
def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):
    """Draw an image with a circle.

    Draws a circle of color ``color``, on a background of color
    ``bg_color``, on a screen of size ``screensize`` at the position
    ``center=(x, y)``, with a radius ``radius`` but slightly blurred on
    the border by ``blur`` pixels.

    Parameters
    ----------

    screensize : tuple or list
        Size of the canvas.

    center : tuple or list
        Center of the circle.

    radius : float
        Radius of the circle, in pixels.

    color : float or tuple, optional
        Color of the circle. As default, white.

    bg_color : float or tuple, optional
        Color for the background of the canvas. As default, black.

    blur : float, optional
        Blur for the border of the circle.
    """
    # The blur is obtained by starting the radial gradient at
    # (radius - blur) / radius of the radius (no blur for radius 0).
    if radius:
        offset = (radius - blur) / (1.0 * radius)
    else:
        offset = 0

    return color_gradient(
        screensize,
        p1=center,
        radius=radius,
        color_1=color,
        color_2=bg_color,
        shape="radial",
        offset=offset,
    )
|
||||
238
moviepy/video/tools/interpolators.py
Normal file
238
moviepy/video/tools/interpolators.py
Normal file
@@ -0,0 +1,238 @@
|
||||
"""Classes for easy interpolation of trajectories and curves."""
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
class Interpolator:
    """Poorman's linear interpolator.

    Parameters
    ----------

    tt : list, optional
        List of time frames for the interpolator.

    ss : list, optional
        List of values for the interpolator.

    ttss : list, optional
        List of (time, value) pairs; may be passed instead of ``tt``
        and ``ss`` to build the interpolator from a single argument.

    left : float, optional
        Value to return when ``t < tt[0]``.

    right : float, optional
        Value to return when ``t > tt[-1]``.

    Examples
    --------

    .. code:: python

        # instantiate using `tt` and `ss`
        interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])

        # instantiate using `ttss`
        interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]])  # [t, value]
    """

    def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
        if ttss is not None:
            # Split (time, value) pairs into separate sequences.
            tt, ss = zip(*ttss)

        # Store as float arrays so np.interp gets numeric input.
        self.tt = np.array(tt) * 1.0
        self.ss = np.array(ss) * 1.0
        self.left = left
        self.right = right
        self.tmin = min(tt)
        self.tmax = max(tt)

    def __call__(self, t):
        """Interpolate the value at time ``t``.

        Parameters
        ----------

        t : float
            Time frame for which the corresponding value is returned.
        """
        return np.interp(t, self.tt, self.ss, self.left, self.right)
|
||||
|
||||
|
||||
class Trajectory:
    """Trajectory compound by time frames and (x, y) pixels.

    It's designed as an interpolator, so you can get the position at a
    given time ``t``. You can instantiate it from a file using the
    methods ``from_file`` and ``load_list``.

    Parameters
    ----------

    tt : list or numpy.ndarray
        Time frames.

    xx : list or numpy.ndarray
        X positions in the trajectory.

    yy : list or numpy.ndarray
        Y positions in the trajectory.

    Examples
    --------

    >>> trajectory = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
    """

    def __init__(self, tt, xx, yy):
        # Times as floats; positions kept as given.
        self.tt = np.array(tt) * 1.0
        self.xx = np.array(xx)
        self.yy = np.array(yy)
        self.update_interpolators()

    def __call__(self, t):
        """Interpolate the trajectory at the given time ``t``.

        Parameters
        ----------

        t : float
            Time for which the corresponding position is returned.
        """
        return np.array([self.xi(t), self.yi(t)])

    def addx(self, x):
        """Return a new trajectory with ``x`` added to every X position.

        Parameters
        ----------

        x : int
            Value added to ``xx`` in the trajectory.

        Returns
        -------

        Trajectory : new instance with the shifted X positions.
        """
        return Trajectory(self.tt, self.xx + x, self.yy)

    def addy(self, y):
        """Return a new trajectory with ``y`` added to every Y position.

        Parameters
        ----------

        y : int
            Value added to ``yy`` in the trajectory.

        Returns
        -------

        Trajectory : new instance with the shifted Y positions.
        """
        return Trajectory(self.tt, self.xx, self.yy + y)

    def update_interpolators(self):
        """Update the internal X and Y position interpolators."""
        self.xi = Interpolator(self.tt, self.xx)
        self.yi = Interpolator(self.tt, self.yy)

    def txy(self, tms=False):
        """Return an iterator of (time, x, y) triples.

        Parameters
        ----------

        tms : bool, optional
            If ``True``, times are returned in milliseconds.
        """
        factor = 1000 if tms else 1
        return zip(factor * self.tt, self.xx, self.yy)

    def to_file(self, filename):
        """Save the trajectory data in a tab-separated text file.

        Parameters
        ----------

        filename : str
            Path to the location of the new trajectory text file.
        """
        rows = np.array(list(self.txy(tms=True)))
        np.savetxt(filename, rows, fmt="%d", delimiter="\t")

    @staticmethod
    def from_file(filename):
        """Instantiate a Trajectory from a data text file.

        Parameters
        ----------

        filename : str
            Path of the trajectory text file to load.

        Returns
        -------

        Trajectory : new instance loaded from the text file.
        """
        tt, xx, yy = np.loadtxt(filename, delimiter="\t").T
        # Stored times are in milliseconds; convert back to seconds.
        return Trajectory(1.0 * tt / 1000, xx, yy)

    @staticmethod
    def save_list(trajs, filename):
        """Save a set of trajectories into a single text file.

        Parameters
        ----------

        trajs : list
            List of trajectories to be saved.

        filename : str
            Path of the text file that will store the trajectories data.
        """
        count = len(trajs)
        # One (t, x, y) column triple per trajectory, stacked side by side.
        columns = np.hstack([np.array(list(t.txy(tms=True))) for t in trajs])
        np.savetxt(
            filename,
            columns,
            fmt="%d",
            delimiter="\t",
            header="\t".join(count * ["t(ms)", "x", "y"]),
        )

    @staticmethod
    def load_list(filename):
        """Load a list of trajectories from a data text file.

        Parameters
        ----------

        filename : str
            Path of the text file storing the data of a set of
            trajectories (as written by ``save_list``).

        Returns
        -------

        list : List of trajectories loaded from the file.
        """
        data = np.loadtxt(filename, delimiter="\t").T
        n_lines = data.shape[0]
        # Every 3 rows (t, x, y) describe one trajectory.
        return [
            Trajectory(tt=1.0 * rows[0] / 1000, xx=rows[1], yy=rows[2])
            for rows in np.split(data, n_lines / 3)
        ]
|
||||
198
moviepy/video/tools/subtitles.py
Normal file
198
moviepy/video/tools/subtitles.py
Normal file
@@ -0,0 +1,198 @@
|
||||
"""Experimental module for subtitles support."""
|
||||
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
|
||||
from moviepy.decorators import convert_path_to_string
|
||||
from moviepy.tools import convert_to_seconds
|
||||
from moviepy.video.VideoClip import TextClip, VideoClip
|
||||
|
||||
|
||||
class SubtitlesClip(VideoClip):
    """A Clip that serves as "subtitle track" in videos.

    One particularity of this class is that the images of the
    subtitle texts are not generated beforehand, but only if
    needed (they are lazily built and cached in ``self.textclips``).

    Parameters
    ----------

    subtitles
        Either the name of a file as a string or path-like object, or a list
        of ``((start_time, end_time), text)`` entries.

    font
        Path to a font file to be used. Optional if make_textclip is provided.

    make_textclip
        A custom function to use for text clip generation. If None, a TextClip
        will be generated.

        The function must take a text as argument and return a VideoClip
        to be used as caption

    encoding
        Optional, specifies srt file encoding.
        Any standard Python encoding is allowed (listed at
        https://docs.python.org/3.8/library/codecs.html#standard-encodings)

    Examples
    --------

    .. code:: python

        from moviepy.video.tools.subtitles import SubtitlesClip
        from moviepy.video.io.VideoFileClip import VideoFileClip
        generator = lambda text: TextClip(text, font='./path/to/font.ttf',
                                          font_size=24, color='white')
        sub = SubtitlesClip("subtitles.srt", make_textclip=generator, encoding='utf-8')
        myvideo = VideoFileClip("myvideo.avi")
        final = CompositeVideoClip([clip, subtitles])
        final.write_videofile("final.mp4", fps=myvideo.fps)

    """

    def __init__(self, subtitles, font=None, make_textclip=None, encoding=None):
        # has_constant_size=False: subtitle images may differ in size per text.
        VideoClip.__init__(self, has_constant_size=False)

        if not isinstance(subtitles, list):
            # `subtitles` is a string or path-like object: parse the .srt file.
            subtitles = file_to_subtitles(subtitles, encoding=encoding)

        self.subtitles = subtitles
        # Cache of already-rendered text clips, keyed by subtitle entry.
        self.textclips = dict()

        self.font = font

        if make_textclip is None:
            if self.font is None:
                raise ValueError("Argument font is required if make_textclip is None.")

            # Default renderer: white text with a thin black stroke.
            def make_textclip(txt):
                return TextClip(
                    font=self.font,
                    text=txt,
                    font_size=24,
                    color="#ffffff",
                    stroke_color="#000000",
                    stroke_width=1,
                )

        self.make_textclip = make_textclip
        self.start = 0
        # The track lasts until the last subtitle's end time.
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            """Will generate a textclip if it hasn't been generated asked
            to generate it yet. If there is no subtitle to show at t, return
            false.
            """
            # First look among clips already rendered, then among all subtitles.
            sub = [
                ((text_start, text_end), text)
                for ((text_start, text_end), text) in self.textclips.keys()
                if (text_start <= t < text_end)
            ]
            if not sub:
                sub = [
                    ((text_start, text_end), text)
                    for ((text_start, text_end), text) in self.subtitles
                    if (text_start <= t < text_end)
                ]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def frame_function(t):
            # 1x1 black pixel when no subtitle is active at t.
            sub = add_textclip_if_none(t)
            return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])

        self.frame_function = frame_function
        # Render a probe clip ("T") once to find out whether text clips
        # produced by make_textclip carry a mask.
        hasmask = bool(self.make_textclip("T").mask)
        self.mask = VideoClip(make_mask_frame, is_mask=True) if hasmask else None

    def in_subclip(self, start_time=None, end_time=None):
        """Returns a sequence of [(t1,t2), text] covering all the given subclip
        from start_time to end_time. The first and last times will be cropped so as
        to be exactly start_time and end_time if possible.
        """

        def is_in_subclip(t1, t2):
            # The try/except guards against None bounds (no cropping window).
            try:
                return (start_time <= t1 < end_time) or (start_time < t2 <= end_time)
            except Exception:
                return False

        def try_cropping(t1, t2):
            try:
                return max(t1, start_time), min(t2, end_time)
            except Exception:
                return t1, t2

        return [
            (try_cropping(t1, t2), txt)
            for ((t1, t2), txt) in self.subtitles
            if is_in_subclip(t1, t2)
        ]

    def __iter__(self):
        # Iterating the clip yields its ((start, end), text) entries.
        return iter(self.subtitles)

    def __getitem__(self, k):
        return self.subtitles[k]

    def __str__(self):
        # NOTE(review): convert_to_seconds on numeric times is essentially an
        # identity conversion, so this is not standard SRT timestamp
        # formatting — confirm whether HH:MM:SS,mmm output was intended.
        def to_srt(sub_element):
            (start_time, end_time), text = sub_element
            formatted_start_time = convert_to_seconds(start_time)
            formatted_end_time = convert_to_seconds(end_time)
            return "%s - %s\n%s" % (formatted_start_time, formatted_end_time, text)

        return "\n\n".join(to_srt(sub) for sub in self.subtitles)

    def match_expr(self, expr):
        """Matches a regular expression against the subtitles of the clip."""
        # NOTE(review): the new SubtitlesClip is built without font or
        # make_textclip, so __init__ will raise ValueError — verify whether
        # self.font / self.make_textclip should be forwarded here.
        return SubtitlesClip(
            [sub for sub in self.subtitles if re.findall(expr, sub[1]) != []]
        )

    def write_srt(self, filename):
        """Writes an ``.srt`` file with the content of the clip."""
        with open(filename, "w+") as file:
            file.write(str(self))
||||
|
||||
|
||||
@convert_path_to_string("filename")
def file_to_subtitles(filename, encoding=None):
    """Converts a srt file into subtitles.

    The returned list is of the form ``[((start_time,end_time),'some text'),...]``
    and can be fed to SubtitlesClip.

    Only works for '.srt' format for the moment.
    """
    subtitles = []
    current_times = None
    current_text = ""
    with open(filename, "r", encoding=encoding) as file:
        for line in file:
            # A timing line ("00:00:01,000 --> 00:00:03,000") starts a new entry.
            timestamps = re.findall("([0-9]*:[0-9]*:[0-9]*,[0-9]*)", line)
            if timestamps:
                current_times = [convert_to_seconds(stamp) for stamp in timestamps]
            elif not line.strip():
                # Blank line: the current subtitle entry is complete.
                subtitles.append((current_times, current_text.strip("\n")))
                current_times = None
                current_text = ""
            elif current_times:
                # Text lines accumulate until the next blank line.
                current_text += line
    return subtitles
|
||||
Reference in New Issue
Block a user