There is currently no Web API targeted at video editing.
The MediaStream and MediaRecorder APIs are meant to deal with live sources.
Because of the structure of video files, you can't just slice out a part of one to make a new video, nor can you simply concatenate small video files into a longer one. In both cases, you need to rebuild the metadata in order to make a new video file.
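To illustrate, a naive byte-level concatenation of two recordings (clipA and clipB here are hypothetical Blobs holding two separately recorded clips) does not produce a proper merged video:

// Naive byte-level concatenation of two recordings (clipA / clipB are hypothetical Blobs)
const naive = new Blob([clipA, clipB], { type: 'video/webm' });
// The result contains two complete containers, each with its own metadata,
// so players will typically only play the first clip, or reject the file entirely.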
The only current API able to produce media files is MediaRecorder.
There are currently only two implementations of the MediaRecorder API, but between them they support about three different codecs in two different containers. This means that you would need to build at least five metadata parsers just to support current implementations (and their number will keep growing, and they may need updates as implementations evolve).
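You can probe what your own browser is able to produce with MediaRecorder.isTypeSupported(); the candidate list below is only an example:

// Which container/codec combinations can this browser record to?
const candidates = [
'video/webm;codecs=vp8,opus',
'video/webm;codecs=vp9,opus',
'video/webm;codecs=h264',
'video/mp4'
];
console.log(candidates.filter(type => MediaRecorder.isTypeSupported(type)));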
Sounds like a tough job.
Maybe the upcoming WebAssembly API will allow us to port ffmpeg to browsers, which would make this a lot simpler, but I have to admit I don't know WebAssembly at all, so I'm not even sure it is really doable.
I hear you saying "Ok, there is no tool made just for that, but we are hackers, and we have other tools, with great power."
Well, yes. If we're really willing to do it, we can hack something...
As said before, MediaStream and MediaRecorder are meant for live video. We can thus convert static video files to live streams with the [HTMLVideoElement | HTMLCanvasElement].captureStream() methods.
We can also record those live streams into a static file thanks to the MediaRecorder API.
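A minimal sketch of that round trip could look like this (the selector and the 5 second duration are only placeholders; Firefox exposes captureStream() as mozCaptureStream() on media elements):

const video = document.querySelector('video'); // some already playing <video>
const stream = video.captureStream();
const recorder = new MediaRecorder(stream);
const chunks = [];
recorder.ondataavailable = e => chunks.push(e.data);
recorder.onstop = e => {
  const file = new Blob(chunks, { type: recorder.mimeType });
  // `file` is a brand new static media file, re-encoded by the browser
};
recorder.start();
setTimeout(() => recorder.stop(), 5000); // stop after 5 seconds of playback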
What we cannot do, however, is change the stream source a MediaRecorder has been fed with.
So in order to merge small video files into one longer one, we'll need to:
- load these videos into <video> elements
- draw these <video> elements onto a <canvas> element in the wanted order
- feed the audio of these <video> elements into an AudioContext
- merge the canvas.captureStream() stream and the AudioContext's stream into a single MediaStream
- record this MediaStream
But this means that the merging is actually a re-recording of all the videos, and this can only be done in real time (at 1× speed).
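Condensed, the whole pipeline looks roughly like this (a sketch only: videos is assumed to be an array of already loaded <video> elements, and exportFile a hypothetical callback; the proof of concept below does the real work):

const canvas = document.createElement('canvas');
const v_ctx = canvas.getContext('2d');
const a_ctx = new AudioContext();
const dest = a_ctx.createMediaStreamDestination();
// route every <video>'s audio into the MediaStream destination
videos.forEach(v => a_ctx.createMediaElementSource(v).connect(dest));
// one stream holding the canvas' video track and the AudioContext's audio track
const merged = new MediaStream([
  ...canvas.captureStream(30).getVideoTracks(),
  ...dest.stream.getAudioTracks()
]);
const recorder = new MediaRecorder(merged);
const chunks = [];
recorder.ondataavailable = e => chunks.push(e.data);
recorder.onstop = e => exportFile(new Blob(chunks, { type: recorder.mimeType }));
recorder.start();
// then play the <video> elements one after the other, and at every frame
// draw the currently playing one onto the canvas:
//   v_ctx.drawImage(currentVideo, 0, 0);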
Here is a live proof of concept where we first slice an original video file into multiple smaller parts, shuffle these parts to mimic some montage, then create a canvas-based player, also able to record this montage and export it.
Nota bene: this is a first version and there are still a lot of bugs (notably in Firefox; it should work almost fine in Chrome).
(() => {
if (!('MediaRecorder' in window)) {
throw new Error('unsupported browser');
}
// some global params
const CHUNK_DURATION = 1000;
const MAX_SLICES = 15; // get only 15 slices
const FPS = 30;
async function init() {
const url = 'https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4';
const slices = await getSlices(url); // slice the original media into smaller chunks
mess_up_array(slices); // Let's shuffle these slices,
// otherwise there would be no point in merging them into a new file
generateSelect(slices); // displays each chunk independently
window.player = new SlicePlayer(slices); // init our player
};
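// SlicePlayer: plays the shuffled parts back to back on a canvas,
// and can re-record this playback (canvas video + merged audio) into a single file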
const SlicePlayer = class {
/*
@args: Array of populated HTMLVideoElements
*/
constructor(parts) {
this.parts = parts;
this.initVideoContext();
this.initAudioContext();
this.currentIndex = 0; // to know which video we'll play
this.currentTime = 0;
this.duration = parts.reduce((a, b) => b._duration + a, 0); // the sum of all parts' durations
// (see below why "_")
this.initDOM();
// attach our onended callback only on the last vid
this.parts[this.parts.length - 1].onended = e => this.onended();
this.resetAll(); // set all videos' currentTime to 0 + draw first frame
}
initVideoContext() {
const c = this.canvas = document.createElement('canvas');
c.width = this.parts[0].videoWidth;
c.height = this.parts[0].videoHeight;
this.v_ctx = c.getContext('2d');
}
initAudioContext() {
const a = this.a_ctx = new AudioContext();
const gain = this.volume_node = a.createGain();
gain.connect(a.destination);
// extract the audio from our video elements so that we can record it
this.audioSources = this.parts.map(v => a.createMediaElementSource(v));
this.audioSources.forEach(s => s.connect(gain));
}
initDOM() {
// all DOM things...
canvas_player_timeline.max = this.duration;
canvas_player_cont.appendChild(this.canvas);
canvas_player_play_btn.onclick = e => this.startVid(this.currentIndex);
canvas_player_cont.style.display = 'inline-block';
canvas_player_timeline.oninput = e => {
if (!this.recording)
this.onseeking(e);
};
canvas_player_record_btn.onclick = e => this.record();
}
resetAll() {
this.currentTime = canvas_player_timeline.value = 0;
// when the first part has actually been reset to the start
this.parts[0].onseeked = e => {
this.parts[0].onseeked = null;
this.draw(0); // draw it
};
this.parts.forEach(v => v.currentTime = 0);
if (this.playing && this.stopLoop) {
this.playing = false;
this.stopLoop();
}
}
startVid(index) { // starts playing the video at given index
if (index > this.parts.length - 1) { // that was the last one
this.onended();
return;
}
this.playing = true;
this.currentIndex = index; // update our currentIndex
this.parts[index].play().then(() => {
// try to minimize the gaps between the different parts
if (this.recording && this.recorder.state === 'paused') {
this.recorder.resume();
}
});
this.startLoop();
}
startNext() { // starts the next part before the current one has actually ended
const nextPart = this.parts[this.currentIndex + 1];
if (!nextPart) { // current === last
return;
}
this.playing = true;
if (!nextPart.paused) { // already playing ?
return;
}
// try to minimize the gaps between the different parts
if (this.recording && this.recorder && this.recorder.state === 'recording') {
this.recorder.pause();
}
nextPart.play()
.then(() => {
++this.currentIndex; // this is now the current video
if (!this.playing) { // somehow got stopped in between?
this.playing = true;
this.startLoop(); // start again
}
// try to minimize the gaps between the different parts
if (this.recording && this.recorder.state === 'paused') {
this.recorder.resume();
}
});
}
startLoop() { // starts our update loop
// see https://stackoverflow.com/questions/40687010/
this.stopLoop = audioTimerLoop(e => this.update(), 1000 / FPS);
}
update(t) { // at every tick
const currentPart = this.parts[this.currentIndex];
this.updateTimeLine(); // update the timeline
if (!this.playing || currentPart.paused) { // somehow got stopped
this.playing = false;
if (this.stopLoop) {
this.stopLoop(); // stop the loop
}
}
this.draw(this.currentIndex); // draw the current video on the canvas
// calculate how long we've got until the end of this part
const remainingTime = currentPart._duration - currentPart.currentTime;
if (remainingTime < (2 / FPS)) { // less than 2 frames ?
setTimeout(e => this.startNext(), remainingTime / 2); // start the next part
}
}
draw(index) { // draw the video[index] on the canvas
this.v_ctx.drawImage(this.parts[index], 0, 0);
}
updateTimeLine() {
// get the sum of all parts' currentTime
this.currentTime = this.parts.reduce((a, b) =>
(isFinite(b.currentTime) ? b.currentTime : b._duration) + a, 0);
canvas_player_timeline.value = this.currentTime;
}
onended() { // triggered when the last part ends
// if we are recording, stop the recorder
if (this.recording && this.recorder.state !== 'inactive') {
this.recorder.stop();
}
// go back to first frame
this.resetAll();
this.currentIndex = 0;
this.playing = false;
}
onseeking(evt) { // when we click the timeline
// first reset all videos' currentTime to 0
this.parts.forEach(v => v.currentTime = 0);
this.currentTime = +evt.target.value;
let index = 0;
let sum = 0;
// find which part should be played at this time
for (index; index < this.parts.length; index++) {
let p = this.parts[index];
if (sum + p._duration > this.currentTime) {
break;
}
sum += p._duration;
p.currentTime = p._duration;
}
this.currentIndex = index;
// set the currentTime of this part
this.parts[index].currentTime = this.currentTime - sum;
if (this.playing) { // if we were playing
this.startVid(index); // set this part as the current one
} else {
this.parts[index].onseeked = e => { // wait until we have actually seeked to the correct position
this.parts[index].onseeked = null;
this.draw(index); // and draw a single frame
};
}
}
record() { // inits the recording
this.recording = true; // let the app know we're recording
this.resetAll(); // go back to first frame
canvas_controls.classList.add('disabled'); // disable controls
const v_stream = this.canvas.captureStream(FPS); // make a stream of our canvas
const dest = this.a_ctx.createMediaStreamDestination(); // make a stream of our AudioContext
this.volume_node.connect(dest);
// FF bug... see https://bugzilla.mozilla.org/show_bug.cgi?id=1296531
let merged_stream = null;
if (!('mozCaptureStream' in HTMLVideoElement.prototype)) {
v_stream.addTrack(dest.stream.getAudioTracks()[0]);
merged_stream = v_stream;
} else {
merged_stream = new MediaStream(
v_stream.getVideoTracks().concat(dest.stream.getAudioTracks())
);
}
const chunks = [];
const rec = this.recorder = new MediaRecorder(merged_stream, {
mimeType: MediaRecorder._preferred_type
});
rec.ondataavailable = e => chunks.push(e.data);
rec.onstop = e => {
merged_stream.getTracks().forEach(track => track.stop());
this.export(new Blob(chunks, { type: MediaRecorder._preferred_type })); // give the Blob its MIME type
};
rec.start();
this.startVid(0); // start playing
}
export(blob) { // once the recording is over
const a = document.createElement('a');
a.download = a.innerHTML = 'merged.webm';
a.href = URL.createObjectURL(blob); // the Blob already carries its MIME type
exports_cont.appendChild(a);
canvas_controls.classList.remove('disabled');
this.recording = false;
this.resetAll();
}
}
// END Player
function generateSelect(slices) { // generates a select to show each slice