Quantcast
Channel: MediaSPIP
Viewing all 117673 articles
Browse latest View live

How to create a video from images with FFmpeg?

$
0
0
ffmpeg -r 1/5 -start_number 2 -i img%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p out.mp4

This line worked fine but I want to create a video file from images in another folder. Image names in my folder are:

img001.jpg
img002.jpg
img003.jpg
...

How could I input images files from a different folder? Example: C:\mypics

I tried this command but ffmpeg generated a video with the first image (img001.jpg) only.

ffmpeg -r 1/5 -start_number 0 -i C:\myimages\img%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p out.mp4

How to merge two videos?

$
0
0

I use the https://github.com/Zulko/moviepy library to merge two videos with Python. It merged successfully, but the sound of the videos does not exist in merged.mp4.

The python code :

clip1 = VideoFileClip("2.mp4",audio=True)
clip2 = VideoFileClip("1.mp4",audio=True)
final_clip = concatenate_videoclips([clip1,clip2],method="compose")
final_clip.write_videofile("merged.mp4")

I also tried with ffmpeg

ffmpeg -i 'concat:1.mp4|2.mp4' -codec copy merged.mp4

ffmpeg couldn't merge the videos. It created merged.mp4, which contains only 1.mp4.

How can I merge two videos with python or another way?

-- https://github.com/Zulko/moviepy

Errors while building customised ffmpeg4.1 with opencv

$
0
0

I am trying to build my customised ffmpeg of version 4.1. As we know that ffmpeg4.1 needs libavcodec.so.58 and other related libraries(newest). I have some functions related to opencv in my code such as cvLoadImage and other. I updated the libopencv-highgui-dev, libopencv_core-dev and libopencv_imgproc-dev libraries. When I run the makefile I got the following errors,

/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_free_packet@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_write_frame@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_frame_alloc@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_free_context@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_write_header@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_rescale_q@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_write_trailer@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `sws_getCachedContext@LIBSWSCALE_4'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_find_encoder@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_free@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_encode_video2@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_seek_frame@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_lockmgr_register@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_decode_video2@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_open2@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_malloc@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avio_close@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_init_packet@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_log_set_level@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avpicture_get_size@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_get_riff_video_tags@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_close_input@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avpicture_fill@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_network_init@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_find_stream_info@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_close@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_guess_codec@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avio_open@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_guess_format@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_open_input@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_register_all@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `sws_getContext@LIBSWSCALE_4'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_new_stream@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_frame_free@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_sub_q@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_freep@LIBAVUTIL_55'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_find_decoder@LIBAVCODEC_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `sws_freeContext@LIBSWSCALE_4'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `sws_scale@LIBSWSCALE_4'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avformat_alloc_context@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_read_frame@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `av_codec_get_id@LIBAVFORMAT_57'
/usr/lib/x86_64-linux-gnu/libopencv_highgui.so: undefined reference to `avcodec_flush_buffers@LIBAVCODEC_57'
collect2: error: ld returned 1 exit status
make: *** [new-ffmpeg] Error 1

I have clearly linked the newest libraries and included them in my makefile. The error list suggests that libopencv_highgui.so is pointing to the libavcodec.so.57 version APIs. Has OpenCV not been upgraded to support the libavXX.so.58 libraries, or am I doing something wrong here?

Please help me with this, or any suggestions would be helpful.

How to properly close a FFmpeg stream and AVFormatContext without leaking memory?

$
0
0

I have built an app that uses FFmpeg to connect to remote IP cameras in order to receive video and audio frames via RTSP 2.0.

The app is built using Xcode 10-11 and Objective-C with a custom FFmpeg build config.

The architecture is the following:

MyApp Document_0 RTSPContainerObject_0 RTSPObject_0 RTSPContainerObject_1 RTSPObject_1 ...
Document_1
...

GOAL:

  1. After closing Document_0 no FFmpeg objects should be leaked.
  2. The closing process should stop frame reading and destroy all objects which use FFmpeg.

PROBLEM:

enter image description here

  1. Somehow Xcode's memory debugger shows two instances of MyApp.

FACTS:

  • macOS'es Activity Monitor doesn't show two instances of MyApp.

  • macOS's Activity Monitor doesn't show any instances of FFmpeg or other child processes.

  • The issue is not related to some leftover memory due to a late memory snapshot since it can be reproduced easily.

  • Xcode's memory debugger shows that the second instance only has RTSPObject's AVFormatContext and no other objects.

    1. The second instance has an AVFormatContext and the RTPSObject still has a pointer to the AVFormatContext.

FACTS:

  • Opening and closing the second document Document_1 leads to the same problem and having two objects leaked. This means that there is a bug that creates scalable problems. More and more memory is used and unavailable.

Here is my termination code:

 - (void)terminate
{ // * Video and audio frame provisioning termination * [self stopVideoStream]; [self stopAudioStream]; // * // * Video codec termination * avcodec_free_context(&_videoCodecContext); // NULL pointer safe. self.videoCodecContext = NULL; // * // * Audio codec termination *
avcodec_free_context(&_audioCodecContext); // NULL pointer safe.
self.audioCodecContext = NULL;
// * if (self.packet)
{ // Free the packet that was allocated by av_read_frame. av_packet_unref(&packet); // The documentation doesn't mention NULL safety. self.packet = NULL;
} if (self.currentAudioPacket)
{ av_packet_unref(_currentAudioPacket); self.currentAudioPacket = NULL;
} // Free raw frame data.
av_freep(&_rawFrameData); // NULL pointer safe. // Free the swscaler context swsContext.
self.isFrameConversionContextAllocated = NO;
sws_freeContext(scallingContext); // NULL pointer safe. [self.audioPacketQueue removeAllObjects]; self.audioPacketQueue = nil; self.audioPacketQueueLock = nil;
self.packetQueueLock = nil;
self.audioStream = nil;
BXLogInDomain(kLogDomainSources, kLogLevelVerbose, @"%s:%d: All streams have been terminated!", __FUNCTION__, __LINE__); // * Session context termination *
AVFormatContext *pFormatCtx = self.sessionContext;
BOOL shouldProceedWithInputSessionTermination = self.isInputStreamOpen && self.shouldTerminateStreams && pFormatCtx;
NSLog(@"\nTerminating session context...");
if (shouldProceedWithInputSessionTermination)
{ NSLog(@"\nTerminating..."); //av_write_trailer(pFormatCtx); // Discard all internally buffered data. avformat_flush(pFormatCtx); // The documentation doesn't mention NULL safety. // Close an opened input AVFormatContext and free it and all its contents. // WARNING: Closing an non-opened stream will cause avformat_close_input to crash. avformat_close_input(&pFormatCtx); // The documentation doesn't mention NULL safety. NSLog(@"Logging leftovers - %p, %p %p", self.sessionContext, _sessionContext, pFormatCtx); avformat_free_context(pFormatCtx); NSLog(@"Logging content = %c", *self.sessionContext); //avformat_free_context(pFormatCtx); - Not needed because avformat_close_input is closing it. self.sessionContext = NULL;
}
// * }

IMPORTANT: The termination sequence is:

 New frame will be read.
-[(RTSPObject)StreamInput currentVideoFrameDurationSec]
-[(RTSPObject)StreamInput frameDuration:]
-[(RTSPObject)StreamInput currentCGImageRef]
-[(RTSPObject)StreamInput convertRawFrameToRGB]
-[(RTSPObject)StreamInput pixelBufferFromImage:]
-[(RTSPObject)StreamInput cleanup]
-[(RTSPObject)StreamInput dealloc]
-[(RTSPObject)StreamInput stopVideoStream]
-[(RTSPObject)StreamInput stopAudioStream] Terminating session context...
Terminating...
Logging leftovers - 0x109ec6400, 0x109ec6400 0x109ec6400
Logging content = \330
-[Document dealloc]

NOT WORKING SOLUTIONS:

  • Changing the order of object releases (The AVFormatContext has been freed first but it didn't lead to any change).
  • Calling RTSPObject's cleanup method much sooner to give FFmpeg more time to handle object releases.
  • Reading a lot of SO answers and FFmpeg documentation to find a clean cleanup process or newer code which might highlight why the object release doesn't happen properly.

I am currently reading the documentation on AVFormatContext since I believe that I am forgetting to release something. This belief is based on the memory debugger's output showing that the AVFormatContext is still around.

Here is my creation code:

#pragma mark # Helpers - Start - (NSError *)openInputStreamWithVideoStreamId:(int)videoStreamId audioStreamId:(int)audioStreamId useFirst:(BOOL)useFirstStreamAvailable inInit:(BOOL)isInitProcess
{ // NSLog(@"%s", __PRETTY_FUNCTION__); // RTSP self.status = StreamProvisioningStatusStarting; AVCodec *decoderCodec; NSString *rtspURL = self.streamURL; NSString *errorMessage = nil; NSError *error = nil; self.sessionContext = NULL; self.sessionContext = avformat_alloc_context(); AVFormatContext *pFormatCtx = self.sessionContext; if (!pFormatCtx) { // Create approp error. return error; } // MUST be called before avformat_open_input(). av_dict_free(&_sessionOptions); self.sessionOptions = 0; if (self.usesTcp) { // "rtsp_transport" - Set RTSP transport protocols. // Allowed are: udp_multicast, tcp, udp, http. av_dict_set(&_sessionOptions, "rtsp_transport", "tcp", 0); } av_dict_set(&_sessionOptions, "rtsp_transport", "tcp", 0); // Open an input stream and read the header with the demuxer options. // WARNING: The stream must be closed with avformat_close_input() if (avformat_open_input(&pFormatCtx, rtspURL.UTF8String, NULL, &_sessionOptions) != 0) { // WARNING: Note that a user-supplied AVFormatContext (pFormatCtx) will be freed on failure. self.isInputStreamOpen = NO; // Create approp error. return error; } self.isInputStreamOpen = YES; // user-supplied AVFormatContext pFormatCtx might have been modified. self.sessionContext = pFormatCtx; // Retrieve stream information. if (avformat_find_stream_info(pFormatCtx,NULL) < 0) { // Create approp error. return error; } // Find the first video stream int streamCount = pFormatCtx->nb_streams; if (streamCount == 0) { // Create approp error. return error; } int noStreamsAvailable = pFormatCtx->streams == NULL; if (noStreamsAvailable) { // Create approp error. return error; } // Result. An Index can change, an identifier shouldn't. self.selectedVideoStreamId = STREAM_NOT_FOUND; self.selectedAudioStreamId = STREAM_NOT_FOUND; // Fallback. 
int firstVideoStreamIndex = STREAM_NOT_FOUND; int firstAudioStreamIndex = STREAM_NOT_FOUND; self.selectedVideoStreamIndex = STREAM_NOT_FOUND; self.selectedAudioStreamIndex = STREAM_NOT_FOUND; for (int i = 0; i < streamCount; i++) { // Looking for video streams. AVStream *stream = pFormatCtx->streams[i]; if (!stream) { continue; } AVCodecParameters *codecPar = stream->codecpar; if (!codecPar) { continue; } if (codecPar->codec_type==AVMEDIA_TYPE_VIDEO) { if (stream->id == videoStreamId) { self.selectedVideoStreamId = videoStreamId; self.selectedVideoStreamIndex = i; } if (firstVideoStreamIndex == STREAM_NOT_FOUND) { firstVideoStreamIndex = i; } } // Looking for audio streams. if (codecPar->codec_type==AVMEDIA_TYPE_AUDIO) { if (stream->id == audioStreamId) { self.selectedAudioStreamId = audioStreamId; self.selectedAudioStreamIndex = i; } if (firstAudioStreamIndex == STREAM_NOT_FOUND) { firstAudioStreamIndex = i; } } } // Use first video and audio stream available (if possible). if (self.selectedVideoStreamIndex == STREAM_NOT_FOUND && useFirstStreamAvailable && firstVideoStreamIndex != STREAM_NOT_FOUND) { self.selectedVideoStreamIndex = firstVideoStreamIndex; self.selectedVideoStreamId = pFormatCtx->streams[firstVideoStreamIndex]->id; } if (self.selectedAudioStreamIndex == STREAM_NOT_FOUND && useFirstStreamAvailable && firstAudioStreamIndex != STREAM_NOT_FOUND) { self.selectedAudioStreamIndex = firstAudioStreamIndex; self.selectedAudioStreamId = pFormatCtx->streams[firstAudioStreamIndex]->id; } if (self.selectedVideoStreamIndex == STREAM_NOT_FOUND) { // Create approp error. return error; } // See AVCodecID for codec listing. // * Video codec setup: // 1. Find the decoder for the video stream with the gived codec id. AVStream *stream = pFormatCtx->streams[self.selectedVideoStreamIndex]; if (!stream) { // Create approp error. return error; } AVCodecParameters *codecPar = stream->codecpar; if (!codecPar) { // Create approp error. 
return error; } decoderCodec = avcodec_find_decoder(codecPar->codec_id); if (decoderCodec == NULL) { // Create approp error. return error; } // Get a pointer to the codec context for the video stream. // WARNING: The resulting AVCodecContext should be freed with avcodec_free_context(). // Replaced: // self.videoCodecContext = pFormatCtx->streams[self.selectedVideoStreamIndex]->codec; // With: self.videoCodecContext = avcodec_alloc_context3(decoderCodec); avcodec_parameters_to_context(self.videoCodecContext, codecPar); self.videoCodecContext->thread_count = 4; NSString *description = [NSString stringWithUTF8String:decoderCodec->long_name]; // 2. Open codec. if (avcodec_open2(self.videoCodecContext, decoderCodec, NULL) < 0) { // Create approp error. return error; } // * Audio codec setup: if (self.selectedAudioStreamIndex > -1) { [self setupAudioDecoder]; } // Allocate a raw video frame data structure. Contains audio and video data. self.rawFrameData = av_frame_alloc(); self.outputWidth = self.videoCodecContext->width; self.outputHeight = self.videoCodecContext->height; if (!isInitProcess) { // Triggering notifications in init process won't change UI since the object is created locally. All // objects which need data access to this object will not be able to get it. Thats why we don't notifiy anyone about the changes. [NSNotificationCenter.defaultCenter postNotificationName:NSNotification.rtspVideoStreamSelectionChanged object:nil userInfo: self.selectedVideoStream]; [NSNotificationCenter.defaultCenter postNotificationName:NSNotification.rtspAudioStreamSelectionChanged object:nil userInfo: self.selectedAudioStream]; } return nil;
}

UPDATE 1

The initial architecture allowed using any given thread. Most of the below code would mostly run on the main thread. This solution was not appropriate since the opening of the stream input can take several seconds for which the main thread is blocked while waiting for a network response inside FFmpeg. To solve this issue I have implemented the following solution:

  • Creation and the initial setup are only allowed on the background_thread (see code snippet "1" below).
  • Changes are allowed on the current_thread(Any).
  • Termination is allowed on the current_thread(Any).

After removing main thread checks and dispatch_asyncs to background threads, leaking has stopped and I can't reproduce the issue anymore:

// Code that produces the issue. dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ // 1 - Create and do initial setup. // This block creates the issue. [self.rtspObject = [[RTSPObject alloc] initWithURL: ... ];
[self.rtspObject openInputStreamWithVideoStreamId: ... audioStreamId: ... useFirst: ... inInit: ...];
});

I still don't understand why Xcode's memory debugger says that this block is retained?

Any advice or idea is welcome.

-- enter image description here

Revision 119037: incrementer, juste pour pouvoir checkout surun commit qui n'est pas le ...

$
0
0
incrementer, juste pour pouvoir checkout surun commit qui n'est pas le commit de copie -- Log

How to use ffmpeg on iOS?

$
0
0

I have to use the following command for ffmpeg

ffmpeg -i "Video.mp4" -map 0:1 "AudioWithVocalfromStreamOne.mp3" -map 0:2 "AudioWithoutVocalfromStreamTwo.mp3"

I have successfully installed ffmpeg on my mac. I tried using FFMPEGWrapper, but it doesn't seem to take any such parameters for using "map".

I need to know how to extract the streams from a video. I already have access to the streams, as server side is providing it to me using ffprobe at their end.

-- FFMPEGWrapper

How do I use the CLI interface of FFMpeg from a static build?

$
0
0

I have added this (https://github.com/kewlbear/FFmpeg-iOS-build-script) version of ffmpeg to my project. I can't see the entry point to the library in the headers included.

How do I get access to the same text command based system that the stand alone application has, or an equivalent?

I would also be happy if someone could point me towards documentation that allows you to use FFmpeg without the command line interface.

This is what I am trying to execute (I have it working on windows and android using the CLI version of ffmpeg)

ffmpeg -framerate 30 -i snap%03d.jpg -itsoffset 00:00:03.23333 -itsoffset 00:00:05 -i soundEffect.WAV -c:v libx264 -vf fps=30 -pix_fmt yuv420p result.mp4
-- https://github.com/kewlbear/FFmpeg-iOS-build-script

Laravel FFmpeg error Call to undefined method FFMpeg\FFMpeg::fromDisk()

$
0
0

I'm using laravel-ffmpeg to create a thumbnail for the video, but when I run the code, it returns

Call to undefined method FFMpeg\FFMpeg::fromDisk()

I don't know what is causing this error; I followed the instructions on GitHub. Here is my code.

use FFMpeg\FFMpeg;
use FFMpeg\FFProbe; $thumbnail_name = md5($request->video_name).'_thumbnail.jpg';
$thumbnail_path = '/assets/' . $request->video_name;
FFMpeg::fromDisk('videos') ->open($export_as) ->getFrameFromSeconds(10) ->export() ->toDisk('thumnails') ->save($thumbnail_path);

I tried the fromFilesystem method, but it is not working. I also changed the value in fromDisk() to public/assets, and even used an absolute path from the C drive like C:\xampp\htdocs\vidpuz\public\assets, but that is also not working — it keeps returning the undefined method error.


Laravel not recognize laravel-ffmpeg methods

$
0
0

I'd like to put a watermark to a streaming video with Laravel-ffmpeg repo from Github, but it doesn't recognize its own methods. I've installed the package with composer according to the repo-s readme.md file from here: https://github.com/pascalbaljetmedia/laravel-ffmpeg

I know that there was a similar question here. But nobody answered the question and I've got the method inside the files.

The installation worked properly. And to stream the video I use these codes: https://codesamplez.com/programming/php-html5-video-streaming-tutorial

But when I put the code above to the open() method, Laravel doesn't find the fromDisk method from the FFMpeg.php file.

FFMpeg::fromDisk('videos') ->open('steve_howe.mp4') ->addFilter($clipFilter) ->export() ->toDisk('converted_videos') ->inFormat(new \FFMpeg\Format\Video\X264) ->save('short_steve.mkv');

My composer.json look like this:

{ "name": "laravel/laravel", "type": "project", "description": "The Laravel Framework.", "keywords": [ "framework", "laravel" ], "license": "MIT", "require": { "php": "^7.1.3", "fideloper/proxy": "^4.0", "illuminate/config": "5.8.*", "illuminate/filesystem": "5.8.*", "illuminate/log": "5.8.*", "illuminate/support": "5.8.*", "laravel/framework": "5.8.*", "laravel/tinker": "^1.0", "league/flysystem": "~1.0", "pbmedia/laravel-ffmpeg": "^4.0", "php-ffmpeg/php-ffmpeg": "^0.13", "symfony/process": "~4.0" }, "require-dev": { "beyondcode/laravel-dump-server": "^1.0", "filp/whoops": "^2.0", "fzaninotto/faker": "^1.4", "mockery/mockery": "^1.0", "nunomaduro/collision": "^3.0", "phpunit/phpunit": "^7.5" }, "config": { "optimize-autoloader": true, "preferred-install": "dist", "sort-packages": true }, "extra": { "laravel": { "dont-discover": [] } }, "autoload": { "psr-4": { "App\\": "app/" }, "classmap": [ "database/seeds", "database/factories" ] }, "autoload-dev": { "psr-4": { "Tests\\": "tests/" } }, "minimum-stability": "dev", "prefer-stable": true, "scripts": { "post-autoload-dump": [ "Illuminate\\Foundation\\ComposerScripts::postAutoloadDump", "@php artisan package:discover --ansi" ], "post-root-package-install": [ "@php -r \"file_exists('.env') || copy('.env.example', '.env');\"" ], "post-create-project-cmd": [ "@php artisan key:generate --ansi" ] }
}

How could I say to Laravel that recognize the methods?

Thank you for the help in advance

-- https://github.com/pascalbaljetmedia/laravel-ffmpeg, https://codesamplez.com/programming/php-html5-video-streaming-tutorial

When creating a Xing or Info tag in an MP3, may I use any MP3 header or does it have to match other frames?

$
0
0

I have a set of bare MP3 files. Bare as in I removed all tags (no ID3, no Xing, no Info) from those files.

Just before sending one of these files to the client, I want to add an Info tag. All of my files are CBR so we will use an Info tag (no Xing).

Right now I get the first 4 bytes of the existing MP3 to get the Version (MPEG-1, Layer III), Bitrate, Frequency, Stereo Mode, etc. and thus determine the size of one frame. I create the tag that way, reusing these 4 bytes for the Info tag and determining the size of the frame.

For those wondering, these 4 bytes may look like this:

FF FB 78 04

To me it felt like you are expected to use the exact same first 4 bytes in the Info tag as found in the other audio frames of the MP3, but when using ffmpeg, they stick an Info tag with a hard coded header (wrong bitrate, wrong frequency, etc.)

My question is: Is ffmpeg really doing it right? (LAME doesn't do that) Could I do the same, skipping the load of the first 4 bytes and still have the greater majority of the players out there play my files as expected?

Note: since I read these 4 bytes over the network, it would definitely save a lot of time and some bandwidth to not have to load these 4 bytes on a HEAD request. Resources I could use for the GET requests instead...

ERROR: jni not found building ffmpeg for Android

$
0
0

I have successfully built ffmpeg libraries for Android and now I want to add MediaCodec support.

I have added --enable-mediacodec --enable-jni to my configure command line and -I$SYSROOT/usr/include to extra-cflags.

At the end of config.log is the following:

check_headers jni.h
test_cpp
BEGIN /tmp/ffconf.m9iLX2Qm/test.c 1 #include  2 int x;
END /tmp/ffconf.m9iLX2Qm/test.c
/home/jr/android-ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/clang --sysroot=/home/jr/android-ndk/sysroot -D_ISOC99_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -Dstrtod=avpriv_strtod -DPIC -target aarch64-none-linux-android -I/home/jr/android-ndk/sysroot/usr/include/aarch64-linux-android -I/home/jr/android-ndk/sysroot/usr/include -O3 -fPIC -D__ANDROID_API__=21 -std=c11 -fPIE -fomit-frame-pointer -fPIC -E -o /tmp/ffconf.m9iLX2Qm/test.o /tmp/ffconf.m9iLX2Qm/test.c
ERROR: jni not found

There is a jni.h in /home/jr/android-ndk/sysroot/usr/include.

Is there anything else I need to add to my build script to get this to work?

FFMPEG Error initializing complex filters when using watermark [duplicate]

$
0
0

This question already has an answer here:

Im trying to convert mp4 file using ffmpeg on windows by using this command ,

ffmpeg -i 1.mp4 -vcodec libx264 -movflags +faststart -threads 0 -r 25 -g 50 -crf 25 -me_method hex -trellis 0 -bf 8 -acodec aac -strict -2 -ar 44100 -ab 128k -vf subtitles=1.srt:force_style='FontName=Changa,Fontsize=20' -qscale:v 3 -preset veryfast 1z.mp4

It converts without a problem, but I need to overlay a PNG watermark file at the bottom right of the video. I have tried all the options I found through searching, but none of them work — sometimes I get an "Error initializing complex filters" problem. I am new to ffmpeg and I don't know how to do it. Your help is really appreciated. Thanks.

Command-line streaming webcam with audio from Ubuntu server in WebM format

$
0
0

I am trying to stream video and audio from my webcam connected to my headless Ubuntu server (running Maverick 10.10). I want to be able to stream in WebM format (VP8 video + OGG). Bandwidth is limited, and so the stream must be below 1Mbps.

I have tried using FFmpeg. I am able to record WebM video from the webcam with the following:

ffmpeg -s 640x360 \
-f video4linux2 -i /dev/video0 -isync -vcodec libvpx -vb 768000 -r 10 -vsync 1 \
-f alsa -ac 1 -i hw:1,0 -acodec libvorbis -ab 32000 -ar 11025 \
-f webm /var/www/telemed/test.webm 

However despite experimenting with all manner of vsync and async options, I can either get out of sync audio, or Benny Hill style fast-forward video with matching fast audio. I have also been unable to get this actually working with ffserver (by replacing the test.webm path and filename with the relevant feed filename).

The objective is to get a live, audio + video feed which is viewable in a modern browser, in a tight bandwidth, using only open-source components. (None of that MP3 format legal chaff)

My questions are therefore: How would you go about streaming webm from a webcam via Linux with in-sync audio? What software you use?

Have you succeeded in encoding webm from a webcam with in-sync audio via FFmpeg? If so, what command did you issue?

Is it worth persevering with FFmpeg + FFserver, or are there other more suitable command-line tools around (e.g. VLC which doesn't seem too well built for encoding)?

Is something like Gstreamer + flumotion configurable from the command line? If so, where do I find command line documentation because flumotion doc is rather light on command line details?

Thanks in advance!

FFMPEG does not produce output a valid mp4 when called from Java

$
0
0

I am merging an image with an audio file into a mp4-video using the ffmpeg-command:

ffmpeg -y -r 30 -loop 1 -i  -i  -shortest -acodec copy -vcodec libx264 

To call this command from Java I'm using this code:

ProcessBuilder builder = new ProcessBuilder("ffmpeg", "-y", "-r", String.valueOf(generalConfig.getOutputFPS()), "-loop", "1", "-i", pictureFile, "-i", transspoken.getVoiceFile(), "-shortest", "-acodec", "copy", "-vcodec", "libx264", snippetOut);
builder.redirectOutput(new File(getTMPName() + ".txt"));
builder.redirectError(new File(getTMPName() + ".txt"));
builder.redirectErrorStream(true); LogUtils.debug("FFMPEG-Render", String.join("", builder.command()));
try { Process p = builder.start(); p.waitFor();
} catch (IOException | InterruptedException e) { e.printStackTrace();
}

This code runs without problem on my local machine running Ubuntu 18.04 Desktop and OpenJDK 11.0.4. However, when run on a Ubuntu 18.04 Server droplet, running the same OpenJDK-Version, the code results in a corrupted output. Strangely, there is absolutely no problem when I ssh into the server and call exactly the same command to generate the mp4 file. So I am in a the really strange situation where:

  1. The command works fine when called automatically on my local machine.
  2. The command works fine when called manually on the server.
  3. The command does not work when called automatically on the server.

Output of the automated FFMPEG call on the server:

 ffmpeg version 3.4.6-0ubuntu0.18.04.1 Copyright (c) 2000-2019 the FFmpeg developers built with gcc 7 (Ubuntu 7.3.0-16ubuntu3) configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared libavutil 55. 78.100 / 55. 78.100 libavcodec 57.107.100 / 57.107.100 libavformat 57. 83.100 / 57. 83.100 libavdevice 57. 10.100 / 57. 10.100 libavfilter 6.107.100 / 6.107.100 libavresample 3. 7. 0 / 3. 7. 0 libswscale 4. 8.100 / 4. 8.100 libswresample 2. 9.100 / 2. 9.100 libpostproc 54. 7.100 / 54. 7.100
[png_pipe @ 0x560225784a60] Stream #0: not enough frames to estimate rate; consider increasing probesize
Input #0, png_pipe, from 'tmp/render_1.png': Duration: N/A, bitrate: N/A Stream #0:0: Video: png, rgb24(pc), 1920x1080, 25 tbr, 25 tbn, 25 tbc
[aac @ 0x560225789940] Estimating duration from bitrate, this may be inaccurate
Input #1, aac, from 'tmp/marytts_1.aac': Duration: 00:00:04.80, bitrate: 25 kb/s Stream #1:0: Audio: aac (LC), 16000 Hz, mono, fltp, 25 kb/s
Stream mapping: Stream #0:0 -> #0:0 (png (native) -> h264 (libx264)) Stream #1:0 -> #0:1 (copy)
Press [q] to stop, [?] for help
[libx264 @ 0x56022578c2e0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2 AVX512
[libx264 @ 0x56022578c2e0] profile High 4:4:4 Predictive, level 4.0, 4:4:4 8-bit
[libx264 @ 0x56022578c2e0] 264 - core 152 r2854 e9a5903 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x1:0x111 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=4 threads=1 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to 'tmp/render_2.mp4': Metadata: encoder : Lavf57.83.100 Stream #0:0: Video: h264 (libx264) (avc1 / 0x31637661), yuv444p, 1920x1080, q=-1--1, 30 fps, 15360 tbn, 30 tbc Metadata: encoder : Lavc57.107.100 libx264 Side data: cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: -1 Stream #0:1: Audio: aac (LC) (mp4a / 0x6134706D), 16000 Hz, mono, fltp, 25 kb/s
frame= 6 fps=0.0 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 12 fps= 11 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 18 fps= 11 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 23 fps= 10 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 27 fps=9.8 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 30 fps=9.2 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 32 fps=7.9 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 33 fps=7.1 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 34 fps=6.4 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 35 fps=5.4 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 38 fps=5.4 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 42 fps=5.0 q=29.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x 

The error output is empty. FFProbe on the corrupted file:

$ ffprobe tmp/render_2.mp4 ffprobe version 3.4.6-0ubuntu0.18.04.1 Copyright (c) 2007-2019 the FFmpeg developers built with gcc 7 (Ubuntu 7.3.0-16ubuntu3) configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared libavutil 55. 78.100 / 55. 78.100 libavcodec 57.107.100 / 57.107.100 libavformat 57. 83.100 / 57. 83.100 libavdevice 57. 10.100 / 57. 10.100 libavfilter 6.107.100 / 6.107.100 libavresample 3. 7. 0 / 3. 7. 0 libswscale 4. 8.100 / 4. 8.100 libswresample 2. 9.100 / 2. 9.100 libpostproc 54. 7.100 / 54. 7.100 [mov,mp4,m4a,3gp,3g2,mj2 @ 0x558b07498080] moov atom not found tmp/render_2.mp4: Invalid data found when processing input

At first I thought that the Java program was not properly waiting for FFmpeg to finish, thereby causing the corrupted output, but some time measurements made me somewhat confident that this is not the cause of the bug.

EDIT:

Output of the ffmpeg command on the server when called via ssh:

$ ffmpeg -y -r 30 -loop 1 -i tmp/render_1.png -i tmp/marytts_1.aac -shortest -acodec copy -vcodec libx264 tmp/render_2.mp4
ffmpeg version 3.4.6-0ubuntu0.18.04.1 Copyright (c) 2000-2019 the FFmpeg developers built with gcc 7 (Ubuntu 7.3.0-16ubuntu3) configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared libavutil 55. 78.100 / 55. 78.100 libavcodec 57.107.100 / 57.107.100 libavformat 57. 83.100 / 57. 83.100 libavdevice 57. 10.100 / 57. 10.100 libavfilter 6.107.100 / 6.107.100 libavresample 3. 7. 0 / 3. 7. 0 libswscale 4. 8.100 / 4. 8.100 libswresample 2. 9.100 / 2. 9.100 libpostproc 54. 7.100 / 54. 7.100
[png_pipe @ 0x562e37a77a60] Stream #0: not enough frames to estimate rate; consider increasing probesize
Input #0, png_pipe, from 'tmp/render_1.png': Duration: N/A, bitrate: N/A Stream #0:0: Video: png, rgb24(pc), 1920x1080, 25 tbr, 25 tbn, 25 tbc
[aac @ 0x562e37a7c940] Estimating duration from bitrate, this may be inaccurate
Input #1, aac, from 'tmp/marytts_1.aac': Duration: 00:00:04.80, bitrate: 25 kb/s Stream #1:0: Audio: aac (LC), 16000 Hz, mono, fltp, 25 kb/s
Stream mapping: Stream #0:0 -> #0:0 (png (native) -> h264 (libx264)) Stream #1:0 -> #0:1 (copy)
Press [q] to stop, [?] for help
[libx264 @ 0x562e37a7f2e0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2 AVX512
[libx264 @ 0x562e37a7f2e0] profile High 4:4:4 Predictive, level 4.0, 4:4:4 8-bit
[libx264 @ 0x562e37a7f2e0] 264 - core 152 r2854 e9a5903 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x1:0x111 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=4 threads=1 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to 'tmp/render_2.mp4': Metadata: encoder : Lavf57.83.100 Stream #0:0: Video: h264 (libx264) (avc1 / 0x31637661), yuv444p, 1920x1080, q=-1--1, 30 fps, 15360 tbn, 30 tbc Metadata: encoder : Lavc57.107.100 libx264 Side data: cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: -1 Stream #0:1: Audio: aac (LC) (mp4a / 0x6134706D), 16000 Hz, mono, fltp, 25 kb/s
frame= 194 fps= 10 q=29.0 Lsize= 280kB time=00:00:05.00 bitrate= 458.3kbits/s speed=0.261x video:261kB audio:15kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 1.337588%
[libx264 @ 0x562e37a7f2e0] frame I:1 Avg QP:16.02 size:255181
[libx264 @ 0x562e37a7f2e0] frame P:49 Avg QP:10.58 size: 93
[libx264 @ 0x562e37a7f2e0] frame B:144 Avg QP:13.67 size: 69
[libx264 @ 0x562e37a7f2e0] consecutive B-frames: 1.0% 0.0% 0.0% 99.0%
[libx264 @ 0x562e37a7f2e0] mb I I16..4: 49.3% 0.0% 50.7%
[libx264 @ 0x562e37a7f2e0] mb P I16..4: 0.0% 0.0% 0.0% P16..4: 0.1% 0.0% 0.0% 0.0% 0.0% skip:99.9%
[libx264 @ 0x562e37a7f2e0] mb B I16..4: 0.0% 0.0% 0.0% B16..8: 0.0% 0.0% 0.0% direct: 0.0% skip:100.0% L0: 0.0% L1:100.0% BI: 0.0%
[libx264 @ 0x562e37a7f2e0] coded y,u,v intra: 49.6% 41.1% 42.4% inter: 0.0% 0.0% 0.0%
[libx264 @ 0x562e37a7f2e0] i16 v,h,dc,p: 97% 0% 2% 1%
[libx264 @ 0x562e37a7f2e0] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 22% 15% 7% 7% 9% 10% 9% 9% 11%
[libx264 @ 0x562e37a7f2e0] Weighted P-Frames: Y:0.0% UV:0.0%
[libx264 @ 0x562e37a7f2e0] ref P L0: 72.6% 13.6% 11.6% 2.3%
[libx264 @ 0x562e37a7f2e0] kb/s:333.64

Output of local automated call:

ffmpeg version 3.4.6-0ubuntu0.18.04.1 Copyright (c) 2000-2019 the FFmpeg developers built with gcc 7 (Ubuntu 7.3.0-16ubuntu3) configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared WARNING: library configuration mismatch avcodec configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora 
--enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared --enable-version3 --disable-doc --disable-programs --enable-libopencore_amrnb --enable-libopencore_amrwb --enable-libtesseract --enable-libvo_amrwbenc libavutil 55. 78.100 / 55. 78.100 libavcodec 57.107.100 / 57.107.100 libavformat 57. 83.100 / 57. 83.100 libavdevice 57. 10.100 / 57. 10.100 libavfilter 6.107.100 / 6.107.100 libavresample 3. 7. 0 / 3. 7. 0 libswscale 4. 8.100 / 4. 8.100 libswresample 2. 9.100 / 2. 9.100 libpostproc 54. 7.100 / 54. 7.100
[png_pipe @ 0x558d89df15c0] Stream #0: not enough frames to estimate rate; consider increasing probesize
Input #0, png_pipe, from 'tmp/render_1.png': Duration: N/A, bitrate: N/A Stream #0:0: Video: png, rgb24(pc), 1920x1080, 25 tbr, 25 tbn, 25 tbc
[aac @ 0x558d89df64a0] Estimating duration from bitrate, this may be inaccurate
Input #1, aac, from 'tmp/marytts_1.aac': Duration: 00:00:04.80, bitrate: 25 kb/s Stream #1:0: Audio: aac (LC), 16000 Hz, mono, fltp, 25 kb/s
Stream mapping: Stream #0:0 -> #0:0 (png (native) -> h264 (libx264)) Stream #1:0 -> #0:1 (copy)
Press [q] to stop, [?] for help
[libx264 @ 0x558d89df8e40] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0x558d89df8e40] profile High 4:4:4 Predictive, level 4.0, 4:4:4 8-bit
[libx264 @ 0x558d89df8e40] 264 - core 152 r2854 e9a5903 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x1:0x111 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=4 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to 'tmp/render_2.mp4': Metadata: encoder : Lavf57.83.100 Stream #0:0: Video: h264 (libx264) (avc1 / 0x31637661), yuv444p, 1920x1080, q=-1--1, 30 fps, 15360 tbn, 30 tbc Metadata: encoder : Lavc57.107.100 libx264 Side data: cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: -1 Stream #0:1: Audio: aac (LC) (mp4a / 0x6134706D), 16000 Hz, mono, fltp, 25 kb/s
frame= 29 fps=0.0 q=0.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 51 fps= 51 q=29.0 size= 0kB time=00:00:00.00 bitrate=N/A speed= 0x frame= 67 fps= 44 q=29.0 size= 0kB time=00:00:00.51 bitrate= 0.8kbits/s speed=0.335x frame= 88 fps= 43 q=29.0 size= 256kB time=00:00:01.21 bitrate=1724.9kbits/s speed=0.595x frame= 111 fps= 43 q=29.0 size= 256kB time=00:00:01.93 bitrate=1084.9kbits/s speed=0.756x frame= 132 fps= 43 q=29.0 size= 256kB time=00:00:02.68 bitrate= 780.3kbits/s speed=0.877x frame= 153 fps= 43 q=29.0 size= 256kB time=00:00:03.39 bitrate= 618.4kbits/s speed=0.949x frame= 176 fps= 43 q=29.0 size= 256kB time=00:00:04.16 bitrate= 504.2kbits/s speed=1.02x frame= 196 fps= 43 q=29.0 size= 256kB time=00:00:04.80 bitrate= 437.0kbits/s speed=1.04x frame= 203 fps= 38 q=29.0 Lsize= 280kB time=00:00:05.00 bitrate= 458.0kbits/s speed=0.946x video:261kB audio:15kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 1.338639%
[libx264 @ 0x558d89df8e40] frame I:1 Avg QP:16.02 size:255033
[libx264 @ 0x558d89df8e40] frame P:51 Avg QP:10.50 size: 91
[libx264 @ 0x558d89df8e40] frame B:151 Avg QP:13.67 size: 69
[libx264 @ 0x558d89df8e40] consecutive B-frames: 0.5% 1.0% 0.0% 98.5%
[libx264 @ 0x558d89df8e40] mb I I16..4: 49.4% 0.0% 50.6%
[libx264 @ 0x558d89df8e40] mb P I16..4: 0.0% 0.0% 0.0% P16..4: 0.1% 0.0% 0.0% 0.0% 0.0% skip:99.9%
[libx264 @ 0x558d89df8e40] mb B I16..4: 0.0% 0.0% 0.0% B16..8: 0.0% 0.0% 0.0% direct: 0.0% skip:100.0% L0: 0.0% L1:100.0% BI: 0.0%
[libx264 @ 0x558d89df8e40] coded y,u,v intra: 49.6% 41.1% 42.2% inter: 0.0% 0.0% 0.0%
[libx264 @ 0x558d89df8e40] i16 v,h,dc,p: 97% 0% 2% 1%
[libx264 @ 0x558d89df8e40] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 22% 15% 7% 7% 9% 10% 9% 10% 11%
[libx264 @ 0x558d89df8e40] Weighted P-Frames: Y:0.0% UV:0.0%
[libx264 @ 0x558d89df8e40] ref P L0: 80.5% 10.5% 8.7% 0.3%
[libx264 @ 0x558d89df8e40] kb/s:319.34

Output of local manual call:

$ ffmpeg -y -r 30 -loop 1 -i tmp/render_1.png -i tmp/marytts_1.aac -shortest -acodec copy -vcodec libx264 tmp/render_2.mp4
ffmpeg version 3.4.6-0ubuntu0.18.04.1 Copyright (c) 2000-2019 the FFmpeg developers built with gcc 7 (Ubuntu 7.3.0-16ubuntu3) configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared WARNING: library configuration mismatch avcodec configuration: --prefix=/usr --extra-version=0ubuntu0.18.04.1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-librsvg --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora 
--enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared --enable-version3 --disable-doc --disable-programs --enable-libopencore_amrnb --enable-libopencore_amrwb --enable-libtesseract --enable-libvo_amrwbenc libavutil 55. 78.100 / 55. 78.100 libavcodec 57.107.100 / 57.107.100 libavformat 57. 83.100 / 57. 83.100 libavdevice 57. 10.100 / 57. 10.100 libavfilter 6.107.100 / 6.107.100 libavresample 3. 7. 0 / 3. 7. 0 libswscale 4. 8.100 / 4. 8.100 libswresample 2. 9.100 / 2. 9.100 libpostproc 54. 7.100 / 54. 7.100
[png_pipe @ 0x558ddf2165c0] Stream #0: not enough frames to estimate rate; consider increasing probesize
Input #0, png_pipe, from 'tmp/render_1.png': Duration: N/A, bitrate: N/A Stream #0:0: Video: png, rgb24(pc), 1920x1080, 25 tbr, 25 tbn, 25 tbc
[aac @ 0x558ddf21b4a0] Estimating duration from bitrate, this may be inaccurate
Input #1, aac, from 'tmp/marytts_1.aac': Duration: 00:00:04.80, bitrate: 25 kb/s Stream #1:0: Audio: aac (LC), 16000 Hz, mono, fltp, 25 kb/s
Stream mapping: Stream #0:0 -> #0:0 (png (native) -> h264 (libx264)) Stream #1:0 -> #0:1 (copy)
Press [q] to stop, [?] for help
[libx264 @ 0x558ddf21de40] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0x558ddf21de40] profile High 4:4:4 Predictive, level 4.0, 4:4:4 8-bit
[libx264 @ 0x558ddf21de40] 264 - core 152 r2854 e9a5903 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x1:0x111 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=4 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to 'tmp/render_2.mp4': Metadata: encoder : Lavf57.83.100 Stream #0:0: Video: h264 (libx264) (avc1 / 0x31637661), yuv444p, 1920x1080, q=-1--1, 30 fps, 15360 tbn, 30 tbc Metadata: encoder : Lavc57.107.100 libx264 Side data: cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: -1 Stream #0:1: Audio: aac (LC) (mp4a / 0x6134706D), 16000 Hz, mono, fltp, 25 kb/s
frame= 203 fps= 40 q=29.0 Lsize= 280kB time=00:00:05.00 bitrate= 458.0kbits/s speed=0.986x video:261kB audio:15kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 1.338639%
[libx264 @ 0x558ddf21de40] frame I:1 Avg QP:16.02 size:255033
[libx264 @ 0x558ddf21de40] frame P:51 Avg QP:10.50 size: 91
[libx264 @ 0x558ddf21de40] frame B:151 Avg QP:13.67 size: 69
[libx264 @ 0x558ddf21de40] consecutive B-frames: 0.5% 1.0% 0.0% 98.5%
[libx264 @ 0x558ddf21de40] mb I I16..4: 49.4% 0.0% 50.6%
[libx264 @ 0x558ddf21de40] mb P I16..4: 0.0% 0.0% 0.0% P16..4: 0.1% 0.0% 0.0% 0.0% 0.0% skip:99.9%
[libx264 @ 0x558ddf21de40] mb B I16..4: 0.0% 0.0% 0.0% B16..8: 0.0% 0.0% 0.0% direct: 0.0% skip:100.0% L0: 0.0% L1:100.0% BI: 0.0%
[libx264 @ 0x558ddf21de40] coded y,u,v intra: 49.6% 41.1% 42.2% inter: 0.0% 0.0% 0.0%
[libx264 @ 0x558ddf21de40] i16 v,h,dc,p: 97% 0% 2% 1%
[libx264 @ 0x558ddf21de40] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 22% 15% 7% 7% 9% 10% 9% 10% 11%
[libx264 @ 0x558ddf21de40] Weighted P-Frames: Y:0.0% UV:0.0%
[libx264 @ 0x558ddf21de40] ref P L0: 80.5% 10.5% 8.7% 0.3%
[libx264 @ 0x558ddf21de40] kb/s:319.34

avcodec/simple_idct_template: fix integer overflow

$
0
0
avcodec/simple_idct_template: fix integer overflow
  • [DH] libavcodec/simple_idct_template.c

lavc/pnmdec: Fix 16bit decoding.

$
0
0
lavc/pnmdec: Fix 16bit decoding. Regression since cdb5479c
Reported by irc user tTh from Mixart-Myrys
  • [DH] libavcodec/pnmdec.c

ffmpeg overwrite v4l2-ctl configuration -> how to do real raw device to network copy?

$
0
0

I need to do a real device to a tcp socket copy like :

cat /dev/video0 | /bin/nc 127.0.0.1 8000 -w 1

This very basic command line works for my case. I want to do the same, but with the ffmpeg process. The aim is to standardize the streaming process — in this case, a basic raw copy from a device to the network, and advanced transcoding from any source to the network, always with the same ffmpeg process.

I use v4l2-ctl before ffmpeg to make a lot of configuration that I want to keep.

I tried :

ffmpeg -loglevel debug -i /dev/video0 -f rawvideo tcp://127.0.0.1:8000 ffmpeg -loglevel debug -f v4l2 -i /dev/video0 -f rawvideo tcp://127.0.0.1:8000

The problem here is that ffmpeg kills my v4l2 configuration, and I don't want to set it up twice (v4l2-ctl interface + ffmpeg interface) in my code.

I also tried :

ffmpeg -loglevel debug -f rawvideo -i /dev/video0 tcp://127.0.0.1:8000 ffmpeg -loglevel debug -f rawvideo -i /dev/video0 -f rawvideo tcp://127.0.0.1:8000

I always get this stderr + exit :

[IMGUTILS @ 0x7ec1d5f0] Picture size 0x0 is invalid
[AVIOContext @ 0x1e8bb40] Statistics: 26 bytes read, 0 seeks
/dev/video0: Invalid argument

I also tried the

-c:v copy

parameter for all combinations above, without success :(

How can I do a raw binary copy (like "cat" or "dd" with NetCat) from a device to a socket with ffmpeg (without killing the v4l2 configuration)?

Pascal

Is there a way to convert a moviepy VideoFileClip to a binary stream in memory?

$
0
0

I am trying to take a VideoFileClip that's already loaded into memory and convert it to a binary stream so that I can save it to a cloud storage system. However, the cloud storage system only accepts binary streams, so I can't just use the normal save in moviepy. The other option is to write the binary stream to disk, then open it as a binary stream, but if I have thousands of videos that's going to be too costly. Thanks for the help.

Fluent ffmpeg not running synchronously

$
0
0

I am writing a program where I need to process a video multiple times using ffmpeg. The ffmpeg code (below) is inside a 'then' statement of a promise.

ffmpeg(path) .size('640x?') .aspect('1:1') .autopad('#682BAB') .saveToFile(`${userDirPath}/11-${userFileName}`) .on('end', () => { ffmpeg() .input('test-11-start.mp4') .mergeAdd(`${userDirPath}/11-${userFileName}`) .mergeAdd('test-11-end.mp4') .mergeToFile(`${userDirPath}/11-final-${userFileName}`, 'temp/') .on('end', () => console.log('FFmpeg done!')); });

There is another ffmpeg function after this (same, but with a different aspect ratio) and then, a 'then' statement with some other functions.

The problem is that this ffmpeg function runs asynchronously, and the next statements (which use the resulting file of the ffmpeg function) are executed before it finishes executing, so I want it to run synchronously. I've tried async/await (below) but it still runs asynchronously. What is wrong with the code?

async function ffmpegMerge() { try { await ffmpeg(path) .size('640x?') .aspect('1:1') .autopad('#682BAB') .saveToFile(`${userDirPath}/11-${userFileName}`) .on('end', () => { ffmpeg() .input(`test-11-start.mp4`) .mergeAdd(`${userDirPath}/11-${userFileName}`) .mergeAdd(`test-11-end.mp4`) .mergeToFile(`${userDirPath}/11-final-${userFileName}.mp4`, 'temp/') .on('end', () => console.log('FFmpeg done!')); }) } catch (err) { return Promise.reject(new Error(err)); }
}

How do I stop ffmpeg from spamming itself when I auto restart?

$
0
0
const fs = require("fs");
const express = require("express");
const app = express();
const path = require("path");
const ffmpeg = require("fluent-ffmpeg");
const md5 = require("md5");
const readline = require("readline");
const formidable = require("formidable"); const dir = "Custom/Dir";
const thumb = __dirname + "/thumb";
const ph = __dirname + "/placeholder"; app.use("/serve", express.static(dir));
app.use("/thumb", express.static(thumb));
app.use("/ph", express.static(ph)); const list = [];
const listThumb = []; process.on("uncaughtException", (err) => { console.log("Caught Exception: " + err);
}); let passwords = fs.readFileSync("passwords.txt").toString().split("\n"); app.get("/", (req, res) => { res.sendFile(__dirname + "/index.html")
}); app.get("/upload", (req, res) => { res.sendFile(__dirname + "/upload.html");
}); app.post("/uploadFile", (req, res) => { let form = new formidable.IncomingForm(); form.parse(req, (err, fields, files) => { if (passwords.includes(md5(fields.password))) { fs.readFile(files.filetoupload.path, (err, data) => { let newPath = dir + "/" + files.filetoupload.name; if (!fs.existsSync(newPath)) { fs.writeFile(newPath, data, (err) => { res.writeHead(200, {"Content-Type": "text/html"}); res.write("

File Uploaded

"); res.end(); }); } else { res.writeHead(200, {"Content-Type": "text/html"}); res.write("

File already exists. Upload with a different name please.

"); res.end(); } }); } }); }); fs.readdir(dir, (err, files) => { if (err) { throw err; } else { let i = 0; files.forEach((file) => { list[i] = path.basename(file); if (!fs.existsSync(__dirname + "\\thumb\\" + list[i] + ".png")) { console.log("Generating: " + list[i] + ".png"); let proc = new ffmpeg({source: dir + "/" + file, nolog: true}); proc.setFfmpegPath(__dirname + "\\ffmpeg.exe"); proc.screenshots({ timestamps: [0.0], filename: list[i] + ".png", folder: __dirname + "\\thumb\\", size: "100x100" }); } i++; }); let serveDoc = "File Server"; for (let j = 0; j < list.length; j++) { if (path.extname(list[j]).toLowerCase() !== ".jpg"&& path.extname(list[j]).toLowerCase() !== ".jpeg"&& path.extname(list[j]).toLowerCase() !== ".png") { if (path.extname(list[j]).toLowerCase() == ".mp3" || path.extname(list[j]).toLowerCase() == ".wav") { serveDoc += "" + "" + "";; }/* else if (path.extname(list[j]).toLowerCase() == ".webm") { serveDoc += "" + "" + "";; }*/ else { serveDoc += "" + "" + ""; } } else { serveDoc += "" + "" + ""; } } serveDoc += ""; fs.writeFile("index.html", serveDoc, (err) => { if (err) throw err; }); } }); setTimeout(() => { process.exit(0); }, 1000 * 60 * 30); app.listen(80, (err) => { if (err) { throw err; } else { console.log("Listening on port 80."); } });

The issue is that the program needs to be restarted every X minutes so that the list of media will update on its own. However, upon a restart, ffmpeg goes crazy and starts to spam a batch script over and over again without stopping. The only way out of it is to restart my computer.

I've tried using PM2, Forever, and Supervisor. Nodemon, as far as I know, won't auto-restart.

Viewing all 117673 articles
Browse latest View live


<script src="https://jsc.adskeeper.com/r/s/rssing.com.1596347.js" async> </script>