        # TV mode: one publisher, many subscribers
        application mytv {

            # enable live streaming
            live on;

            # record first 1K of stream
            record all;
            record_path /tmp/av;
            record_max_size 1K;

            # append current timestamp to each flv
            record_unique on;

            # publish only from localhost
            allow publish 127.0.0.1;
            deny publish all;

            #allow play all;
        }
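
        # Usage sketch (illustrative; "input.flv" and "mystream" are placeholder names):
        # publishing must come from this host per the allow/deny rules above, e.g.
        #   ffmpeg -re -i input.flv -c copy -f flv rtmp://localhost:1935/mytv/mystream
        # while any client may play the stream, e.g.
        #   ffplay rtmp://localhost:1935/mytv/mystream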
        # Transcoding (ffmpeg needed)
        application big {
            live on;

            # On every published stream run this command (ffmpeg)
            # with substitutions: $app/${app}, $name/${name} for application & stream name.
            #
            # This ffmpeg call receives the stream from this application &
            # reduces the resolution down to 32x32. The stream is then published to
            # the 'small' application (see below) under the same name.
            #
            # ffmpeg can do anything with the stream: video/audio
            # transcoding, resizing, altering container/codec params etc.
            #
            # Multiple exec lines can be specified.

            exec ffmpeg -re -i rtmp://localhost:1935/$app/$name -vcodec flv -acodec copy -s 32x32
                        -f flv rtmp://localhost:1935/small/${name};
        }
        application small {
            live on;
            # Video with reduced resolution comes here from ffmpeg
        }
        application webcam {
            live on;

            # Stream from local webcam
            exec_static ffmpeg -f video4linux2 -i /dev/video0 -c:v libx264 -an
                               -f flv rtmp://localhost:1935/webcam/mystream;
        }
        application mypush {
            live on;

            # Every stream published here
            # is automatically pushed to
            # these two machines
            push rtmp1.example.com;
            push rtmp2.example.com:1934;
        }
        application mypull {
            live on;

            # Pull all streams from remote machine
            # and play locally
            pull rtmp://rtmp3.example.com pageUrl=www.example.com/index.html;
        }
        application mystaticpull {
            live on;

            # Static pull is started at nginx start
            pull rtmp://rtmp4.example.com pageUrl=www.example.com/index.html name=mystream static;
        }
        # video on demand
        application vod {
            play /var/flvs;
        }

        application vod2 {
            play /var/mp4s;
        }
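
        # Playback sketch (illustrative file/stream names): the stream name is
        # resolved against the 'play' directory, roughly like
        #   ffplay rtmp://localhost/vod/sample.flv
        #   ffplay rtmp://localhost/vod2/mp4:sample.mp4
        # (mp4 names conventionally carry the "mp4:" prefix in RTMP URLs)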
        # Many publishers, many subscribers
        # no checks, no recording
        application videochat {

            live on;
            # The following notifications receive all
            # the session variables as well as
            # particular call arguments in an HTTP POST
            # request

            # Make HTTP request & use HTTP retcode
            # to decide whether to allow publishing
            # from this connection or not
            on_publish http://localhost:8080/publish;

            # Same with playing
            on_play http://localhost:8080/play;

            # Publish/play end (repeats on disconnect)
            on_done http://localhost:8080/done;

            # All of the above notifications receive
            # standard connect() arguments as well as
            # play/publish ones. If any arguments are sent
            # with GET-style syntax to play & publish,
            # these are also included.
            # Example URL:
            #   rtmp://localhost/myapp/mystream?a=b&c=d
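
            # Illustration (field names assumed from the module's notify interface):
            # for the example URL above, each notification is an HTTP POST roughly like
            #   call=publish&app=myapp&name=mystream&addr=<client ip>&a=b&c=d
            # and a 2xx reply from the handler allows the action, while an error code rejects it.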
            # record 10 video keyframes (no audio) every 2 minutes
            record keyframes;
            record_path /tmp/vc;
            record_max_frames 10;
            record_interval 2m;

            # Async notify about an flv recorded
            on_record_done http://localhost:8080/record_done;
        }
        # HLS

        # For HLS to work please create a directory in tmpfs (/tmp/hls here)
        # for the fragments. The directory contents are served via HTTP (see
        # the http{} section in this config).
        #
        # The incoming stream must be in H264/AAC. For iPhones use the baseline
        # H264 profile (see the ffmpeg example).
        #
        # This example creates an RTMP stream from a movie, ready for HLS:
        #
        #   ffmpeg -loglevel verbose -re -i movie.avi -vcodec libx264
        #          -vprofile baseline -acodec libmp3lame -ar 44100 -ac 1
        #          -f flv rtmp://localhost:1935/hls/movie
        #
        # If you need to transcode a live stream, use the 'exec' feature.
        #
        application hls {
            live on;
            hls on;
            hls_path /tmp/hls;
        }
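
        # Playback sketch: assuming the http{} section maps a /hls location onto
        # /tmp/hls on port 8080, players would fetch the generated playlist at
        #   http://localhost:8080/hls/movie.m3u8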
    }
}

# HTTP can be used for accessing RTMP stats
http {

    server {

        listen 8080;
        # This URL provides RTMP statistics in XML
        location /stat {
            rtmp_stat all;

            # Use this stylesheet to view XML as web page
            # in browser
            rtmp_stat_stylesheet stat.xsl;
        }
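
        # e.g. with the listen/location directives above, the stats page is at
        #   http://localhost:8080/stat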
        location /stat.xsl {
            # XML stylesheet to view RTMP stats.
            # Copy stat.xsl wherever you want
            # and put the full directory path here
            root /path/to/stat.xsl/;
        }